diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d40cd46c3b362540dbba9e91e7d548d98bdcdd46
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd70a644277be324cee2f5344cd1bb8d09d549a4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1a7a390c99aded41aca966b924c31cb488ffcd4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61900c1193c8fbcd2f6d4627d9bcfe6c0b07757d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..523683f9504ca7b9371c8d992bf4e4931a955f7d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..312d75a63914b82afbdad139ab18098de534986f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1cc31452053cb947ab72f27c7b3e32b2b4f6f429
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0ae2fca9bcdd5f415e29ec08ce3fe8c0181c9e0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..007be9630c9c6f3b6abde3f7e1febebf34a94cdd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc9643bbe2e7d8b2b3de2f3a96c93dc156ac362c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..492448e0767c4c557125784f0ecd001f9d00a0f7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb67f3c8257ba66c1d350c88be6663346c0beb33
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd2dff12dfd2311d9add5f7adb12056bbda3acd5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..235fbf052cd31a99d98faf86b6d3c51046753a8a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df21bd67471c25410b4d1e2efe985bd8822e74d7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c57b68bad2b0215adeb2126dfccb4554beba4cd6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc differ
diff --git
a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa5e359f087af174ddeca8f7a6f570f4c8016e90 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19b7f60f6f69ba6fe7bfd489eddd7b0adcc7cdcd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79b3666418a3a885ce2eb2ada7f55ca72aa1bad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30630582f9b5a5392967528a54c738bb449ca896 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/api.py new file mode 100644 index 0000000000000000000000000000000000000000..280c1a5a8225e7832ecb6f80e4e96feb25ca4f8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/api.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Parser API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +import itertools + +from nltk.internals import overridden + + +class ParserI: + """ + A processing class for deriving trees that represent possible + structures for a sequence of tokens. These tree structures are + known as "parses". Typically, parsers are used to derive syntax + trees for sentences. But parsers can also be used to derive other + kinds of tree structure, such as morphological trees and discourse + structures. + + Subclasses must define: + - at least one of: ``parse()``, ``parse_sents()``. + + Subclasses may define: + - ``grammar()`` + """ + + def grammar(self): + """ + :return: The grammar used by this parser. + """ + raise NotImplementedError() + + def parse(self, sent, *args, **kwargs): + """ + :return: An iterator that generates parse trees for the sentence. + When possible this list is sorted from most likely to least likely. 
+ + :param sent: The sentence to be parsed + :type sent: list(str) + :rtype: iter(Tree) + """ + if overridden(self.parse_sents): + return next(self.parse_sents([sent], *args, **kwargs)) + elif overridden(self.parse_one): + return ( + tree + for tree in [self.parse_one(sent, *args, **kwargs)] + if tree is not None + ) + elif overridden(self.parse_all): + return iter(self.parse_all(sent, *args, **kwargs)) + else: + raise NotImplementedError() + + def parse_sents(self, sents, *args, **kwargs): + """ + Apply ``self.parse()`` to each element of ``sents``. + :rtype: iter(iter(Tree)) + """ + return (self.parse(sent, *args, **kwargs) for sent in sents) + + def parse_all(self, sent, *args, **kwargs): + """:rtype: list(Tree)""" + return list(self.parse(sent, *args, **kwargs)) + + def parse_one(self, sent, *args, **kwargs): + """:rtype: Tree or None""" + return next(self.parse(sent, *args, **kwargs), None) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/corenlp.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/corenlp.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3146d1a086d4e49a0eaae585e09cab4a267834 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/corenlp.py @@ -0,0 +1,800 @@ +# Natural Language Toolkit: Interface to the CoreNLP REST API. +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dmitrijs Milajevs +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os # required for doctests +import re +import socket +import time +from typing import List, Tuple + +from nltk.internals import _java_options, config_java, find_jar_iter, java +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.tag.api import TaggerI +from nltk.tokenize.api import TokenizerI +from nltk.tree import Tree + +_stanford_url = "https://stanfordnlp.github.io/CoreNLP/" + + +class CoreNLPServerError(EnvironmentError): + """Exceptions associated with the Core NLP server.""" + + +def try_port(port=0): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind(("", port)) + + p = sock.getsockname()[1] + sock.close() + + return p + + +class CoreNLPServer: + + _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)-models\.jar" + _JAR = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)\.jar" + + def __init__( + self, + path_to_jar=None, + path_to_models_jar=None, + verbose=False, + java_options=None, + corenlp_options=None, + port=None, + ): + + if corenlp_options is None: + corenlp_options = ["-preload", "tokenize,ssplit,pos,lemma,parse,depparse"] + + jars = list( + find_jar_iter( + self._JAR, + path_to_jar, + env_vars=("CORENLP",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ) + ) + + # find the most recent code and model jar + stanford_jar = max(jars, key=lambda model_name: re.match(self._JAR, model_name)) + + if port is None: + try: + port = try_port(9000) + except OSError: + port = try_port() + corenlp_options.extend(["-port", str(port)]) + else: + try_port(port) + corenlp_options.extend(["-port", str(port)]) + + self.url = f"http://localhost:{port}" + + model_jar = max( + find_jar_iter( + self._MODEL_JAR_PATTERN, + path_to_models_jar, + env_vars=("CORENLP_MODELS",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_name: re.match(self._MODEL_JAR_PATTERN, model_name), + ) + + self.verbose = verbose + + self._classpath = stanford_jar, model_jar + + self.corenlp_options = corenlp_options + 
self.java_options = java_options or ["-mx2g"] + + def start(self, stdout="devnull", stderr="devnull"): + """Starts the CoreNLP server + + :param stdout, stderr: Specifies where CoreNLP output is redirected. Valid values are 'devnull', 'stdout', 'pipe' + """ + import requests + + cmd = ["edu.stanford.nlp.pipeline.StanfordCoreNLPServer"] + + if self.corenlp_options: + cmd.extend(self.corenlp_options) + + # Configure java. + default_options = " ".join(_java_options) + config_java(options=self.java_options, verbose=self.verbose) + + try: + self.popen = java( + cmd, + classpath=self._classpath, + blocking=False, + stdout=stdout, + stderr=stderr, + ) + finally: + # Return java configurations to their default values. + config_java(options=default_options, verbose=self.verbose) + + # Check that the server is istill running. + returncode = self.popen.poll() + if returncode is not None: + _, stderrdata = self.popen.communicate() + raise CoreNLPServerError( + returncode, + "Could not start the server. " + "The error was: {}".format(stderrdata.decode("ascii")), + ) + + for i in range(30): + try: + response = requests.get(requests.compat.urljoin(self.url, "live")) + except requests.exceptions.ConnectionError: + time.sleep(1) + else: + if response.ok: + break + else: + raise CoreNLPServerError("Could not connect to the server.") + + for i in range(60): + try: + response = requests.get(requests.compat.urljoin(self.url, "ready")) + except requests.exceptions.ConnectionError: + time.sleep(1) + else: + if response.ok: + break + else: + raise CoreNLPServerError("The server is not ready.") + + def stop(self): + self.popen.terminate() + self.popen.wait() + + def __enter__(self): + self.start() + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + return False + + +class GenericCoreNLPParser(ParserI, TokenizerI, TaggerI): + """Interface to the CoreNLP Parser.""" + + def __init__( + self, + url="http://localhost:9000", + encoding="utf8", + tagtype=None, + strict_json=True, + ): + import requests + + self.url = url + self.encoding = encoding + + if tagtype not in ["pos", "ner", None]: + raise ValueError("tagtype must be either 'pos', 'ner' or None") + + self.tagtype = tagtype + self.strict_json = strict_json + + self.session = requests.Session() + + def parse_sents(self, sentences, *args, **kwargs): + """Parse multiple sentences. + + Takes multiple sentences as a list where each sentence is a list of + words. Each sentence will be automatically tagged with this + CoreNLPParser instance's tagger. + + If a whitespace exists inside a token, then the token will be treated as + several tokens. + + :param sentences: Input sentences to parse + :type sentences: list(list(str)) + :rtype: iter(iter(Tree)) + """ + # Converting list(list(str)) -> list(str) + sentences = (" ".join(words) for words in sentences) + return self.raw_parse_sents(sentences, *args, **kwargs) + + def raw_parse(self, sentence, properties=None, *args, **kwargs): + """Parse a sentence. + + Takes a sentence as a string; before parsing, it will be automatically + tokenized and tagged by the CoreNLP Parser. 
+ + :param sentence: Input sentence to parse + :type sentence: str + :rtype: iter(Tree) + """ + default_properties = {"tokenize.whitespace": "false"} + default_properties.update(properties or {}) + + return next( + self.raw_parse_sents( + [sentence], properties=default_properties, *args, **kwargs + ) + ) + + def api_call(self, data, properties=None, timeout=60): + default_properties = { + "outputFormat": "json", + "annotators": "tokenize,pos,lemma,ssplit,{parser_annotator}".format( + parser_annotator=self.parser_annotator + ), + } + + default_properties.update(properties or {}) + + response = self.session.post( + self.url, + params={"properties": json.dumps(default_properties)}, + data=data.encode(self.encoding), + headers={"Content-Type": f"text/plain; charset={self.encoding}"}, + timeout=timeout, + ) + + response.raise_for_status() + + return response.json(strict=self.strict_json) + + def raw_parse_sents( + self, sentences, verbose=False, properties=None, *args, **kwargs + ): + """Parse multiple sentences. + + Takes multiple sentences as a list of strings. Each sentence will be + automatically tokenized and tagged. + + :param sentences: Input sentences to parse. + :type sentences: list(str) + :rtype: iter(iter(Tree)) + + """ + default_properties = { + # Only splits on '\n', never inside the sentence. + "ssplit.eolonly": "true" + } + + default_properties.update(properties or {}) + + """ + for sentence in sentences: + parsed_data = self.api_call(sentence, properties=default_properties) + + assert len(parsed_data['sentences']) == 1 + + for parse in parsed_data['sentences']: + tree = self.make_tree(parse) + yield iter([tree]) + """ + parsed_data = self.api_call("\n".join(sentences), properties=default_properties) + for parsed_sent in parsed_data["sentences"]: + tree = self.make_tree(parsed_sent) + yield iter([tree]) + + def parse_text(self, text, *args, **kwargs): + """Parse a piece of text. + + The text might contain several sentences which will be split by CoreNLP. + + :param str text: text to be split. + :returns: an iterable of syntactic structures. # TODO: should it be an iterable of iterables? + + """ + parsed_data = self.api_call(text, *args, **kwargs) + + for parse in parsed_data["sentences"]: + yield self.make_tree(parse) + + def tokenize(self, text, properties=None): + """Tokenize a string of text. + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The CoreNLP server can be started using the following notation, although + we recommend the `with CoreNLPServer() as server:` context manager notation + to ensure that the server is always stopped. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url) + + >>> text = 'Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.' + >>> list(parser.tokenize(text)) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + + >>> s = "The colour of the wall is blue." + >>> list( + ... parser.tokenize( + ... 'The colour of the wall is blue.', + ... properties={'tokenize.options': 'americanize=true'}, + ... ) + ... 
) + ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.'] + >>> server.stop() + + """ + default_properties = {"annotators": "tokenize,ssplit"} + + default_properties.update(properties or {}) + + result = self.api_call(text, properties=default_properties) + + for sentence in result["sentences"]: + for token in sentence["tokens"]: + yield token["originalText"] or token["word"] + + def tag_sents(self, sentences): + """ + Tag multiple sentences. + + Takes multiple sentences as a list where each sentence is a list of + tokens. + + :param sentences: Input sentences to tag + :type sentences: list(list(str)) + :rtype: list(list(tuple(str, str)) + """ + # Converting list(list(str)) -> list(str) + sentences = (" ".join(words) for words in sentences) + return [sentences[0] for sentences in self.raw_tag_sents(sentences)] + + def tag(self, sentence: str) -> List[Tuple[str, str]]: + """ + Tag a list of tokens. + + :rtype: list(tuple(str, str)) + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The CoreNLP server can be started using the following notation, although + we recommend the `with CoreNLPServer() as server:` context manager notation + to ensure that the server is always stopped. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url, tagtype='ner') + >>> tokens = 'Rami Eid is studying at Stony Brook University in NY'.split() + >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE + [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), ('at', 'O'), ('Stony', 'ORGANIZATION'), + ('Brook', 'ORGANIZATION'), ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'STATE_OR_PROVINCE')] + + >>> parser = CoreNLPParser(url=server.url, tagtype='pos') + >>> tokens = "What is the airspeed of an unladen swallow ?".split() + >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), + ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), + ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')] + >>> server.stop() + """ + return self.tag_sents([sentence])[0] + + def raw_tag_sents(self, sentences): + """ + Tag multiple sentences. + + Takes multiple sentences as a list where each sentence is a string. + + :param sentences: Input sentences to tag + :type sentences: list(str) + :rtype: list(list(list(tuple(str, str))) + """ + default_properties = { + "ssplit.isOneSentence": "true", + "annotators": "tokenize,ssplit,", + } + + # Supports only 'pos' or 'ner' tags. + assert self.tagtype in ["pos", "ner"] + default_properties["annotators"] += self.tagtype + for sentence in sentences: + tagged_data = self.api_call(sentence, properties=default_properties) + yield [ + [ + (token["word"], token[self.tagtype]) + for token in tagged_sentence["tokens"] + ] + for tagged_sentence in tagged_data["sentences"] + ] + + +class CoreNLPParser(GenericCoreNLPParser): + """ + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The recommended usage of `CoreNLPParser` is using the context manager notation: + >>> with CoreNLPServer() as server: + ... parser = CoreNLPParser(url=server.url) + ... next( + ... parser.raw_parse('The quick brown fox jumps over the lazy dog.') + ... 
).pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|__________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick brown fox jumps over the lazy dog . + + Alternatively, the server can be started using the following notation. + Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started + outside of Python. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url) + + >>> (parse_fox, ), (parse_wolf, ) = parser.raw_parse_sents( + ... [ + ... 'The quick brown fox jumps over the lazy dog.', + ... 'The quick grey wolf jumps over the lazy fox.', + ... ] + ... ) + + >>> parse_fox.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|__________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick brown fox jumps over the lazy dog . + + >>> parse_wolf.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|_________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick grey wolf jumps over the lazy fox . + + >>> (parse_dog, ), (parse_friends, ) = parser.parse_sents( + ... [ + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ] + ... ) + + >>> parse_dog.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______|____ + | VP + | ________|___ + NP | NP + | | ___|___ + PRP VBP DT NN + | | | | + I 'm a dog + + >>> parse_friends.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + ____|___________ + | VP + | ___________|_____________ + | | NP + | | _______|________________________ + | | NP | | | + | | _____|_______ | | | + NP | NP | | NP | + | | ______|_________ | | ___|____ | + DT VBZ PRP$ NNS POS NN -LRB- DT NN -RRB- + | | | | | | | | | | + This is my friends ' cat -LRB- the tabby -RRB- + + >>> parse_john, parse_mary, = parser.parse_text( + ... 'John loves Mary. Mary walks.' + ... ) + + >>> parse_john.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _____|_____________ + | VP | + | ____|___ | + NP | NP | + | | | | + NNP VBZ NNP . + | | | | + John loves Mary . + + >>> parse_mary.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _____|____ + NP VP | + | | | + NNP VBZ . + | | | + Mary walks . + + Special cases + + >>> next( + ... parser.raw_parse( + ... 'NASIRIYA, Iraq—Iraqi doctors who treated former prisoner of war ' + ... 'Jessica Lynch have angrily dismissed claims made in her biography ' + ... 'that she was raped by her Iraqi captors.' + ... ) + ... ).height() + 14 + + >>> next( + ... parser.raw_parse( + ... "The broader Standard & Poor's 500 Index <.SPX> was 0.46 points lower, or " + ... '0.05 percent, at 997.02.' + ... ) + ... ).height() + 11 + + >>> server.stop() + """ + + _OUTPUT_FORMAT = "penn" + parser_annotator = "parse" + + def make_tree(self, result): + return Tree.fromstring(result["parse"]) + + +class CoreNLPDependencyParser(GenericCoreNLPParser): + """Dependency parser. + + Skip these tests if CoreNLP is likely not ready. 
+ >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The recommended usage of `CoreNLPParser` is using the context manager notation: + >>> with CoreNLPServer() as server: + ... dep_parser = CoreNLPDependencyParser(url=server.url) + ... parse, = dep_parser.raw_parse( + ... 'The quick brown fox jumps over the lazy dog.' + ... ) + ... print(parse.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + brown JJ 4 amod + fox NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + dog NN 5 obl + . . 5 punct + + Alternatively, the server can be started using the following notation. + Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started + outside of Python. + >>> server = CoreNLPServer() + >>> server.start() + >>> dep_parser = CoreNLPDependencyParser(url=server.url) + >>> parse, = dep_parser.raw_parse('The quick brown fox jumps over the lazy dog.') + >>> print(parse.tree()) # doctest: +NORMALIZE_WHITESPACE + (jumps (fox The quick brown) (dog over the lazy) .) + + >>> for governor, dep, dependent in parse.triples(): + ... print(governor, dep, dependent) # doctest: +NORMALIZE_WHITESPACE + ('jumps', 'VBZ') nsubj ('fox', 'NN') + ('fox', 'NN') det ('The', 'DT') + ('fox', 'NN') amod ('quick', 'JJ') + ('fox', 'NN') amod ('brown', 'JJ') + ('jumps', 'VBZ') obl ('dog', 'NN') + ('dog', 'NN') case ('over', 'IN') + ('dog', 'NN') det ('the', 'DT') + ('dog', 'NN') amod ('lazy', 'JJ') + ('jumps', 'VBZ') punct ('.', '.') + + >>> (parse_fox, ), (parse_dog, ) = dep_parser.raw_parse_sents( + ... [ + ... 'The quick brown fox jumps over the lazy dog.', + ... 'The quick grey wolf jumps over the lazy fox.', + ... ] + ... ) + >>> print(parse_fox.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + brown JJ 4 amod + fox NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + dog NN 5 obl + . . 5 punct + + >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + grey JJ 4 amod + wolf NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + fox NN 5 obl + . . 5 punct + + >>> (parse_dog, ), (parse_friends, ) = dep_parser.parse_sents( + ... [ + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ] + ... ) + >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + I PRP 4 nsubj + 'm VBP 4 cop + a DT 4 det + dog NN 0 ROOT + + >>> print(parse_friends.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + This DT 6 nsubj + is VBZ 6 cop + my PRP$ 4 nmod:poss + friends NNS 6 nmod:poss + ' POS 4 case + cat NN 0 ROOT + ( -LRB- 9 punct + the DT 9 det + tabby NN 6 dep + ) -RRB- 9 punct + + >>> parse_john, parse_mary, = dep_parser.parse_text( + ... 'John loves Mary. Mary walks.' + ... ) + + >>> print(parse_john.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + John NNP 2 nsubj + loves VBZ 0 ROOT + Mary NNP 2 obj + . . 2 punct + + >>> print(parse_mary.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + Mary NNP 2 nsubj + walks VBZ 0 ROOT + . . 2 punct + + Special cases + + Non-breaking space inside of a token. + + >>> len( + ... next( + ... dep_parser.raw_parse( + ... 'Anhalt said children typically treat a 20-ounce soda bottle as one ' + ... 'serving, while it actually contains 2 1/2 servings.' + ... ) + ... ).nodes + ... ) + 23 + + Phone numbers. + + >>> len( + ... next( + ... 
dep_parser.raw_parse('This is not going to crash: 01 111 555.') + ... ).nodes + ... ) + 10 + + >>> print( + ... next( + ... dep_parser.raw_parse('The underscore _ should not simply disappear.') + ... ).to_conll(4) + ... ) # doctest: +NORMALIZE_WHITESPACE + The DT 2 det + underscore NN 7 nsubj + _ NFP 7 punct + should MD 7 aux + not RB 7 advmod + simply RB 7 advmod + disappear VB 0 ROOT + . . 7 punct + + >>> print( + ... next( + ... dep_parser.raw_parse( + ... 'for all of its insights into the dream world of teen life , and its electronic expression through ' + ... 'cyber culture , the film gives no quarter to anyone seeking to pull a cohesive story out of its 2 ' + ... '1/2-hour running time .' + ... ) + ... ).to_conll(4) + ... ) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS + for IN 2 case + all DT 24 obl + of IN 5 case + its PRP$ 5 nmod:poss + insights NNS 2 nmod + into IN 9 case + the DT 9 det + dream NN 9 compound + world NN 5 nmod + of IN 12 case + teen NN 12 compound + ... + + >>> server.stop() + """ + + _OUTPUT_FORMAT = "conll2007" + parser_annotator = "depparse" + + def make_tree(self, result): + + return DependencyGraph( + ( + " ".join(n_items[1:]) # NLTK expects an iterable of strings... + for n_items in sorted(transform(result)) + ), + cell_separator=" ", # To make sure that a non-breaking space is kept inside of a token. + ) + + +def transform(sentence): + for dependency in sentence["basicDependencies"]: + + dependent_index = dependency["dependent"] + token = sentence["tokens"][dependent_index - 1] + + # Return values that we don't know as '_'. Also, consider tag and ctag + # to be equal. + yield ( + dependent_index, + "_", + token["word"], + token["lemma"], + token["pos"], + token["pos"], + "_", + str(dependency["governor"]), + dependency["dep"], + "_", + "_", + ) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/malt.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/malt.py new file mode 100644 index 0000000000000000000000000000000000000000..229e8242719dc4645763706b58363b546bc7e6ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/malt.py @@ -0,0 +1,393 @@ +# Natural Language Toolkit: Interface to MaltParser +# +# Author: Dan Garrette +# Contributor: Liling Tan, Mustufain, osamamukhtar11 +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import inspect +import os +import subprocess +import sys +import tempfile + +from nltk.data import ZipFilePathPointer +from nltk.internals import find_dir, find_file, find_jars_within_path +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.parse.util import taggedsents_to_conll + + +def malt_regex_tagger(): + from nltk.tag import RegexpTagger + + _tagger = RegexpTagger( + [ + (r"\.$", "."), + (r"\,$", ","), + (r"\?$", "?"), # fullstop, comma, Qmark + (r"\($", "("), + (r"\)$", ")"), # round brackets + (r"\[$", "["), + (r"\]$", "]"), # square brackets + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "DT"), # articles + (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns + (r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive + (r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive + (r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepopsitions + (r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepopsitions + (r"(till|Till|until|Until)$", "IN"), # time prepopsitions + (r"(by|By|beside|Beside)$", "IN"), # space prepopsitions + 
(r"(under|Under|below|Below)$", "IN"), # space prepopsitions + (r"(over|Over|above|Above)$", "IN"), # space prepopsitions + (r"(across|Across|through|Through)$", "IN"), # space prepopsitions + (r"(into|Into|towards|Towards)$", "IN"), # space prepopsitions + (r"(onto|Onto|from|From)$", "IN"), # space prepopsitions + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] + ) + return _tagger.tag + + +def find_maltparser(parser_dirname): + """ + A module to find MaltParser .jar file and its dependencies. + """ + if os.path.exists(parser_dirname): # If a full path is given. + _malt_dir = parser_dirname + else: # Try to find path to maltparser directory in environment variables. + _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",)) + # Checks that that the found directory contains all the necessary .jar + malt_dependencies = ["", "", ""] + _malt_jars = set(find_jars_within_path(_malt_dir)) + _jars = {os.path.split(jar)[1] for jar in _malt_jars} + malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"} + + assert malt_dependencies.issubset(_jars) + assert any( + filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars) + ) + return list(_malt_jars) + + +def find_malt_model(model_filename): + """ + A module to find pre-trained MaltParser model. + """ + if model_filename is None: + return "malt_temp.mco" + elif os.path.exists(model_filename): # If a full path is given. + return model_filename + else: # Try to find path to malt model in environment variables. + return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False) + + +class MaltParser(ParserI): + """ + A class for dependency parsing with MaltParser. The input is the paths to: + - (optionally) a maltparser directory + - (optionally) the path to a pre-trained MaltParser .mco model file + - (optionally) the tagger to use for POS tagging before parsing + - (optionally) additional Java arguments + + Example: + >>> from nltk.parse import malt + >>> # With MALT_PARSER and MALT_MODEL environment set. + >>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP + >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP + (shot I (elephant an) (in (pajamas my)) .) + >>> # Without MALT_PARSER and MALT_MODEL environment. + >>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP + >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP + (shot I (elephant an) (in (pajamas my)) .) + """ + + def __init__( + self, + parser_dirname="", + model_filename=None, + tagger=None, + additional_java_args=None, + ): + """ + An interface for parsing with the Malt Parser. + + :param parser_dirname: The path to the maltparser directory that + contains the maltparser-1.x.jar + :type parser_dirname: str + :param model_filename: The name of the pre-trained model with .mco file + extension. If provided, training will not be required. + (see http://www.maltparser.org/mco/mco.html and + see http://www.patful.com/chalk/node/185) + :type model_filename: str + :param tagger: The tagger used to POS tag the raw string before + formatting to CONLL format. 
It should behave like `nltk.pos_tag` + :type tagger: function + :param additional_java_args: This is the additional Java arguments that + one can use when calling Maltparser, usually this is the heapsize + limits, e.g. `additional_java_args=['-Xmx1024m']` + (see https://goo.gl/mpDBvQ) + :type additional_java_args: list + """ + + # Find all the necessary jar files for MaltParser. + self.malt_jars = find_maltparser(parser_dirname) + # Initialize additional java arguments. + self.additional_java_args = ( + additional_java_args if additional_java_args is not None else [] + ) + # Initialize model. + self.model = find_malt_model(model_filename) + self._trained = self.model != "malt_temp.mco" + # Set the working_dir parameters i.e. `-w` from MaltParser's option. + self.working_dir = tempfile.gettempdir() + # Initialize POS tagger. + self.tagger = tagger if tagger is not None else malt_regex_tagger() + + def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"): + """ + Use MaltParser to parse multiple POS tagged sentences. Takes multiple + sentences where each sentence is a list of (word, tag) tuples. + The sentences must have already been tokenized and tagged. + + :param sentences: Input sentences to parse + :type sentence: list(list(tuple(str, str))) + :return: iter(iter(``DependencyGraph``)) the dependency graph + representation of each sentence + """ + if not self._trained: + raise Exception("Parser has not been trained. Call train() first.") + + with tempfile.NamedTemporaryFile( + prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + with tempfile.NamedTemporaryFile( + prefix="malt_output.conll.", + dir=self.working_dir, + mode="w", + delete=False, + ) as output_file: + # Convert list of sentences to CONLL format. + for line in taggedsents_to_conll(sentences): + input_file.write(str(line)) + input_file.close() + + # Generate command to run maltparser. + cmd = self.generate_malt_command( + input_file.name, output_file.name, mode="parse" + ) + + # This is a maltparser quirk, it needs to be run + # where the model file is. otherwise it goes into an awkward + # missing .jars or strange -w working_dir problem. + _current_path = os.getcwd() # Remembers the current path. + try: # Change to modelfile path + os.chdir(os.path.split(self.model)[0]) + except: + pass + ret = self._execute(cmd, verbose) # Run command. + os.chdir(_current_path) # Change back to current path. + + if ret != 0: + raise Exception( + "MaltParser parsing (%s) failed with exit " + "code %d" % (" ".join(cmd), ret) + ) + + # Must return iter(iter(Tree)) + with open(output_file.name) as infile: + for tree_str in infile.read().split("\n\n"): + yield ( + iter( + [ + DependencyGraph( + tree_str, top_relation_label=top_relation_label + ) + ] + ) + ) + + os.remove(input_file.name) + os.remove(output_file.name) + + def parse_sents(self, sentences, verbose=False, top_relation_label="null"): + """ + Use MaltParser to parse multiple sentences. + Takes a list of sentences, where each sentence is a list of words. + Each sentence will be automatically tagged with this + MaltParser instance's tagger. 
+ + :param sentences: Input sentences to parse + :type sentence: list(list(str)) + :return: iter(DependencyGraph) + """ + tagged_sentences = (self.tagger(sentence) for sentence in sentences) + return self.parse_tagged_sents( + tagged_sentences, verbose, top_relation_label=top_relation_label + ) + + def generate_malt_command(self, inputfilename, outputfilename=None, mode=None): + """ + This function generates the maltparser command use at the terminal. + + :param inputfilename: path to the input file + :type inputfilename: str + :param outputfilename: path to the output file + :type outputfilename: str + """ + + cmd = ["java"] + cmd += self.additional_java_args # Adds additional java arguments + # Joins classpaths with ";" if on Windows and on Linux/Mac use ":" + classpaths_separator = ";" if sys.platform.startswith("win") else ":" + cmd += [ + "-cp", + classpaths_separator.join(self.malt_jars), + ] # Adds classpaths for jars + cmd += ["org.maltparser.Malt"] # Adds the main function. + + # Adds the model file. + if os.path.exists(self.model): # when parsing + cmd += ["-c", os.path.split(self.model)[-1]] + else: # when learning + cmd += ["-c", self.model] + + cmd += ["-i", inputfilename] + if mode == "parse": + cmd += ["-o", outputfilename] + cmd += ["-m", mode] # mode use to generate parses. + return cmd + + @staticmethod + def _execute(cmd, verbose=False): + output = None if verbose else subprocess.PIPE + p = subprocess.Popen(cmd, stdout=output, stderr=output) + return p.wait() + + def train(self, depgraphs, verbose=False): + """ + Train MaltParser from a list of ``DependencyGraph`` objects + + :param depgraphs: list of ``DependencyGraph`` objects for training input data + :type depgraphs: DependencyGraph + """ + + # Write the conll_str to malt_train.conll file in /tmp/ + with tempfile.NamedTemporaryFile( + prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + input_str = "\n".join(dg.to_conll(10) for dg in depgraphs) + input_file.write(str(input_str)) + # Trains the model with the malt_train.conll + self.train_from_file(input_file.name, verbose=verbose) + # Removes the malt_train.conll once training finishes. + os.remove(input_file.name) + + def train_from_file(self, conll_file, verbose=False): + """ + Train MaltParser from a file + :param conll_file: str for the filename of the training input data + :type conll_file: str + """ + + # If conll_file is a ZipFilePathPointer, + # then we need to do some extra massaging + if isinstance(conll_file, ZipFilePathPointer): + with tempfile.NamedTemporaryFile( + prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + with conll_file.open() as conll_input_file: + conll_str = conll_input_file.read() + input_file.write(str(conll_str)) + return self.train_from_file(input_file.name, verbose=verbose) + + # Generate command to run maltparser. + cmd = self.generate_malt_command(conll_file, mode="learn") + ret = self._execute(cmd, verbose) + if ret != 0: + raise Exception( + "MaltParser training (%s) failed with exit " + "code %d" % (" ".join(cmd), ret) + ) + self._trained = True + + +if __name__ == "__main__": + """ + A demonstration function to show how NLTK users can use the malt parser API. + + >>> from nltk import pos_tag + >>> assert 'MALT_PARSER' in os.environ, str( + ... "Please set MALT_PARSER in your global environment, e.g.:\n" + ... "$ export MALT_PARSER='/home/user/maltparser-1.9.2/'") + >>> + >>> assert 'MALT_MODEL' in os.environ, str( + ... 
"Please set MALT_MODEL in your global environment, e.g.:\n" + ... "$ export MALT_MODEL='/home/user/engmalt.linear-1.7.mco'") + >>> + >>> _dg1_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n" + ... "2 sees _ VB _ _ 0 ROOT _ _\n" + ... "3 a _ DT _ _ 4 SPEC _ _\n" + ... "4 dog _ NN _ _ 2 OBJ _ _\n" + ... "5 . _ . _ _ 2 PUNCT _ _\n") + >>> + >>> + >>> _dg2_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n" + ... "2 walks _ VB _ _ 0 ROOT _ _\n" + ... "3 . _ . _ _ 2 PUNCT _ _\n") + >>> dg1 = DependencyGraph(_dg1_str) + >>> dg2 = DependencyGraph(_dg2_str) + >>> # Initialize a MaltParser object + >>> mp = MaltParser() + >>> + >>> # Trains a model. + >>> mp.train([dg1,dg2], verbose=False) + >>> sent1 = ['John','sees','Mary', '.'] + >>> sent2 = ['John', 'walks', 'a', 'dog', '.'] + >>> + >>> # Parse a single sentence. + >>> parsed_sent1 = mp.parse_one(sent1) + >>> parsed_sent2 = mp.parse_one(sent2) + >>> print(parsed_sent1.tree()) + (sees John Mary .) + >>> print(parsed_sent2.tree()) + (walks John (dog a) .) + >>> + >>> # Parsing multiple sentences. + >>> sentences = [sent1,sent2] + >>> parsed_sents = mp.parse_sents(sentences) + >>> print(next(next(parsed_sents)).tree()) + (sees John Mary .) + >>> print(next(next(parsed_sents)).tree()) + (walks John (dog a) .) + >>> + >>> # Initialize a MaltParser object with an English pre-trained model. + >>> parser_dirname = 'maltparser-1.9.2' + >>> model_name = 'engmalt.linear-1.7.mco' + >>> mp = MaltParser(parser_dirname=parser_dirname, model_filename=model_name, tagger=pos_tag) + >>> sent1 = 'I shot an elephant in my pajamas .'.split() + >>> sent2 = 'Time flies like banana .'.split() + >>> # Parse a single sentence. + >>> print(mp.parse_one(sent1).tree()) + (shot I (elephant an) (in (pajamas my)) .) + # Parsing multiple sentences + >>> sentences = [sent1,sent2] + >>> parsed_sents = mp.parse_sents(sentences) + >>> print(next(next(parsed_sents)).tree()) + (shot I (elephant an) (in (pajamas my)) .) + >>> print(next(next(parsed_sents)).tree()) + (flies Time (like banana) .) + """ + + import doctest + + doctest.testmod() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py new file mode 100644 index 0000000000000000000000000000000000000000..b96f996cf63b4d3e093994d6319c8fb9fb91569a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py @@ -0,0 +1,772 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# +# URL: +# For license information, see LICENSE.TXT +# + +import logging +import math + +from nltk.parse.dependencygraph import DependencyGraph + +logger = logging.getLogger(__name__) + +################################################################# +# DependencyScorerI - Interface for Graph-Edge Weight Calculation +################################################################# + + +class DependencyScorerI: + """ + A scorer for calculated the weights on the edges of a weighted + dependency graph. This is used by a + ``ProbabilisticNonprojectiveParser`` to initialize the edge + weights of a ``DependencyGraph``. While typically this would be done + by training a binary classifier, any class that can return a + multidimensional list representation of the edge weights can + implement this interface. As such, it has no necessary + fields. 
+ """ + + def __init__(self): + if self.__class__ == DependencyScorerI: + raise TypeError("DependencyScorerI is an abstract interface") + + def train(self, graphs): + """ + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + Typically the edges present in the graphs can be used as + positive training examples, and the edges not present as negative + examples. + """ + raise NotImplementedError() + + def score(self, graph): + """ + :type graph: DependencyGraph + :param graph: A dependency graph whose set of edges need to be + scored. + :rtype: A three-dimensional list of numbers. + :return: The score is returned in a multidimensional(3) list, such + that the outer-dimension refers to the head, and the + inner-dimension refers to the dependencies. For instance, + scores[0][1] would reference the list of scores corresponding to + arcs from node 0 to node 1. The node's 'address' field can be used + to determine its number identification. + + For further illustration, a score list corresponding to Fig.2 of + Keith Hall's 'K-best Spanning Tree Parsing' paper:: + + scores = [[[], [5], [1], [1]], + [[], [], [11], [4]], + [[], [10], [], [5]], + [[], [8], [8], []]] + + When used in conjunction with a MaxEntClassifier, each score would + correspond to the confidence of a particular edge being classified + with the positive training examples. + """ + raise NotImplementedError() + + +################################################################# +# NaiveBayesDependencyScorer +################################################################# + + +class NaiveBayesDependencyScorer(DependencyScorerI): + """ + A dependency scorer built around a MaxEnt classifier. In this + particular class that classifier is a ``NaiveBayesClassifier``. + It uses head-word, head-tag, child-word, and child-tag features + for classification. + + >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2 + + >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry] + >>> npp = ProbabilisticNonprojectiveParser() + >>> npp.train(graphs, NaiveBayesDependencyScorer()) + >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']) + >>> len(list(parses)) + 1 + + """ + + def __init__(self): + pass # Do nothing without throwing error + + def train(self, graphs): + """ + Trains a ``NaiveBayesClassifier`` using the edges present in + graphs list as positive examples, the edges not present as + negative examples. Uses a feature vector of head-word, + head-tag, child-word, and child-tag. + + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + """ + + from nltk.classify import NaiveBayesClassifier + + # Create training labeled training examples + labeled_examples = [] + for graph in graphs: + for head_node in graph.nodes.values(): + for child_index, child_node in graph.nodes.items(): + if child_index in head_node["deps"]: + label = "T" + else: + label = "F" + labeled_examples.append( + ( + dict( + a=head_node["word"], + b=head_node["tag"], + c=child_node["word"], + d=child_node["tag"], + ), + label, + ) + ) + + self.classifier = NaiveBayesClassifier.train(labeled_examples) + + def score(self, graph): + """ + Converts the graph into a feature-based representation of + each edge, and then assigns a score to each based on the + confidence of the classifier in assigning it to the + positive label. Scores are returned in a multidimensional list. 
+ + :type graph: DependencyGraph + :param graph: A dependency graph to score. + :rtype: 3 dimensional list + :return: Edge scores for the graph parameter. + """ + # Convert graph to feature representation + edges = [] + for head_node in graph.nodes.values(): + for child_node in graph.nodes.values(): + edges.append( + dict( + a=head_node["word"], + b=head_node["tag"], + c=child_node["word"], + d=child_node["tag"], + ) + ) + + # Score edges + edge_scores = [] + row = [] + count = 0 + for pdist in self.classifier.prob_classify_many(edges): + logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F")) + # smoothing in case the probability = 0 + row.append([math.log(pdist.prob("T") + 0.00000000001)]) + count += 1 + if count == len(graph.nodes): + edge_scores.append(row) + row = [] + count = 0 + return edge_scores + + +################################################################# +# A Scorer for Demo Purposes +################################################################# +# A short class necessary to show parsing example from paper +class DemoScorer(DependencyScorerI): + def train(self, graphs): + print("Training...") + + def score(self, graph): + # scores for Keith Hall 'K-best Spanning Tree Parsing' paper + return [ + [[], [5], [1], [1]], + [[], [], [11], [4]], + [[], [10], [], [5]], + [[], [8], [8], []], + ] + + +################################################################# +# Non-Projective Probabilistic Parsing +################################################################# + + +class ProbabilisticNonprojectiveParser: + """A probabilistic non-projective dependency parser. + + Nonprojective dependencies allows for "crossing branches" in the parse tree + which is necessary for representing particular linguistic phenomena, or even + typical parses in some languages. This parser follows the MST parsing + algorithm, outlined in McDonald(2005), which likens the search for the best + non-projective parse to finding the maximum spanning tree in a weighted + directed graph. + + >>> class Scorer(DependencyScorerI): + ... def train(self, graphs): + ... pass + ... + ... def score(self, graph): + ... return [ + ... [[], [5], [1], [1]], + ... [[], [], [11], [4]], + ... [[], [10], [], [5]], + ... [[], [8], [8], []], + ... ] + + + >>> npp = ProbabilisticNonprojectiveParser() + >>> npp.train([], Scorer()) + + >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None]) + >>> len(list(parses)) + 1 + + Rule based example + + >>> from nltk.grammar import DependencyGrammar + + >>> grammar = DependencyGrammar.fromstring(''' + ... 'taught' -> 'play' | 'man' + ... 'man' -> 'the' | 'in' + ... 'in' -> 'corner' + ... 'corner' -> 'the' + ... 'play' -> 'golf' | 'dachshund' | 'to' + ... 'dachshund' -> 'his' + ... ''') + + >>> ndp = NonprojectiveDependencyParser(grammar) + >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf']) + >>> len(list(parses)) + 4 + + """ + + def __init__(self): + """ + Creates a new non-projective parser. + """ + logging.debug("initializing prob. nonprojective...") + + def train(self, graphs, dependency_scorer): + """ + Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects, + and establishes this as the parser's scorer. This is used to + initialize the scores on a ``DependencyGraph`` during the parsing + procedure. + + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. 
+ :type dependency_scorer: DependencyScorerI + :param dependency_scorer: A scorer which implements the + ``DependencyScorerI`` interface. + """ + self._scorer = dependency_scorer + self._scorer.train(graphs) + + def initialize_edge_scores(self, graph): + """ + Assigns a score to every edge in the ``DependencyGraph`` graph. + These scores are generated via the parser's scorer which + was assigned during the training process. + + :type graph: DependencyGraph + :param graph: A dependency graph to assign scores to. + """ + self.scores = self._scorer.score(graph) + + def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph): + """ + Takes a list of nodes that have been identified to belong to a cycle, + and collapses them into on larger node. The arcs of all nodes in + the graph must be updated to account for this. + + :type new_node: Node. + :param new_node: A Node (Dictionary) to collapse the cycle nodes into. + :type cycle_path: A list of integers. + :param cycle_path: A list of node addresses, each of which is in the cycle. + :type g_graph, b_graph, c_graph: DependencyGraph + :param g_graph, b_graph, c_graph: Graphs which need to be updated. + """ + logger.debug("Collapsing nodes...") + # Collapse all cycle nodes into v_n+1 in G_Graph + for cycle_node_index in cycle_path: + g_graph.remove_by_address(cycle_node_index) + g_graph.add_node(new_node) + g_graph.redirect_arcs(cycle_path, new_node["address"]) + + def update_edge_scores(self, new_node, cycle_path): + """ + Updates the edge scores to reflect a collapse operation into + new_node. + + :type new_node: A Node. + :param new_node: The node which cycle nodes are collapsed into. + :type cycle_path: A list of integers. + :param cycle_path: A list of node addresses that belong to the cycle. + """ + logger.debug("cycle %s", cycle_path) + + cycle_path = self.compute_original_indexes(cycle_path) + + logger.debug("old cycle %s", cycle_path) + logger.debug("Prior to update: %s", self.scores) + + for i, row in enumerate(self.scores): + for j, column in enumerate(self.scores[i]): + logger.debug(self.scores[i][j]) + if j in cycle_path and i not in cycle_path and self.scores[i][j]: + subtract_val = self.compute_max_subtract_score(j, cycle_path) + + logger.debug("%s - %s", self.scores[i][j], subtract_val) + + new_vals = [] + for cur_val in self.scores[i][j]: + new_vals.append(cur_val - subtract_val) + + self.scores[i][j] = new_vals + + for i, row in enumerate(self.scores): + for j, cell in enumerate(self.scores[i]): + if i in cycle_path and j in cycle_path: + self.scores[i][j] = [] + + logger.debug("After update: %s", self.scores) + + def compute_original_indexes(self, new_indexes): + """ + As nodes are collapsed into others, they are replaced + by the new node in the graph, but it's still necessary + to keep track of what these original nodes were. This + takes a list of node addresses and replaces any collapsed + node addresses with their original addresses. + + :type new_indexes: A list of integers. + :param new_indexes: A list of node addresses to check for + subsumed nodes. 
+ """ + swapped = True + while swapped: + originals = [] + swapped = False + for new_index in new_indexes: + if new_index in self.inner_nodes: + for old_val in self.inner_nodes[new_index]: + if old_val not in originals: + originals.append(old_val) + swapped = True + else: + originals.append(new_index) + new_indexes = originals + return new_indexes + + def compute_max_subtract_score(self, column_index, cycle_indexes): + """ + When updating scores the score of the highest-weighted incoming + arc is subtracted upon collapse. This returns the correct + amount to subtract from that edge. + + :type column_index: integer. + :param column_index: A index representing the column of incoming arcs + to a particular node being updated + :type cycle_indexes: A list of integers. + :param cycle_indexes: Only arcs from cycle nodes are considered. This + is a list of such nodes addresses. + """ + max_score = -100000 + for row_index in cycle_indexes: + for subtract_val in self.scores[row_index][column_index]: + if subtract_val > max_score: + max_score = subtract_val + return max_score + + def best_incoming_arc(self, node_index): + """ + Returns the source of the best incoming arc to the + node with address: node_index + + :type node_index: integer. + :param node_index: The address of the 'destination' node, + the node that is arced to. + """ + originals = self.compute_original_indexes([node_index]) + logger.debug("originals: %s", originals) + + max_arc = None + max_score = None + for row_index in range(len(self.scores)): + for col_index in range(len(self.scores[row_index])): + if col_index in originals and ( + max_score is None or self.scores[row_index][col_index] > max_score + ): + max_score = self.scores[row_index][col_index] + max_arc = row_index + logger.debug("%s, %s", row_index, col_index) + + logger.debug(max_score) + + for key in self.inner_nodes: + replaced_nodes = self.inner_nodes[key] + if max_arc in replaced_nodes: + return key + + return max_arc + + def original_best_arc(self, node_index): + originals = self.compute_original_indexes([node_index]) + max_arc = None + max_score = None + max_orig = None + for row_index in range(len(self.scores)): + for col_index in range(len(self.scores[row_index])): + if col_index in originals and ( + max_score is None or self.scores[row_index][col_index] > max_score + ): + max_score = self.scores[row_index][col_index] + max_arc = row_index + max_orig = col_index + return [max_arc, max_orig] + + def parse(self, tokens, tags): + """ + Parses a list of tokens in accordance to the MST parsing algorithm + for non-projective dependency parses. Assumes that the tokens to + be parsed have already been tagged and those tags are provided. Various + scoring methods can be used by implementing the ``DependencyScorerI`` + interface and passing it to the training algorithm. + + :type tokens: list(str) + :param tokens: A list of words or punctuation to be parsed. + :type tags: list(str) + :param tags: A list of tags corresponding by index to the words in the tokens list. + :return: An iterator of non-projective parses. 
+ :rtype: iter(DependencyGraph) + """ + self.inner_nodes = {} + + # Initialize g_graph + g_graph = DependencyGraph() + for index, token in enumerate(tokens): + g_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + # Fully connect non-root nodes in g_graph + g_graph.connect_graph() + original_graph = DependencyGraph() + for index, token in enumerate(tokens): + original_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + b_graph = DependencyGraph() + c_graph = DependencyGraph() + + for index, token in enumerate(tokens): + c_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + # Assign initial scores to g_graph edges + self.initialize_edge_scores(g_graph) + logger.debug(self.scores) + # Initialize a list of unvisited vertices (by node address) + unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()] + # Iterate over unvisited vertices + nr_vertices = len(tokens) + betas = {} + while unvisited_vertices: + # Mark current node as visited + current_vertex = unvisited_vertices.pop(0) + logger.debug("current_vertex: %s", current_vertex) + # Get corresponding node n_i to vertex v_i + current_node = g_graph.get_by_address(current_vertex) + logger.debug("current_node: %s", current_node) + # Get best in-edge node b for current node + best_in_edge = self.best_incoming_arc(current_vertex) + betas[current_vertex] = self.original_best_arc(current_vertex) + logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex) + # b_graph = Union(b_graph, b) + for new_vertex in [current_vertex, best_in_edge]: + b_graph.nodes[new_vertex].update( + {"word": "TEMP", "rel": "NTOP", "address": new_vertex} + ) + b_graph.add_arc(best_in_edge, current_vertex) + # Beta(current node) = b - stored for parse recovery + # If b_graph contains a cycle, collapse it + cycle_path = b_graph.contains_cycle() + if cycle_path: + # Create a new node v_n+1 with address = len(nodes) + 1 + new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1} + # c_graph = Union(c_graph, v_n+1) + c_graph.add_node(new_node) + # Collapse all nodes in cycle C into v_n+1 + self.update_edge_scores(new_node, cycle_path) + self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph) + for cycle_index in cycle_path: + c_graph.add_arc(new_node["address"], cycle_index) + # self.replaced_by[cycle_index] = new_node['address'] + + self.inner_nodes[new_node["address"]] = cycle_path + + # Add v_n+1 to list of unvisited vertices + unvisited_vertices.insert(0, nr_vertices + 1) + + # increment # of nodes counter + nr_vertices += 1 + + # Remove cycle nodes from b_graph; B = B - cycle c + for cycle_node_address in cycle_path: + b_graph.remove_by_address(cycle_node_address) + + logger.debug("g_graph: %s", g_graph) + logger.debug("b_graph: %s", b_graph) + logger.debug("c_graph: %s", c_graph) + logger.debug("Betas: %s", betas) + logger.debug("replaced nodes %s", self.inner_nodes) + + # Recover parse tree + logger.debug("Final scores: %s", self.scores) + + logger.debug("Recovering parse...") + for i in range(len(tokens) + 1, nr_vertices + 1): + betas[betas[i][1]] = betas[i] + + logger.debug("Betas: %s", betas) + for node in original_graph.nodes.values(): + # TODO: It's dangerous to assume that deps it a dictionary + # because it's a default dictionary. 
Ideally, here we should not + # be concerned how dependencies are stored inside of a dependency + # graph. + node["deps"] = {} + for i in range(1, len(tokens) + 1): + original_graph.add_arc(betas[i][0], betas[i][1]) + + logger.debug("Done.") + yield original_graph + + +################################################################# +# Rule-based Non-Projective Parser +################################################################# + + +class NonprojectiveDependencyParser: + """ + A non-projective, rule-based, dependency parser. This parser + will return the set of all possible non-projective parses based on + the word-to-word relations defined in the parser's dependency + grammar, and will allow the branches of the parse tree to cross + in order to capture a variety of linguistic phenomena that a + projective parser will not. + """ + + def __init__(self, dependency_grammar): + """ + Creates a new ``NonprojectiveDependencyParser``. + + :param dependency_grammar: a grammar of word-to-word relations. + :type dependency_grammar: DependencyGrammar + """ + self._grammar = dependency_grammar + + def parse(self, tokens): + """ + Parses the input tokens with respect to the parser's grammar. Parsing + is accomplished by representing the search-space of possible parses as + a fully-connected directed graph. Arcs that would lead to ungrammatical + parses are removed and a lattice is constructed of length n, where n is + the number of input tokens, to represent all possible grammatical + traversals. All possible paths through the lattice are then enumerated + to produce the set of non-projective parses. + + param tokens: A list of tokens to parse. + type tokens: list(str) + return: An iterator of non-projective parses. + rtype: iter(DependencyGraph) + """ + # Create graph representation of tokens + self._graph = DependencyGraph() + + for index, token in enumerate(tokens): + self._graph.nodes[index] = { + "word": token, + "deps": [], + "rel": "NTOP", + "address": index, + } + + for head_node in self._graph.nodes.values(): + deps = [] + for dep_node in self._graph.nodes.values(): + if ( + self._grammar.contains(head_node["word"], dep_node["word"]) + and head_node["word"] != dep_node["word"] + ): + deps.append(dep_node["address"]) + head_node["deps"] = deps + + # Create lattice of possible heads + roots = [] + possible_heads = [] + for i, word in enumerate(tokens): + heads = [] + for j, head in enumerate(tokens): + if (i != j) and self._grammar.contains(head, word): + heads.append(j) + if len(heads) == 0: + roots.append(i) + possible_heads.append(heads) + + # Set roots to attempt + if len(roots) < 2: + if len(roots) == 0: + for i in range(len(tokens)): + roots.append(i) + + # Traverse lattice + analyses = [] + for _ in roots: + stack = [] + analysis = [[] for i in range(len(possible_heads))] + i = 0 + forward = True + while i >= 0: + if forward: + if len(possible_heads[i]) == 1: + analysis[i] = possible_heads[i][0] + elif len(possible_heads[i]) == 0: + analysis[i] = -1 + else: + head = possible_heads[i].pop() + analysis[i] = head + stack.append([i, head]) + if not forward: + index_on_stack = False + for stack_item in stack: + if stack_item[0] == i: + index_on_stack = True + orig_length = len(possible_heads[i]) + + if index_on_stack and orig_length == 0: + for j in range(len(stack) - 1, -1, -1): + stack_item = stack[j] + if stack_item[0] == i: + possible_heads[i].append(stack.pop(j)[1]) + + elif index_on_stack and orig_length > 0: + head = possible_heads[i].pop() + analysis[i] = head + stack.append([i, 
head]) + forward = True + + if i + 1 == len(possible_heads): + analyses.append(analysis[:]) + forward = False + if forward: + i += 1 + else: + i -= 1 + + # Filter parses + # ensure 1 root, every thing has 1 head + for analysis in analyses: + if analysis.count(-1) > 1: + # there are several root elements! + continue + + graph = DependencyGraph() + graph.root = graph.nodes[analysis.index(-1) + 1] + + for address, (token, head_index) in enumerate( + zip(tokens, analysis), start=1 + ): + head_address = head_index + 1 + + node = graph.nodes[address] + node.update({"word": token, "address": address}) + + if head_address == 0: + rel = "ROOT" + else: + rel = "" + graph.nodes[head_index + 1]["deps"][rel].append(address) + + # TODO: check for cycles + yield graph + + +################################################################# +# Demos +################################################################# + + +def demo(): + # hall_demo() + nonprojective_conll_parse_demo() + rule_based_demo() + + +def hall_demo(): + npp = ProbabilisticNonprojectiveParser() + npp.train([], DemoScorer()) + for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]): + print(parse_graph) + + +def nonprojective_conll_parse_demo(): + from nltk.parse.dependencygraph import conll_data2 + + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + npp = ProbabilisticNonprojectiveParser() + npp.train(graphs, NaiveBayesDependencyScorer()) + for parse_graph in npp.parse( + ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"] + ): + print(parse_graph) + + +def rule_based_demo(): + from nltk.grammar import DependencyGrammar + + grammar = DependencyGrammar.fromstring( + """ + 'taught' -> 'play' | 'man' + 'man' -> 'the' | 'in' + 'in' -> 'corner' + 'corner' -> 'the' + 'play' -> 'golf' | 'dachshund' | 'to' + 'dachshund' -> 'his' + """ + ) + print(grammar) + ndp = NonprojectiveDependencyParser(grammar) + graphs = ndp.parse( + [ + "the", + "man", + "in", + "the", + "corner", + "taught", + "his", + "dachshund", + "to", + "play", + "golf", + ] + ) + print("Graphs:") + for graph in graphs: + print(graph) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/pchart.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/pchart.py new file mode 100644 index 0000000000000000000000000000000000000000..319655d023a462c0c6c7ac087746dc77d46b7949 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/pchart.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: Probabilistic Chart Parsers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for associating probabilities with tree +structures that represent the internal organization of a text. The +probabilistic parser module defines ``BottomUpProbabilisticChartParser``. + +``BottomUpProbabilisticChartParser`` is an abstract class that implements +a bottom-up chart parser for ``PCFG`` grammars. It maintains a queue of edges, +and adds them to the chart one at a time. The ordering of this queue +is based on the probabilities associated with the edges, allowing the +parser to expand more likely edges before less likely ones. Each +subclass implements a different queue ordering, producing different +search strategies. Currently the following subclasses are defined: + + - ``InsideChartParser`` searches edges in decreasing order of + their trees' inside probabilities. 
+ - ``RandomChartParser`` searches edges in random order. + - ``LongestChartParser`` searches edges in decreasing order of their + location's length. + +The ``BottomUpProbabilisticChartParser`` constructor has an optional +argument beam_size. If non-zero, this controls the size of the beam +(aka the edge queue). This option is most useful with InsideChartParser. +""" + +##////////////////////////////////////////////////////// +## Bottom-Up PCFG Chart Parser +##////////////////////////////////////////////////////// + +# [XX] This might not be implemented quite right -- it would be better +# to associate probabilities with child pointer lists. + +import random +from functools import reduce + +from nltk.grammar import PCFG, Nonterminal +from nltk.parse.api import ParserI +from nltk.parse.chart import AbstractChartRule, Chart, LeafEdge, TreeEdge +from nltk.tree import ProbabilisticTree, Tree + + +# Probabilistic edges +class ProbabilisticLeafEdge(LeafEdge): + def prob(self): + return 1.0 + + +class ProbabilisticTreeEdge(TreeEdge): + def __init__(self, prob, *args, **kwargs): + TreeEdge.__init__(self, *args, **kwargs) + self._prob = prob + # two edges with different probabilities are not equal. + self._comparison_key = (self._comparison_key, prob) + + def prob(self): + return self._prob + + @staticmethod + def from_production(production, index, p): + return ProbabilisticTreeEdge( + p, (index, index), production.lhs(), production.rhs(), 0 + ) + + +# Rules using probabilistic edges +class ProbabilisticBottomUpInitRule(AbstractChartRule): + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for index in range(chart.num_leaves()): + new_edge = ProbabilisticLeafEdge(chart.leaf(index), index) + if chart.insert(new_edge, ()): + yield new_edge + + +class ProbabilisticBottomUpPredictRule(AbstractChartRule): + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(): + if edge.lhs() == prod.rhs()[0]: + new_edge = ProbabilisticTreeEdge.from_production( + prod, edge.start(), prod.prob() + ) + if chart.insert(new_edge, ()): + yield new_edge + + +class ProbabilisticFundamentalRule(AbstractChartRule): + NUM_EDGES = 2 + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. + if not ( + left_edge.end() == right_edge.start() + and left_edge.nextsym() == right_edge.lhs() + and left_edge.is_incomplete() + and right_edge.is_complete() + ): + return + + # Construct the new edge. + p = left_edge.prob() * right_edge.prob() + new_edge = ProbabilisticTreeEdge( + p, + span=(left_edge.start(), right_edge.end()), + lhs=left_edge.lhs(), + rhs=left_edge.rhs(), + dot=left_edge.dot() + 1, + ) + + # Add it to the chart, with appropriate child pointers. + changed_chart = False + for cpl1 in chart.child_pointer_lists(left_edge): + if chart.insert(new_edge, cpl1 + (right_edge,)): + changed_chart = True + + # If we changed the chart, then generate the edge. 
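+ # (A note on the arithmetic above: p = left_edge.prob() * right_edge.prob()
+ # is the inside probability of the combined edge, since under a PCFG the
+ # probability of a partial tree is the product of the probabilities of its
+ # production and of its completed subtrees; completing one more child of the
+ # dotted rule therefore just multiplies in that child's probability.)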
+ if changed_chart: + yield new_edge + + +class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule): + NUM_EDGES = 1 + + _fundamental_rule = ProbabilisticFundamentalRule() + + def apply(self, chart, grammar, edge1): + fr = self._fundamental_rule + if edge1.is_incomplete(): + # edge1 = left_edge; edge2 = right_edge + for edge2 in chart.select( + start=edge1.end(), is_complete=True, lhs=edge1.nextsym() + ): + yield from fr.apply(chart, grammar, edge1, edge2) + else: + # edge2 = left_edge; edge1 = right_edge + for edge2 in chart.select( + end=edge1.start(), is_complete=False, nextsym=edge1.lhs() + ): + yield from fr.apply(chart, grammar, edge2, edge1) + + def __str__(self): + return "Fundamental Rule" + + +class BottomUpProbabilisticChartParser(ParserI): + """ + An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to + record partial results. ``BottomUpProbabilisticChartParser`` maintains + a queue of edges that can be added to the chart. This queue is + initialized with edges for each token in the text that is being + parsed. ``BottomUpProbabilisticChartParser`` inserts these edges into + the chart one at a time, starting with the most likely edges, and + proceeding to less likely edges. For each edge that is added to + the chart, it may become possible to insert additional edges into + the chart; these are added to the queue. This process continues + until enough complete parses have been generated, or until the + queue is empty. + + The sorting order for the queue is not specified by + ``BottomUpProbabilisticChartParser``. Different sorting orders will + result in different search strategies. The sorting order for the + queue is defined by the method ``sort_queue``; subclasses are required + to provide a definition for this method. + + :type _grammar: PCFG + :ivar _grammar: The grammar used to parse sentences. + :type _trace: int + :ivar _trace: The level of tracing output that should be generated + when parsing a text. + """ + + def __init__(self, grammar, beam_size=0, trace=0): + """ + Create a new ``BottomUpProbabilisticChartParser``, that uses + ``grammar`` to parse texts. + + :type grammar: PCFG + :param grammar: The grammar used to parse texts. + :type beam_size: int + :param beam_size: The maximum length for the parser's edge queue. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + if not isinstance(grammar, PCFG): + raise ValueError("The grammar must be probabilistic PCFG") + self._grammar = grammar + self.beam_size = beam_size + self._trace = trace + + def grammar(self): + return self._grammar + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + # TODO: change this to conform more with the standard ChartParser + def parse(self, tokens): + self._grammar.check_coverage(tokens) + chart = Chart(list(tokens)) + grammar = self._grammar + + # Chart parser rules. + bu_init = ProbabilisticBottomUpInitRule() + bu = ProbabilisticBottomUpPredictRule() + fr = SingleEdgeProbabilisticFundamentalRule() + + # Our queue + queue = [] + + # Initialize the chart. 
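+ # (The initialization rule seeds the queue with one leaf edge per input
+ # token; the loop that follows then repeatedly re-sorts the queue via
+ # sort_queue(), prunes it to beam_size when a beam is set, pops the edge at
+ # the end of the queue, and applies the predict and fundamental rules to it,
+ # pushing any newly licensed edges back onto the queue until it is empty.)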
+ for edge in bu_init.apply(chart, grammar): + if self._trace > 1: + print( + " %-50s [%s]" + % (chart.pretty_format_edge(edge, width=2), edge.prob()) + ) + queue.append(edge) + + while len(queue) > 0: + # Re-sort the queue. + self.sort_queue(queue, chart) + + # Prune the queue to the correct size if a beam was defined + if self.beam_size: + self._prune(queue, chart) + + # Get the best edge. + edge = queue.pop() + if self._trace > 0: + print( + " %-50s [%s]" + % (chart.pretty_format_edge(edge, width=2), edge.prob()) + ) + + # Apply BU & FR to it. + queue.extend(bu.apply(chart, grammar, edge)) + queue.extend(fr.apply(chart, grammar, edge)) + + # Get a list of complete parses. + parses = list(chart.parses(grammar.start(), ProbabilisticTree)) + + # Assign probabilities to the trees. + prod_probs = {} + for prod in grammar.productions(): + prod_probs[prod.lhs(), prod.rhs()] = prod.prob() + for parse in parses: + self._setprob(parse, prod_probs) + + # Sort by probability + parses.sort(reverse=True, key=lambda tree: tree.prob()) + + return iter(parses) + + def _setprob(self, tree, prod_probs): + if tree.prob() is not None: + return + + # Get the prob of the CFG production. + lhs = Nonterminal(tree.label()) + rhs = [] + for child in tree: + if isinstance(child, Tree): + rhs.append(Nonterminal(child.label())) + else: + rhs.append(child) + prob = prod_probs[lhs, tuple(rhs)] + + # Get the probs of children. + for child in tree: + if isinstance(child, Tree): + self._setprob(child, prod_probs) + prob *= child.prob() + + tree.set_prob(prob) + + def sort_queue(self, queue, chart): + """ + Sort the given queue of ``Edge`` objects, placing the edge that should + be tried first at the beginning of the queue. This method + will be called after each ``Edge`` is added to the queue. + + :param queue: The queue of ``Edge`` objects to sort. Each edge in + this queue is an edge that could be added to the chart by + the fundamental rule; but that has not yet been added. + :type queue: list(Edge) + :param chart: The chart being used to parse the text. This + chart can be used to provide extra information for sorting + the queue. + :type chart: Chart + :rtype: None + """ + raise NotImplementedError() + + def _prune(self, queue, chart): + """Discard items in the queue if the queue is longer than the beam.""" + if len(queue) > self.beam_size: + split = len(queue) - self.beam_size + if self._trace > 2: + for edge in queue[:split]: + print(" %-50s [DISCARDED]" % chart.pretty_format_edge(edge, 2)) + del queue[:split] + + +class InsideChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in descending + order of the inside probabilities of their trees. The "inside + probability" of a tree is simply the + probability of the entire tree, ignoring its context. In + particular, the inside probability of a tree generated by + production *p* with children *c[1], c[2], ..., c[n]* is + *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside + probability of a token is 1 if it is present in the text, and 0 if + it is absent. + + This sorting order results in a type of lowest-cost-first search + strategy. + """ + + # Inherit constructor. + def sort_queue(self, queue, chart): + """ + Sort the given queue of edges, in descending order of the + inside probabilities of the edges' trees. + + :param queue: The queue of ``Edge`` objects to sort. Each edge in + this queue is an edge that could be added to the chart by + the fundamental rule; but that has not yet been added. 
+ :type queue: list(Edge) + :param chart: The chart being used to parse the text. This + chart can be used to provide extra information for sorting + the queue. + :type chart: Chart + :rtype: None + """ + queue.sort(key=lambda edge: edge.prob()) + + +# Eventually, this will become some sort of inside-outside parser: +# class InsideOutsideParser(BottomUpProbabilisticChartParser): +# def __init__(self, grammar, trace=0): +# # Inherit docs. +# BottomUpProbabilisticChartParser.__init__(self, grammar, trace) +# +# # Find the best path from S to each nonterminal +# bestp = {} +# for production in grammar.productions(): bestp[production.lhs()]=0 +# bestp[grammar.start()] = 1.0 +# +# for i in range(len(grammar.productions())): +# for production in grammar.productions(): +# lhs = production.lhs() +# for elt in production.rhs(): +# bestp[elt] = max(bestp[lhs]*production.prob(), +# bestp.get(elt,0)) +# +# self._bestp = bestp +# for (k,v) in self._bestp.items(): print(k,v) +# +# def _sortkey(self, edge): +# return edge.structure()[PROB] * self._bestp[edge.lhs()] +# +# def sort_queue(self, queue, chart): +# queue.sort(key=self._sortkey) + + +class RandomChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in random order. + This sorting order results in a random search strategy. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + i = random.randint(0, len(queue) - 1) + (queue[-1], queue[i]) = (queue[i], queue[-1]) + + +class UnsortedChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + return + + +class LongestChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries longer edges before + shorter ones. This sorting order results in a type of best-first + search strategy. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + queue.sort(key=lambda edge: edge.length()) + + +##////////////////////////////////////////////////////// +## Test Code +##////////////////////////////////////////////////////// + + +def demo(choice=None, draw_parses=None, print_parses=None): + """ + A demonstration of the probabilistic parsers. The user is + prompted to select which demo to run, and how many parses should + be found; and then each parser is run on the same demo, and a + summary of the results are displayed. + """ + import sys + import time + + from nltk import tokenize + from nltk.parse import pchart + + # Define two demos. Each demo has a sentence and a grammar. 
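+ # (Both grammars below use PCFG.fromstring() notation: the bracketed number
+ # after each right-hand side is that production's probability, and the
+ # probabilities of all productions sharing a left-hand side are expected to
+ # sum to one.)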
+ toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + demos = [ + ("I saw John with my telescope", toy_pcfg1), + ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), + ] + + if choice is None: + # Ask the user which demo they want to use. + print() + for i in range(len(demos)): + print(f"{i + 1:>3}: {demos[i][0]}") + print(" %r" % demos[i][1]) + print() + print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") + choice = int(sys.stdin.readline().strip()) - 1 + try: + sent, grammar = demos[choice] + except: + print("Bad sentence number") + return + + # Tokenize the sentence. + tokens = sent.split() + + # Define a list of parsers. We'll use all parsers. + parsers = [ + pchart.InsideChartParser(grammar), + pchart.RandomChartParser(grammar), + pchart.UnsortedChartParser(grammar), + pchart.LongestChartParser(grammar), + pchart.InsideChartParser(grammar, beam_size=len(tokens) + 1), # was BeamParser + ] + + # Run the parsers on the tokenized sentence. + times = [] + average_p = [] + num_parses = [] + all_parses = {} + for parser in parsers: + print(f"\ns: {sent}\nparser: {parser}\ngrammar: {grammar}") + parser.trace(3) + t = time.time() + parses = list(parser.parse(tokens)) + times.append(time.time() - t) + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 + average_p.append(p) + num_parses.append(len(parses)) + for p in parses: + all_parses[p.freeze()] = 1 + + # Print some summary statistics + print() + print(" Parser Beam | Time (secs) # Parses Average P(parse)") + print("------------------------+------------------------------------------") + for i in range(len(parsers)): + print( + "%18s %4d |%11.4f%11d%19.14f" + % ( + parsers[i].__class__.__name__, + parsers[i].beam_size, + times[i], + num_parses[i], + average_p[i], + ) + ) + parses = all_parses.keys() + if parses: + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) + else: + p = 0 + print("------------------------+------------------------------------------") + print("%18s |%11s%11d%19.14f" % ("(All Parses)", "n/a", len(parses), p)) + + if draw_parses is None: + # Ask the user if we should draw the parses. + print() + print("Draw parses (y/n)? ", end=" ") + draw_parses = sys.stdin.readline().strip().lower().startswith("y") + if draw_parses: + from nltk.draw.tree import draw_trees + + print(" please wait...") + draw_trees(*parses) + + if print_parses is None: + # Ask the user if we should print the parses. + print() + print("Print parses (y/n)? 
", end=" ") + print_parses = sys.stdin.readline().strip().lower().startswith("y") + if print_parses: + for parse in parses: + print(parse) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py new file mode 100644 index 0000000000000000000000000000000000000000..9e4e3ba4d6d8e19820de6d527d5847e365e018d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py @@ -0,0 +1,716 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# +# URL: +# For license information, see LICENSE.TXT +# + +from collections import defaultdict +from functools import total_ordering +from itertools import chain + +from nltk.grammar import ( + DependencyGrammar, + DependencyProduction, + ProbabilisticDependencyGrammar, +) +from nltk.internals import raise_unorderable_types +from nltk.parse.dependencygraph import DependencyGraph + +################################################################# +# Dependency Span +################################################################# + + +@total_ordering +class DependencySpan: + """ + A contiguous span over some part of the input string representing + dependency (head -> modifier) relationships amongst words. An atomic + span corresponds to only one word so it isn't a 'span' in the conventional + sense, as its _start_index = _end_index = _head_index for concatenation + purposes. All other spans are assumed to have arcs between all nodes + within the start and end indexes of the span, and one head index corresponding + to the head word for the entire span. This is the same as the root node if + the dependency structure were depicted as a graph. + """ + + def __init__(self, start_index, end_index, head_index, arcs, tags): + self._start_index = start_index + self._end_index = end_index + self._head_index = head_index + self._arcs = arcs + self._tags = tags + self._comparison_key = (start_index, end_index, head_index, tuple(arcs)) + self._hash = hash(self._comparison_key) + + def head_index(self): + """ + :return: An value indexing the head of the entire ``DependencySpan``. + :rtype: int + """ + return self._head_index + + def __repr__(self): + """ + :return: A concise string representatino of the ``DependencySpan``. + :rtype: str. + """ + return "Span %d-%d; Head Index: %d" % ( + self._start_index, + self._end_index, + self._head_index, + ) + + def __str__(self): + """ + :return: A verbose string representation of the ``DependencySpan``. + :rtype: str + """ + str = "Span %d-%d; Head Index: %d" % ( + self._start_index, + self._end_index, + self._head_index, + ) + for i in range(len(self._arcs)): + str += "\n%d <- %d, %s" % (i, self._arcs[i], self._tags[i]) + return str + + def __eq__(self, other): + return ( + type(self) == type(other) and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, DependencySpan): + raise_unorderable_types("<", self, other) + return self._comparison_key < other._comparison_key + + def __hash__(self): + """ + :return: The hash value of this ``DependencySpan``. 
+ """ + return self._hash + + +################################################################# +# Chart Cell +################################################################# + + +class ChartCell: + """ + A cell from the parse chart formed when performing the CYK algorithm. + Each cell keeps track of its x and y coordinates (though this will probably + be discarded), and a list of spans serving as the cell's entries. + """ + + def __init__(self, x, y): + """ + :param x: This cell's x coordinate. + :type x: int. + :param y: This cell's y coordinate. + :type y: int. + """ + self._x = x + self._y = y + self._entries = set() + + def add(self, span): + """ + Appends the given span to the list of spans + representing the chart cell's entries. + + :param span: The span to add. + :type span: DependencySpan + """ + self._entries.add(span) + + def __str__(self): + """ + :return: A verbose string representation of this ``ChartCell``. + :rtype: str. + """ + return "CC[%d,%d]: %s" % (self._x, self._y, self._entries) + + def __repr__(self): + """ + :return: A concise string representation of this ``ChartCell``. + :rtype: str. + """ + return "%s" % self + + +################################################################# +# Parsing with Dependency Grammars +################################################################# + + +class ProjectiveDependencyParser: + """ + A projective, rule-based, dependency parser. A ProjectiveDependencyParser + is created with a DependencyGrammar, a set of productions specifying + word-to-word dependency relations. The parse() method will then + return the set of all parses, in tree representation, for a given input + sequence of tokens. Each parse must meet the requirements of the both + the grammar and the projectivity constraint which specifies that the + branches of the dependency tree are not allowed to cross. Alternatively, + this can be understood as stating that each parent node and its children + in the parse tree form a continuous substring of the input sequence. + """ + + def __init__(self, dependency_grammar): + """ + Create a new ProjectiveDependencyParser, from a word-to-word + dependency grammar ``DependencyGrammar``. + + :param dependency_grammar: A word-to-word relation dependencygrammar. + :type dependency_grammar: DependencyGrammar + """ + self._grammar = dependency_grammar + + def parse(self, tokens): + """ + Performs a projective dependency parse on the list of tokens using + a chart-based, span-concatenation algorithm similar to Eisner (1996). + + :param tokens: The list of input tokens. + :type tokens: list(str) + :return: An iterator over parse trees. 
+ :rtype: iter(Tree) + """ + self._tokens = list(tokens) + chart = [] + for i in range(0, len(self._tokens) + 1): + chart.append([]) + for j in range(0, len(self._tokens) + 1): + chart[i].append(ChartCell(i, j)) + if i == j + 1: + chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"])) + + for i in range(1, len(self._tokens) + 1): + for j in range(i - 2, -1, -1): + for k in range(i - 1, j, -1): + for span1 in chart[k][j]._entries: + for span2 in chart[i][k]._entries: + for newspan in self.concatenate(span1, span2): + chart[i][j].add(newspan) + + for parse in chart[len(self._tokens)][0]._entries: + conll_format = "" + # malt_format = "" + for i in range(len(tokens)): + # malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null') + # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-') + # Modify to comply with the new Dependency Graph requirement (at least must have an root elements) + conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % ( + i + 1, + tokens[i], + tokens[i], + "null", + "null", + "null", + parse._arcs[i] + 1, + "ROOT", + "-", + "-", + ) + dg = DependencyGraph(conll_format) + # if self.meets_arity(dg): + yield dg.tree() + + def concatenate(self, span1, span2): + """ + Concatenates the two spans in whichever way possible. This + includes rightward concatenation (from the leftmost word of the + leftmost span to the rightmost word of the rightmost span) and + leftward concatenation (vice-versa) between adjacent spans. Unlike + Eisner's presentation of span concatenation, these spans do not + share or pivot on a particular word/word-index. + + :return: A list of new spans formed through concatenation. + :rtype: list(DependencySpan) + """ + spans = [] + if span1._start_index == span2._start_index: + print("Error: Mismatched spans - replace this with thrown error") + if span1._start_index > span2._start_index: + temp_span = span1 + span1 = span2 + span2 = temp_span + # adjacent rightward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span1._head_index], self._tokens[span2._head_index] + ): + # print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index)) + new_arcs[span2._head_index - span1._start_index] = span1._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span1._head_index, + new_arcs, + new_tags, + ) + ) + # adjacent leftward covered concatenation + new_arcs = span1._arcs + span2._arcs + if self._grammar.contains( + self._tokens[span2._head_index], self._tokens[span1._head_index] + ): + # print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index)) + new_arcs[span1._head_index - span1._start_index] = span2._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span2._head_index, + new_arcs, + new_tags, + ) + ) + return spans + + +################################################################# +# Parsing with Probabilistic Dependency Grammars +################################################################# + + +class ProbabilisticProjectiveDependencyParser: + """A probabilistic, projective dependency parser. + + This parser returns the most probable projective parse derived from the + probabilistic dependency grammar derived from the train() method. 
The + probabilistic model is an implementation of Eisner's (1996) Model C, which + conditions on head-word, head-tag, child-word, and child-tag. The decoding + uses a bottom-up chart-based span concatenation algorithm that's identical + to the one utilized by the rule-based projective parser. + + Usage example + + >>> from nltk.parse.dependencygraph import conll_data2 + + >>> graphs = [ + ... DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry + ... ] + + >>> ppdp = ProbabilisticProjectiveDependencyParser() + >>> ppdp.train(graphs) + + >>> sent = ['Cathy', 'zag', 'hen', 'wild', 'zwaaien', '.'] + >>> list(ppdp.parse(sent)) + [Tree('zag', ['Cathy', 'hen', Tree('zwaaien', ['wild', '.'])])] + + """ + + def __init__(self): + """ + Create a new probabilistic dependency parser. No additional + operations are necessary. + """ + + def parse(self, tokens): + """ + Parses the list of tokens subject to the projectivity constraint + and the productions in the parser's grammar. This uses a method + similar to the span-concatenation algorithm defined in Eisner (1996). + It returns the most probable parse derived from the parser's + probabilistic dependency grammar. + """ + self._tokens = list(tokens) + chart = [] + for i in range(0, len(self._tokens) + 1): + chart.append([]) + for j in range(0, len(self._tokens) + 1): + chart[i].append(ChartCell(i, j)) + if i == j + 1: + if tokens[i - 1] in self._grammar._tags: + for tag in self._grammar._tags[tokens[i - 1]]: + chart[i][j].add( + DependencySpan(i - 1, i, i - 1, [-1], [tag]) + ) + else: + print( + "No tag found for input token '%s', parse is impossible." + % tokens[i - 1] + ) + return [] + for i in range(1, len(self._tokens) + 1): + for j in range(i - 2, -1, -1): + for k in range(i - 1, j, -1): + for span1 in chart[k][j]._entries: + for span2 in chart[i][k]._entries: + for newspan in self.concatenate(span1, span2): + chart[i][j].add(newspan) + trees = [] + max_parse = None + max_score = 0 + for parse in chart[len(self._tokens)][0]._entries: + conll_format = "" + malt_format = "" + for i in range(len(tokens)): + malt_format += "%s\t%s\t%d\t%s\n" % ( + tokens[i], + "null", + parse._arcs[i] + 1, + "null", + ) + # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], parse._tags[i], parse._tags[i], 'null', parse._arcs[i] + 1, 'null', '-', '-') + # Modify to comply with recent change in dependency graph such that there must be a ROOT element. + conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % ( + i + 1, + tokens[i], + tokens[i], + parse._tags[i], + parse._tags[i], + "null", + parse._arcs[i] + 1, + "ROOT", + "-", + "-", + ) + dg = DependencyGraph(conll_format) + score = self.compute_prob(dg) + trees.append((score, dg.tree())) + trees.sort() + return (tree for (score, tree) in trees) + + def concatenate(self, span1, span2): + """ + Concatenates the two spans in whichever way possible. This + includes rightward concatenation (from the leftmost word of the + leftmost span to the rightmost word of the rightmost span) and + leftward concatenation (vice-versa) between adjacent spans. Unlike + Eisner's presentation of span concatenation, these spans do not + share or pivot on a particular word/word-index. + + :return: A list of new spans formed through concatenation. 
+ :rtype: list(DependencySpan) + """ + spans = [] + if span1._start_index == span2._start_index: + print("Error: Mismatched spans - replace this with thrown error") + if span1._start_index > span2._start_index: + temp_span = span1 + span1 = span2 + span2 = temp_span + # adjacent rightward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span1._head_index], self._tokens[span2._head_index] + ): + new_arcs[span2._head_index - span1._start_index] = span1._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span1._head_index, + new_arcs, + new_tags, + ) + ) + # adjacent leftward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span2._head_index], self._tokens[span1._head_index] + ): + new_arcs[span1._head_index - span1._start_index] = span2._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span2._head_index, + new_arcs, + new_tags, + ) + ) + return spans + + def train(self, graphs): + """ + Trains a ProbabilisticDependencyGrammar based on the list of input + DependencyGraphs. This model is an implementation of Eisner's (1996) + Model C, which derives its statistics from head-word, head-tag, + child-word, and child-tag relationships. + + :param graphs: A list of dependency graphs to train from. + :type: list(DependencyGraph) + """ + productions = [] + events = defaultdict(int) + tags = {} + for dg in graphs: + for node_index in range(1, len(dg.nodes)): + # children = dg.nodes[node_index]['deps'] + children = list( + chain.from_iterable(dg.nodes[node_index]["deps"].values()) + ) + + nr_left_children = dg.left_children(node_index) + nr_right_children = dg.right_children(node_index) + nr_children = nr_left_children + nr_right_children + for child_index in range( + 0 - (nr_left_children + 1), nr_right_children + 2 + ): + head_word = dg.nodes[node_index]["word"] + head_tag = dg.nodes[node_index]["tag"] + if head_word in tags: + tags[head_word].add(head_tag) + else: + tags[head_word] = {head_tag} + child = "STOP" + child_tag = "STOP" + prev_word = "START" + prev_tag = "START" + if child_index < 0: + array_index = child_index + nr_left_children + if array_index >= 0: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != -1: + prev_word = dg.nodes[children[array_index + 1]]["word"] + prev_tag = dg.nodes[children[array_index + 1]]["tag"] + if child != "STOP": + productions.append(DependencyProduction(head_word, [child])) + head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) left))".format( + prev_tag, + head_word, + head_tag, + ) + events[head_event] += 1 + events[mod_event] += 1 + elif child_index > 0: + array_index = child_index + nr_left_children - 1 + if array_index < nr_children: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != 1: + prev_word = dg.nodes[children[array_index - 1]]["word"] + prev_tag = dg.nodes[children[array_index - 1]]["tag"] + if child != "STOP": + productions.append(DependencyProduction(head_word, [child])) + head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) 
right))".format( + prev_tag, + head_word, + head_tag, + ) + events[head_event] += 1 + events[mod_event] += 1 + self._grammar = ProbabilisticDependencyGrammar(productions, events, tags) + + def compute_prob(self, dg): + """ + Computes the probability of a dependency graph based + on the parser's probability model (defined by the parser's + statistical dependency grammar). + + :param dg: A dependency graph to score. + :type dg: DependencyGraph + :return: The probability of the dependency graph. + :rtype: int + """ + prob = 1.0 + for node_index in range(1, len(dg.nodes)): + # children = dg.nodes[node_index]['deps'] + children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values())) + + nr_left_children = dg.left_children(node_index) + nr_right_children = dg.right_children(node_index) + nr_children = nr_left_children + nr_right_children + for child_index in range(0 - (nr_left_children + 1), nr_right_children + 2): + head_word = dg.nodes[node_index]["word"] + head_tag = dg.nodes[node_index]["tag"] + child = "STOP" + child_tag = "STOP" + prev_word = "START" + prev_tag = "START" + if child_index < 0: + array_index = child_index + nr_left_children + if array_index >= 0: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != -1: + prev_word = dg.nodes[children[array_index + 1]]["word"] + prev_tag = dg.nodes[children[array_index + 1]]["tag"] + head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) left))".format( + prev_tag, + head_word, + head_tag, + ) + h_count = self._grammar._events[head_event] + m_count = self._grammar._events[mod_event] + + # If the grammar is not covered + if m_count != 0: + prob *= h_count / m_count + else: + prob = 0.00000001 # Very small number + + elif child_index > 0: + array_index = child_index + nr_left_children - 1 + if array_index < nr_children: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != 1: + prev_word = dg.nodes[children[array_index - 1]]["word"] + prev_tag = dg.nodes[children[array_index - 1]]["tag"] + head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) right))".format( + prev_tag, + head_word, + head_tag, + ) + h_count = self._grammar._events[head_event] + m_count = self._grammar._events[mod_event] + + if m_count != 0: + prob *= h_count / m_count + else: + prob = 0.00000001 # Very small number + + return prob + + +################################################################# +# Demos +################################################################# + + +def demo(): + projective_rule_parse_demo() + # arity_parse_demo() + projective_prob_parse_demo() + + +def projective_rule_parse_demo(): + """ + A demonstration showing the creation and use of a + ``DependencyGrammar`` to perform a projective dependency + parse. + """ + grammar = DependencyGrammar.fromstring( + """ + 'scratch' -> 'cats' | 'walls' + 'walls' -> 'the' + 'cats' -> 'the' + """ + ) + print(grammar) + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "cats", "scratch", "the", "walls"]) + for tree in trees: + print(tree) + + +def arity_parse_demo(): + """ + A demonstration showing the creation of a ``DependencyGrammar`` + in which a specific number of modifiers is listed for a given + head. 
This can further constrain the number of possible parses + created by a ``ProjectiveDependencyParser``. + """ + print() + print("A grammar with no arity constraints. Each DependencyProduction") + print("specifies a relationship between one head word and only one") + print("modifier word.") + grammar = DependencyGrammar.fromstring( + """ + 'fell' -> 'price' | 'stock' + 'price' -> 'of' | 'the' + 'of' -> 'stock' + 'stock' -> 'the' + """ + ) + print(grammar) + + print() + print("For the sentence 'The price of the stock fell', this grammar") + print("will produce the following three parses:") + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) + for tree in trees: + print(tree) + + print() + print("By contrast, the following grammar contains a ") + print("DependencyProduction that specifies a relationship") + print("between a single head word, 'price', and two modifier") + print("words, 'of' and 'the'.") + grammar = DependencyGrammar.fromstring( + """ + 'fell' -> 'price' | 'stock' + 'price' -> 'of' 'the' + 'of' -> 'stock' + 'stock' -> 'the' + """ + ) + print(grammar) + + print() + print( + "This constrains the number of possible parses to just one:" + ) # unimplemented, soon to replace + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) + for tree in trees: + print(tree) + + +def projective_prob_parse_demo(): + """ + A demo showing the training and use of a projective + dependency parser. + """ + from nltk.parse.dependencygraph import conll_data2 + + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + ppdp = ProbabilisticProjectiveDependencyParser() + print("Training Probabilistic Projective Dependency Parser...") + ppdp.train(graphs) + + sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."] + print("Parsing '", " ".join(sent), "'...") + print("Parse:") + for tree in ppdp.parse(sent): + print(tree) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/recursivedescent.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/recursivedescent.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5d88c0884d8da7fdc52b044331ff0536bc19c4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/recursivedescent.py @@ -0,0 +1,684 @@ +# Natural Language Toolkit: Recursive Descent Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from nltk.grammar import Nonterminal +from nltk.parse.api import ParserI +from nltk.tree import ImmutableTree, Tree + + +##////////////////////////////////////////////////////// +## Recursive Descent Parser +##////////////////////////////////////////////////////// +class RecursiveDescentParser(ParserI): + """ + A simple top-down CFG parser that parses texts by recursively + expanding the fringe of a Tree, and matching it against a + text. + + ``RecursiveDescentParser`` uses a list of tree locations called a + "frontier" to remember which subtrees have not yet been expanded + and which leaves have not yet been matched against the text. Each + tree location consists of a list of child indices specifying the + path from the root of the tree to a subtree or a leaf; see the + reference documentation for Tree for more information + about tree locations. 
+ + When the parser begins parsing a text, it constructs a tree + containing only the start symbol, and a frontier containing the + location of the tree's root node. It then extends the tree to + cover the text, using the following recursive procedure: + + - If the frontier is empty, and the text is covered by the tree, + then return the tree as a possible parse. + - If the frontier is empty, and the text is not covered by the + tree, then return no parses. + - If the first element of the frontier is a subtree, then + use CFG productions to "expand" it. For each applicable + production, add the expanded subtree's children to the + frontier, and recursively find all parses that can be + generated by the new tree and frontier. + - If the first element of the frontier is a token, then "match" + it against the next token from the text. Remove the token + from the frontier, and recursively find all parses that can be + generated by the new tree and frontier. + + :see: ``nltk.grammar`` + """ + + def __init__(self, grammar, trace=0): + """ + Create a new ``RecursiveDescentParser``, that uses ``grammar`` + to parse texts. + + :type grammar: CFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + self._grammar = grammar + self._trace = trace + + def grammar(self): + return self._grammar + + def parse(self, tokens): + # Inherit docs from ParserI + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # Start a recursive descent parse, with an initial tree + # containing just the start symbol. + start = self._grammar.start().symbol() + initial_tree = Tree(start, []) + frontier = [()] + if self._trace: + self._trace_start(initial_tree, frontier, tokens) + return self._parse(tokens, initial_tree, frontier) + + def _parse(self, remaining_text, tree, frontier): + """ + Recursively expand and match each elements of ``tree`` + specified by ``frontier``, to cover ``remaining_text``. Return + a list of all parses found. + + :return: An iterator of all parses that can be generated by + matching and expanding the elements of ``tree`` + specified by ``frontier``. + :rtype: iter(Tree) + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list(tuple(int)) + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. This list sorted + in left-to-right order of location within the tree. + """ + + # If the tree covers the text, and there's nothing left to + # expand, then we've found a complete parse; return it. + if len(remaining_text) == 0 and len(frontier) == 0: + if self._trace: + self._trace_succeed(tree, frontier) + yield tree + + # If there's still text, but nothing left to expand, we failed. + elif len(frontier) == 0: + if self._trace: + self._trace_backtrack(tree, frontier) + + # If the next element on the frontier is a tree, expand it. + elif isinstance(tree[frontier[0]], Tree): + yield from self._expand(remaining_text, tree, frontier) + + # If the next element on the frontier is a token, match it. 
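+ # (Backtracking is implicit in this generator: a branch on which neither
+ # _expand() nor _match() yields anything simply produces no parses, and
+ # control returns to the enclosing call, which tries its next alternative.)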
+ else: + yield from self._match(remaining_text, tree, frontier) + + def _match(self, rtext, tree, frontier): + """ + :rtype: iter(Tree) + :return: an iterator of all parses that can be generated by + matching the first element of ``frontier`` against the + first token in ``rtext``. In particular, if the first + element of ``frontier`` has the same type as the first + token in ``rtext``, then substitute the token into + ``tree``; and return all parses that can be generated by + matching and expanding the remaining elements of + ``frontier``. If the first element of ``frontier`` does not + have the same type as the first token in ``rtext``, then + return empty list. + + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type rtext: list(str) + :param rtext: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list of tuple of int + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. + """ + + tree_leaf = tree[frontier[0]] + if len(rtext) > 0 and tree_leaf == rtext[0]: + # If it's a terminal that matches rtext[0], then substitute + # in the token, and continue parsing. + newtree = tree.copy(deep=True) + newtree[frontier[0]] = rtext[0] + if self._trace: + self._trace_match(newtree, frontier[1:], rtext[0]) + yield from self._parse(rtext[1:], newtree, frontier[1:]) + else: + # If it's a non-matching terminal, fail. + if self._trace: + self._trace_backtrack(tree, frontier, rtext[:1]) + + def _expand(self, remaining_text, tree, frontier, production=None): + """ + :rtype: iter(Tree) + :return: An iterator of all parses that can be generated by + expanding the first element of ``frontier`` with + ``production``. In particular, if the first element of + ``frontier`` is a subtree whose node type is equal to + ``production``'s left hand side, then add a child to that + subtree for each element of ``production``'s right hand + side; and return all parses that can be generated by + matching and expanding the remaining elements of + ``frontier``. If the first element of ``frontier`` is not a + subtree whose node type is equal to ``production``'s left + hand side, then return an empty list. If ``production`` is + not specified, then return a list of all parses that can + be generated by expanding the first element of ``frontier`` + with *any* CFG production. + + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list(tuple(int)) + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. 
+ """ + + if production is None: + productions = self._grammar.productions() + else: + productions = [production] + + for production in productions: + lhs = production.lhs().symbol() + if lhs == tree[frontier[0]].label(): + subtree = self._production_to_tree(production) + if frontier[0] == (): + newtree = subtree + else: + newtree = tree.copy(deep=True) + newtree[frontier[0]] = subtree + new_frontier = [ + frontier[0] + (i,) for i in range(len(production.rhs())) + ] + if self._trace: + self._trace_expand(newtree, new_frontier, production) + yield from self._parse( + remaining_text, newtree, new_frontier + frontier[1:] + ) + + def _production_to_tree(self, production): + """ + :rtype: Tree + :return: The Tree that is licensed by ``production``. + In particular, given the production ``[lhs -> elt[1] ... elt[n]]`` + return a tree that has a node ``lhs.symbol``, and + ``n`` children. For each nonterminal element + ``elt[i]`` in the production, the tree token has a + childless subtree with node value ``elt[i].symbol``; and + for each terminal element ``elt[j]``, the tree token has + a leaf token with type ``elt[j]``. + + :param production: The CFG production that licenses the tree + token that should be returned. + :type production: Production + """ + children = [] + for elt in production.rhs(): + if isinstance(elt, Nonterminal): + children.append(Tree(elt.symbol(), [])) + else: + # This will be matched. + children.append(elt) + return Tree(production.lhs().symbol(), children) + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + def _trace_fringe(self, tree, treeloc=None): + """ + Print trace output displaying the fringe of ``tree``. The + fringe of ``tree`` consists of all of its leaves and all of + its childless subtrees. + + :rtype: None + """ + + if treeloc == (): + print("*", end=" ") + if isinstance(tree, Tree): + if len(tree) == 0: + print(repr(Nonterminal(tree.label())), end=" ") + for i in range(len(tree)): + if treeloc is not None and i == treeloc[0]: + self._trace_fringe(tree[i], treeloc[1:]) + else: + self._trace_fringe(tree[i]) + else: + print(repr(tree), end=" ") + + def _trace_tree(self, tree, frontier, operation): + """ + Print trace output displaying the parser's current state. + + :param operation: A character identifying the operation that + generated the current state. 
+ :rtype: None + """ + if self._trace == 2: + print(" %c [" % operation, end=" ") + else: + print(" [", end=" ") + if len(frontier) > 0: + self._trace_fringe(tree, frontier[0]) + else: + self._trace_fringe(tree) + print("]") + + def _trace_start(self, tree, frontier, text): + print("Parsing %r" % " ".join(text)) + if self._trace > 2: + print("Start:") + if self._trace > 1: + self._trace_tree(tree, frontier, " ") + + def _trace_expand(self, tree, frontier, production): + if self._trace > 2: + print("Expand: %s" % production) + if self._trace > 1: + self._trace_tree(tree, frontier, "E") + + def _trace_match(self, tree, frontier, tok): + if self._trace > 2: + print("Match: %r" % tok) + if self._trace > 1: + self._trace_tree(tree, frontier, "M") + + def _trace_succeed(self, tree, frontier): + if self._trace > 2: + print("GOOD PARSE:") + if self._trace == 1: + print("Found a parse:\n%s" % tree) + if self._trace > 1: + self._trace_tree(tree, frontier, "+") + + def _trace_backtrack(self, tree, frontier, toks=None): + if self._trace > 2: + if toks: + print("Backtrack: %r match failed" % toks[0]) + else: + print("Backtrack") + + +##////////////////////////////////////////////////////// +## Stepping Recursive Descent Parser +##////////////////////////////////////////////////////// +class SteppingRecursiveDescentParser(RecursiveDescentParser): + """ + A ``RecursiveDescentParser`` that allows you to step through the + parsing process, performing a single operation at a time. + + The ``initialize`` method is used to start parsing a text. + ``expand`` expands the first element on the frontier using a single + CFG production, and ``match`` matches the first element on the + frontier against the next text token. ``backtrack`` undoes the most + recent expand or match operation. ``step`` performs a single + expand, match, or backtrack operation. ``parses`` returns the set + of parses that have been found by the parser. + + :ivar _history: A list of ``(rtext, tree, frontier)`` tripples, + containing the previous states of the parser. This history is + used to implement the ``backtrack`` operation. + :ivar _tried_e: A record of all productions that have been tried + for a given tree. This record is used by ``expand`` to perform + the next untried production. + :ivar _tried_m: A record of what tokens have been matched for a + given tree. This record is used by ``step`` to decide whether + or not to match a token. + :see: ``nltk.grammar`` + """ + + def __init__(self, grammar, trace=0): + super().__init__(grammar, trace) + self._rtext = None + self._tree = None + self._frontier = [()] + self._tried_e = {} + self._tried_m = {} + self._history = [] + self._parses = [] + + # [XX] TEMPORARY HACK WARNING! This should be replaced with + # something nicer when we get the chance. + def _freeze(self, tree): + c = tree.copy() + # for pos in c.treepositions('leaves'): + # c[pos] = c[pos].freeze() + return ImmutableTree.convert(c) + + def parse(self, tokens): + tokens = list(tokens) + self.initialize(tokens) + while self.step() is not None: + pass + return self.parses() + + def initialize(self, tokens): + """ + Start parsing a given text. This sets the parser's tree to + the start symbol, its frontier to the root node, and its + remaining text to ``token['SUBTOKENS']``. 
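+
+ A minimal stepping loop (sketch, assuming ``grammar`` is an existing
+ ``CFG`` and ``tokens`` is a tokenized sentence; this is what
+ ``parse()`` does internally)::
+
+ sp = SteppingRecursiveDescentParser(grammar)
+ sp.initialize(tokens)
+ while sp.step() is not None:
+ pass
+ trees = list(sp.parses())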
+ """ + + self._rtext = tokens + start = self._grammar.start().symbol() + self._tree = Tree(start, []) + self._frontier = [()] + self._tried_e = {} + self._tried_m = {} + self._history = [] + self._parses = [] + if self._trace: + self._trace_start(self._tree, self._frontier, self._rtext) + + def remaining_text(self): + """ + :return: The portion of the text that is not yet covered by the + tree. + :rtype: list(str) + """ + return self._rtext + + def frontier(self): + """ + :return: A list of the tree locations of all subtrees that + have not yet been expanded, and all leaves that have not + yet been matched. + :rtype: list(tuple(int)) + """ + return self._frontier + + def tree(self): + """ + :return: A partial structure for the text that is + currently being parsed. The elements specified by the + frontier have not yet been expanded or matched. + :rtype: Tree + """ + return self._tree + + def step(self): + """ + Perform a single parsing operation. If an untried match is + possible, then perform the match, and return the matched + token. If an untried expansion is possible, then perform the + expansion, and return the production that it is based on. If + backtracking is possible, then backtrack, and return True. + Otherwise, return None. + + :return: None if no operation was performed; a token if a match + was performed; a production if an expansion was performed; + and True if a backtrack operation was performed. + :rtype: Production or String or bool + """ + # Try matching (if we haven't already) + if self.untried_match(): + token = self.match() + if token is not None: + return token + + # Try expanding. + production = self.expand() + if production is not None: + return production + + # Try backtracking + if self.backtrack(): + self._trace_backtrack(self._tree, self._frontier) + return True + + # Nothing left to do. + return None + + def expand(self, production=None): + """ + Expand the first element of the frontier. In particular, if + the first element of the frontier is a subtree whose node type + is equal to ``production``'s left hand side, then add a child + to that subtree for each element of ``production``'s right hand + side. If ``production`` is not specified, then use the first + untried expandable production. If all expandable productions + have been tried, do nothing. + + :return: The production used to expand the frontier, if an + expansion was performed. If no expansion was performed, + return None. + :rtype: Production or None + """ + + # Make sure we *can* expand. + if len(self._frontier) == 0: + return None + if not isinstance(self._tree[self._frontier[0]], Tree): + return None + + # If they didn't specify a production, check all untried ones. + if production is None: + productions = self.untried_expandable_productions() + else: + productions = [production] + + parses = [] + for prod in productions: + # Record that we've tried this production now. + self._tried_e.setdefault(self._freeze(self._tree), []).append(prod) + + # Try expanding. + for _result in self._expand(self._rtext, self._tree, self._frontier, prod): + return prod + + # We didn't expand anything. + return None + + def match(self): + """ + Match the first element of the frontier. In particular, if + the first element of the frontier has the same type as the + next text token, then substitute the text token into the tree. + + :return: The token matched, if a match operation was + performed. If no match was performed, return None + :rtype: str or None + """ + + # Record that we've tried matching this token. 
+ tok = self._rtext[0] + self._tried_m.setdefault(self._freeze(self._tree), []).append(tok) + + # Make sure we *can* match. + if len(self._frontier) == 0: + return None + if isinstance(self._tree[self._frontier[0]], Tree): + return None + + for _result in self._match(self._rtext, self._tree, self._frontier): + # Return the token we just matched. + return self._history[-1][0][0] + return None + + def backtrack(self): + """ + Return the parser to its state before the most recent + match or expand operation. Calling ``undo`` repeatedly return + the parser to successively earlier states. If no match or + expand operations have been performed, ``undo`` will make no + changes. + + :return: true if an operation was successfully undone. + :rtype: bool + """ + if len(self._history) == 0: + return False + (self._rtext, self._tree, self._frontier) = self._history.pop() + return True + + def expandable_productions(self): + """ + :return: A list of all the productions for which expansions + are available for the current parser state. + :rtype: list(Production) + """ + # Make sure we *can* expand. + if len(self._frontier) == 0: + return [] + frontier_child = self._tree[self._frontier[0]] + if len(self._frontier) == 0 or not isinstance(frontier_child, Tree): + return [] + + return [ + p + for p in self._grammar.productions() + if p.lhs().symbol() == frontier_child.label() + ] + + def untried_expandable_productions(self): + """ + :return: A list of all the untried productions for which + expansions are available for the current parser state. + :rtype: list(Production) + """ + + tried_expansions = self._tried_e.get(self._freeze(self._tree), []) + return [p for p in self.expandable_productions() if p not in tried_expansions] + + def untried_match(self): + """ + :return: Whether the first element of the frontier is a token + that has not yet been matched. + :rtype: bool + """ + + if len(self._rtext) == 0: + return False + tried_matches = self._tried_m.get(self._freeze(self._tree), []) + return self._rtext[0] not in tried_matches + + def currently_complete(self): + """ + :return: Whether the parser's current state represents a + complete parse. + :rtype: bool + """ + return len(self._frontier) == 0 and len(self._rtext) == 0 + + def _parse(self, remaining_text, tree, frontier): + """ + A stub version of ``_parse`` that sets the parsers current + state to the given arguments. In ``RecursiveDescentParser``, + the ``_parse`` method is used to recursively continue parsing a + text. ``SteppingRecursiveDescentParser`` overrides it to + capture these recursive calls. It records the parser's old + state in the history (to allow for backtracking), and updates + the parser's new state using the given arguments. Finally, it + returns ``[1]``, which is used by ``match`` and ``expand`` to + detect whether their operations were successful. + + :return: ``[1]`` + :rtype: list of int + """ + self._history.append((self._rtext, self._tree, self._frontier)) + self._rtext = remaining_text + self._tree = tree + self._frontier = frontier + + # Is it a good parse? If so, record it. + if len(frontier) == 0 and len(remaining_text) == 0: + self._parses.append(tree) + self._trace_succeed(self._tree, self._frontier) + + return [1] + + def parses(self): + """ + :return: An iterator of the parses that have been found by this + parser so far. + :rtype: list of Tree + """ + return iter(self._parses) + + def set_grammar(self, grammar): + """ + Change the grammar used to parse texts. + + :param grammar: The new grammar. 
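+ Only the grammar is replaced; the parser's current tree, frontier
+ and remaining text are left unchanged, so the new grammar takes
+ effect from the next ``expand`` operation onwards.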
+ :type grammar: CFG + """ + self._grammar = grammar + + +##////////////////////////////////////////////////////// +## Demonstration Code +##////////////////////////////////////////////////////// + + +def demo(): + """ + A demonstration of the recursive descent parser. + """ + + from nltk import CFG, parse + + grammar = CFG.fromstring( + """ + S -> NP VP + NP -> Det N | Det N PP + VP -> V NP | V NP PP + PP -> P NP + NP -> 'I' + N -> 'man' | 'park' | 'telescope' | 'dog' + Det -> 'the' | 'a' + P -> 'in' | 'with' + V -> 'saw' + """ + ) + + for prod in grammar.productions(): + print(prod) + + sent = "I saw a man in the park".split() + parser = parse.RecursiveDescentParser(grammar, trace=2) + for p in parser.parse(sent): + print(p) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/stanford.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ed0f9eab042dfdb7d91679ac5502f495a328ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/stanford.py @@ -0,0 +1,470 @@ +# Natural Language Toolkit: Interface to the Stanford Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_jar_iter, + find_jars_within_path, + java, +) +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.tree import Tree + +_stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml" + + +class GenericStanfordParser(ParserI): + """Interface to the Stanford Parser""" + + _MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar" + _JAR = r"stanford-parser\.jar" + _MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser" + + _USE_STDIN = False + _DOUBLE_SPACED_OUTPUT = False + + def __init__( + self, + path_to_jar=None, + path_to_models_jar=None, + model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz", + encoding="utf8", + verbose=False, + java_options="-mx4g", + corenlp_options="", + ): + + # find the most recent code and model jar + stanford_jar = max( + find_jar_iter( + self._JAR, + path_to_jar, + env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_path: os.path.dirname(model_path), + ) + + model_jar = max( + find_jar_iter( + self._MODEL_JAR_PATTERN, + path_to_models_jar, + env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_path: os.path.dirname(model_path), + ) + + # self._classpath = (stanford_jar, model_jar) + + # Adding logging jar files to classpath + stanford_dir = os.path.split(stanford_jar)[0] + self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir)) + + self.model_path = model_path + self._encoding = encoding + self.corenlp_options = corenlp_options + self.java_options = java_options + + def _parse_trees_output(self, output_): + res = [] + cur_lines = [] + cur_trees = [] + blank = False + for line in output_.splitlines(False): + if line == "": + if blank: + res.append(iter(cur_trees)) + cur_trees = [] + blank = False + elif self._DOUBLE_SPACED_OUTPUT: + cur_trees.append(self._make_tree("\n".join(cur_lines))) + cur_lines = [] + blank = True + else: + 
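+ # Single-spaced output: each blank line terminates exactly one tree,
+ # so the lines collected so far are turned into a tree right away.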
res.append(iter([self._make_tree("\n".join(cur_lines))])) + cur_lines = [] + else: + cur_lines.append(line) + blank = False + return iter(res) + + def parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences as a + list where each sentence is a list of words. + Each sentence will be automatically tagged with this StanfordParser instance's + tagger. + If whitespaces exists inside a token, then the token will be treated as + separate tokens. + + :param sentences: Input sentences to parse + :type sentences: list(list(str)) + :rtype: iter(iter(Tree)) + """ + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + "-tokenized", + "-escaper", + "edu.stanford.nlp.process.PTBEscapingProcessor", + ] + return self._parse_trees_output( + self._execute( + cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose + ) + ) + + def raw_parse(self, sentence, verbose=False): + """ + Use StanfordParser to parse a sentence. Takes a sentence as a string; + before parsing, it will be automatically tokenized and tagged by + the Stanford Parser. + + :param sentence: Input sentence to parse + :type sentence: str + :rtype: iter(Tree) + """ + return next(self.raw_parse_sents([sentence], verbose)) + + def raw_parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences as a + list of strings. + Each sentence will be automatically tokenized and tagged by the Stanford Parser. + + :param sentences: Input sentences to parse + :type sentences: list(str) + :rtype: iter(iter(Tree)) + """ + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + ] + return self._parse_trees_output( + self._execute(cmd, "\n".join(sentences), verbose) + ) + + def tagged_parse(self, sentence, verbose=False): + """ + Use StanfordParser to parse a sentence. Takes a sentence as a list of + (word, tag) tuples; the sentence must have already been tokenized and + tagged. + + :param sentence: Input sentence to parse + :type sentence: list(tuple(str, str)) + :rtype: iter(Tree) + """ + return next(self.tagged_parse_sents([sentence], verbose)) + + def tagged_parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences + where each sentence is a list of (word, tag) tuples. + The sentences must have already been tokenized and tagged. 
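+
+ For example, a single pre-tagged sentence is passed as
+ ``[[("The", "DT"), ("dog", "NN"), ("barks", "VBZ")]]`` (the tags
+ shown here are only illustrative).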
+ + :param sentences: Input sentences to parse + :type sentences: list(list(tuple(str, str))) + :rtype: iter(iter(Tree)) + """ + tag_separator = "/" + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + "-tokenized", + "-tagSeparator", + tag_separator, + "-tokenizerFactory", + "edu.stanford.nlp.process.WhitespaceTokenizer", + "-tokenizerMethod", + "newCoreLabelTokenizerFactory", + ] + # We don't need to escape slashes as "splitting is done on the last instance of the character in the token" + return self._parse_trees_output( + self._execute( + cmd, + "\n".join( + " ".join(tag_separator.join(tagged) for tagged in sentence) + for sentence in sentences + ), + verbose, + ) + ) + + def _execute(self, cmd, input_, verbose=False): + encoding = self._encoding + cmd.extend(["-encoding", encoding]) + if self.corenlp_options: + cmd.extend(self.corenlp_options.split()) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + # Windows is incompatible with NamedTemporaryFile() without passing in delete=False. + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: + # Write the actual sentences to the temporary input file + if isinstance(input_, str) and encoding: + input_ = input_.encode(encoding) + input_file.write(input_) + input_file.flush() + + # Run the tagger and get the output. + if self._USE_STDIN: + input_file.seek(0) + stdout, stderr = java( + cmd, + classpath=self._classpath, + stdin=input_file, + stdout=PIPE, + stderr=PIPE, + ) + else: + cmd.append(input_file.name) + stdout, stderr = java( + cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE + ) + + stdout = stdout.replace(b"\xc2\xa0", b" ") + stdout = stdout.replace(b"\x00\xa0", b" ") + stdout = stdout.decode(encoding) + + os.unlink(input_file.name) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout + + +class StanfordParser(GenericStanfordParser): + """ + >>> parser=StanfordParser( + ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" + ... ) # doctest: +SKIP + + >>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), + Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents(( + ... "the quick brown fox jumps over the lazy dog", + ... "the quick grey wolf jumps over the lazy fox" + ... 
))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), + Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP', + [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP', + [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']), + Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents(( + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]), + Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP', + [Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']), + Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []), + Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents(( + ... ( + ... ("The", "DT"), + ... ("quick", "JJ"), + ... ("brown", "JJ"), + ... ("fox", "NN"), + ... ("jumped", "VBD"), + ... ("over", "IN"), + ... ("the", "DT"), + ... ("lazy", "JJ"), + ... ("dog", "NN"), + ... (".", "."), + ... ), + ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP', + [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])] + """ + + _OUTPUT_FORMAT = "penn" + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + + def _make_tree(self, result): + return Tree.fromstring(result) + + +class StanfordDependencyParser(GenericStanfordParser): + + """ + >>> dep_parser=StanfordDependencyParser( + ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" + ... ) # doctest: +SKIP + + >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])] + + >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')), + ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')), + ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')), + ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents(( + ... "The quick brown fox jumps over the lazy dog.", + ... 
"The quick grey wolf jumps over the lazy fox." + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]), + Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents(( + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])] + + >>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents(( + ... ( + ... ("The", "DT"), + ... ("quick", "JJ"), + ... ("brown", "JJ"), + ... ("fox", "NN"), + ... ("jumped", "VBD"), + ... ("over", "IN"), + ... ("the", "DT"), + ... ("lazy", "JJ"), + ... ("dog", "NN"), + ... (".", "."), + ... ), + ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')), + ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')), + ((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')), + ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]] + + """ + + _OUTPUT_FORMAT = "conll2007" + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordDependencyParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + + def _make_tree(self, result): + return DependencyGraph(result, top_relation_label="root") + + +class StanfordNeuralDependencyParser(GenericStanfordParser): + """ + >>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP + >>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP + + >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])] + + >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', + (u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), + u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), + ((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det', + (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'), + u'punct', (u'.', u'.'))]] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents(( + ... "The quick brown fox jumps over the lazy dog.", + ... "The quick grey wolf jumps over the lazy fox." + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', + 'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), + Tree('fox', ['over', 'the', 'lazy']), '.'])] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents(( + ... "I 'm a dog".split(), + ... 
"This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', + ['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])] + """ + + _OUTPUT_FORMAT = "conll" + _MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP" + _JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar" + _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar" + _USE_STDIN = True + _DOUBLE_SPACED_OUTPUT = True + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordNeuralDependencyParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse" + + def tagged_parse_sents(self, sentences, verbose=False): + """ + Currently unimplemented because the neural dependency parser (and + the StanfordCoreNLP pipeline class) doesn't support passing in pre- + tagged tokens. + """ + raise NotImplementedError( + "tagged_parse[_sents] is not supported by " + "StanfordNeuralDependencyParser; use " + "parse[_sents] or raw_parse[_sents] instead." + ) + + def _make_tree(self, result): + return DependencyGraph(result, top_relation_label="ROOT") diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/transitionparser.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/transitionparser.py new file mode 100644 index 0000000000000000000000000000000000000000..476d70260a09c92196ea1cce749fc6774e75d822 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/transitionparser.py @@ -0,0 +1,794 @@ +# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers +# +# Author: Long Duong +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import pickle +import tempfile +from copy import deepcopy +from operator import itemgetter +from os import remove + +try: + from numpy import array + from scipy import sparse + from sklearn import svm + from sklearn.datasets import load_svmlight_file +except ImportError: + pass + +from nltk.parse import DependencyEvaluator, DependencyGraph, ParserI + + +class Configuration: + """ + Class for holding configuration which is the partial analysis of the input sentence. + The transition based parser aims at finding set of operators that transfer the initial + configuration to the terminal configuration. + + The configuration includes: + - Stack: for storing partially proceeded words + - Buffer: for storing remaining input words + - Set of arcs: for storing partially built dependency tree + + This class also provides a method to represent a configuration as list of features. + """ + + def __init__(self, dep_graph): + """ + :param dep_graph: the representation of an input in the form of dependency graph. + :type dep_graph: DependencyGraph where the dependencies are not specified. 
+ """ + # dep_graph.nodes contain list of token for a sentence + self.stack = [0] # The root element + self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer + self.arcs = [] # empty set of arc + self._tokens = dep_graph.nodes + self._max_address = len(self.buffer) + + def __str__(self): + return ( + "Stack : " + + str(self.stack) + + " Buffer : " + + str(self.buffer) + + " Arcs : " + + str(self.arcs) + ) + + def _check_informative(self, feat, flag=False): + """ + Check whether a feature is informative + The flag control whether "_" is informative or not + """ + if feat is None: + return False + if feat == "": + return False + if flag is False: + if feat == "_": + return False + return True + + def extract_features(self): + """ + Extract the set of features for the current configuration. Implement standard features as describe in + Table 3.2 (page 31) in Dependency Parsing book by Sandra Kubler, Ryan McDonal, Joakim Nivre. + Please note that these features are very basic. + :return: list(str) + """ + result = [] + # Todo : can come up with more complicated features set for better + # performance. + if len(self.stack) > 0: + # Stack 0 + stack_idx0 = self.stack[len(self.stack) - 1] + token = self._tokens[stack_idx0] + if self._check_informative(token["word"], True): + result.append("STK_0_FORM_" + token["word"]) + if "lemma" in token and self._check_informative(token["lemma"]): + result.append("STK_0_LEMMA_" + token["lemma"]) + if self._check_informative(token["tag"]): + result.append("STK_0_POS_" + token["tag"]) + if "feats" in token and self._check_informative(token["feats"]): + feats = token["feats"].split("|") + for feat in feats: + result.append("STK_0_FEATS_" + feat) + # Stack 1 + if len(self.stack) > 1: + stack_idx1 = self.stack[len(self.stack) - 2] + token = self._tokens[stack_idx1] + if self._check_informative(token["tag"]): + result.append("STK_1_POS_" + token["tag"]) + + # Left most, right most dependency of stack[0] + left_most = 1000000 + right_most = -1 + dep_left_most = "" + dep_right_most = "" + for (wi, r, wj) in self.arcs: + if wi == stack_idx0: + if (wj > wi) and (wj > right_most): + right_most = wj + dep_right_most = r + if (wj < wi) and (wj < left_most): + left_most = wj + dep_left_most = r + if self._check_informative(dep_left_most): + result.append("STK_0_LDEP_" + dep_left_most) + if self._check_informative(dep_right_most): + result.append("STK_0_RDEP_" + dep_right_most) + + # Check Buffered 0 + if len(self.buffer) > 0: + # Buffer 0 + buffer_idx0 = self.buffer[0] + token = self._tokens[buffer_idx0] + if self._check_informative(token["word"], True): + result.append("BUF_0_FORM_" + token["word"]) + if "lemma" in token and self._check_informative(token["lemma"]): + result.append("BUF_0_LEMMA_" + token["lemma"]) + if self._check_informative(token["tag"]): + result.append("BUF_0_POS_" + token["tag"]) + if "feats" in token and self._check_informative(token["feats"]): + feats = token["feats"].split("|") + for feat in feats: + result.append("BUF_0_FEATS_" + feat) + # Buffer 1 + if len(self.buffer) > 1: + buffer_idx1 = self.buffer[1] + token = self._tokens[buffer_idx1] + if self._check_informative(token["word"], True): + result.append("BUF_1_FORM_" + token["word"]) + if self._check_informative(token["tag"]): + result.append("BUF_1_POS_" + token["tag"]) + if len(self.buffer) > 2: + buffer_idx2 = self.buffer[2] + token = self._tokens[buffer_idx2] + if self._check_informative(token["tag"]): + result.append("BUF_2_POS_" + token["tag"]) + if len(self.buffer) 
> 3: + buffer_idx3 = self.buffer[3] + token = self._tokens[buffer_idx3] + if self._check_informative(token["tag"]): + result.append("BUF_3_POS_" + token["tag"]) + # Left most, right most dependency of stack[0] + left_most = 1000000 + right_most = -1 + dep_left_most = "" + dep_right_most = "" + for (wi, r, wj) in self.arcs: + if wi == buffer_idx0: + if (wj > wi) and (wj > right_most): + right_most = wj + dep_right_most = r + if (wj < wi) and (wj < left_most): + left_most = wj + dep_left_most = r + if self._check_informative(dep_left_most): + result.append("BUF_0_LDEP_" + dep_left_most) + if self._check_informative(dep_right_most): + result.append("BUF_0_RDEP_" + dep_right_most) + + return result + + +class Transition: + """ + This class defines a set of transition which is applied to a configuration to get another configuration + Note that for different parsing algorithm, the transition is different. + """ + + # Define set of transitions + LEFT_ARC = "LEFTARC" + RIGHT_ARC = "RIGHTARC" + SHIFT = "SHIFT" + REDUCE = "REDUCE" + + def __init__(self, alg_option): + """ + :param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm + :type alg_option: str + """ + self._algo = alg_option + if alg_option not in [ + TransitionParser.ARC_STANDARD, + TransitionParser.ARC_EAGER, + ]: + raise ValueError( + " Currently we only support %s and %s " + % (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER) + ) + + def left_arc(self, conf, relation): + """ + Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): + return -1 + if conf.buffer[0] == 0: + # here is the Root element + return -1 + + idx_wi = conf.stack[len(conf.stack) - 1] + + flag = True + if self._algo == TransitionParser.ARC_EAGER: + for (idx_parent, r, idx_child) in conf.arcs: + if idx_child == idx_wi: + flag = False + + if flag: + conf.stack.pop() + idx_wj = conf.buffer[0] + conf.arcs.append((idx_wj, relation, idx_wi)) + else: + return -1 + + def right_arc(self, conf, relation): + """ + Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): + return -1 + if self._algo == TransitionParser.ARC_STANDARD: + idx_wi = conf.stack.pop() + idx_wj = conf.buffer[0] + conf.buffer[0] = idx_wi + conf.arcs.append((idx_wi, relation, idx_wj)) + else: # arc-eager + idx_wi = conf.stack[len(conf.stack) - 1] + idx_wj = conf.buffer.pop(0) + conf.stack.append(idx_wj) + conf.arcs.append((idx_wi, relation, idx_wj)) + + def reduce(self, conf): + """ + Note that the algorithm for reduce is only available for arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + + if self._algo != TransitionParser.ARC_EAGER: + return -1 + if len(conf.stack) <= 0: + return -1 + + idx_wi = conf.stack[len(conf.stack) - 1] + flag = False + for (idx_parent, r, idx_child) in conf.arcs: + if idx_child == idx_wi: + flag = True + if flag: + conf.stack.pop() # reduce it + else: + return -1 + + def shift(self, conf): + """ + Note that the algorithm for shift is the SAME for arc-standard 
and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if len(conf.buffer) <= 0: + return -1 + idx_wi = conf.buffer.pop(0) + conf.stack.append(idx_wi) + + +class TransitionParser(ParserI): + + """ + Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager" + """ + + ARC_STANDARD = "arc-standard" + ARC_EAGER = "arc-eager" + + def __init__(self, algorithm): + """ + :param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm + :type algorithm: str + """ + if not (algorithm in [self.ARC_STANDARD, self.ARC_EAGER]): + raise ValueError( + " Currently we only support %s and %s " + % (self.ARC_STANDARD, self.ARC_EAGER) + ) + self._algorithm = algorithm + + self._dictionary = {} + self._transition = {} + self._match_transition = {} + + def _get_dep_relation(self, idx_parent, idx_child, depgraph): + p_node = depgraph.nodes[idx_parent] + c_node = depgraph.nodes[idx_child] + + if c_node["word"] is None: + return None # Root word + + if c_node["head"] == p_node["address"]: + return c_node["rel"] + else: + return None + + def _convert_to_binary_features(self, features): + """ + :param features: list of feature string which is needed to convert to binary features + :type features: list(str) + :return : string of binary features in libsvm format which is 'featureID:value' pairs + """ + unsorted_result = [] + for feature in features: + self._dictionary.setdefault(feature, len(self._dictionary)) + unsorted_result.append(self._dictionary[feature]) + + # Default value of each feature is 1.0 + return " ".join( + str(featureID) + ":1.0" for featureID in sorted(unsorted_result) + ) + + def _is_projective(self, depgraph): + arc_list = [] + for key in depgraph.nodes: + node = depgraph.nodes[key] + + if "head" in node: + childIdx = node["address"] + parentIdx = node["head"] + if parentIdx is not None: + arc_list.append((parentIdx, childIdx)) + + for (parentIdx, childIdx) in arc_list: + # Ensure that childIdx < parentIdx + if childIdx > parentIdx: + temp = childIdx + childIdx = parentIdx + parentIdx = temp + for k in range(childIdx + 1, parentIdx): + for m in range(len(depgraph.nodes)): + if (m < childIdx) or (m > parentIdx): + if (k, m) in arc_list: + return False + if (m, k) in arc_list: + return False + return True + + def _write_to_file(self, key, binary_features, input_file): + """ + write the binary features to input file and update the transition dictionary + """ + self._transition.setdefault(key, len(self._transition) + 1) + self._match_transition[self._transition[key]] = key + + input_str = str(self._transition[key]) + " " + binary_features + "\n" + input_file.write(input_str.encode("utf-8")) + + def _create_training_examples_arc_std(self, depgraphs, input_file): + """ + Create the training example in the libsvm format and write it to the input_file. + Reference : Page 32, Chapter 3. 
Dependency Parsing by Sandra Kubler, Ryan McDonal and Joakim Nivre (2009) + """ + operation = Transition(self.ARC_STANDARD) + count_proj = 0 + training_seq = [] + + for depgraph in depgraphs: + if not self._is_projective(depgraph): + continue + + count_proj += 1 + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + b0 = conf.buffer[0] + features = conf.extract_features() + binary_features = self._convert_to_binary_features(features) + + if len(conf.stack) > 0: + s0 = conf.stack[len(conf.stack) - 1] + # Left-arc operation + rel = self._get_dep_relation(b0, s0, depgraph) + if rel is not None: + key = Transition.LEFT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.left_arc(conf, rel) + training_seq.append(key) + continue + + # Right-arc operation + rel = self._get_dep_relation(s0, b0, depgraph) + if rel is not None: + precondition = True + # Get the max-index of buffer + maxID = conf._max_address + + for w in range(maxID + 1): + if w != b0: + relw = self._get_dep_relation(b0, w, depgraph) + if relw is not None: + if (b0, relw, w) not in conf.arcs: + precondition = False + + if precondition: + key = Transition.RIGHT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.right_arc(conf, rel) + training_seq.append(key) + continue + + # Shift operation as the default + key = Transition.SHIFT + self._write_to_file(key, binary_features, input_file) + operation.shift(conf) + training_seq.append(key) + + print(" Number of training examples : " + str(len(depgraphs))) + print(" Number of valid (projective) examples : " + str(count_proj)) + return training_seq + + def _create_training_examples_arc_eager(self, depgraphs, input_file): + """ + Create the training example in the libsvm format and write it to the input_file. 
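+ Transitions are attempted in the fixed priority order LEFT-ARC,
+ RIGHT-ARC, REDUCE and finally SHIFT as the default, mirroring the
+ loop below.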
+ Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Joav Goldberg and Joakim Nivre + """ + operation = Transition(self.ARC_EAGER) + countProj = 0 + training_seq = [] + + for depgraph in depgraphs: + if not self._is_projective(depgraph): + continue + + countProj += 1 + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + b0 = conf.buffer[0] + features = conf.extract_features() + binary_features = self._convert_to_binary_features(features) + + if len(conf.stack) > 0: + s0 = conf.stack[len(conf.stack) - 1] + # Left-arc operation + rel = self._get_dep_relation(b0, s0, depgraph) + if rel is not None: + key = Transition.LEFT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.left_arc(conf, rel) + training_seq.append(key) + continue + + # Right-arc operation + rel = self._get_dep_relation(s0, b0, depgraph) + if rel is not None: + key = Transition.RIGHT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.right_arc(conf, rel) + training_seq.append(key) + continue + + # reduce operation + flag = False + for k in range(s0): + if self._get_dep_relation(k, b0, depgraph) is not None: + flag = True + if self._get_dep_relation(b0, k, depgraph) is not None: + flag = True + if flag: + key = Transition.REDUCE + self._write_to_file(key, binary_features, input_file) + operation.reduce(conf) + training_seq.append(key) + continue + + # Shift operation as the default + key = Transition.SHIFT + self._write_to_file(key, binary_features, input_file) + operation.shift(conf) + training_seq.append(key) + + print(" Number of training examples : " + str(len(depgraphs))) + print(" Number of valid (projective) examples : " + str(countProj)) + return training_seq + + def train(self, depgraphs, modelfile, verbose=True): + """ + :param depgraphs : list of DependencyGraph as the training data + :type depgraphs : DependencyGraph + :param modelfile : file name to save the trained model + :type modelfile : str + """ + + try: + input_file = tempfile.NamedTemporaryFile( + prefix="transition_parse.train", dir=tempfile.gettempdir(), delete=False + ) + + if self._algorithm == self.ARC_STANDARD: + self._create_training_examples_arc_std(depgraphs, input_file) + else: + self._create_training_examples_arc_eager(depgraphs, input_file) + + input_file.close() + # Using the temporary file to train the libsvm classifier + x_train, y_train = load_svmlight_file(input_file.name) + # The parameter is set according to the paper: + # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre + # Todo : because of probability = True => very slow due to + # cross-validation. 
Need to improve the speed here + model = svm.SVC( + kernel="poly", + degree=2, + coef0=0, + gamma=0.2, + C=0.5, + verbose=verbose, + probability=True, + ) + + model.fit(x_train, y_train) + # Save the model to file name (as pickle) + pickle.dump(model, open(modelfile, "wb")) + finally: + remove(input_file.name) + + def parse(self, depgraphs, modelFile): + """ + :param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy + :type depgraphs: list(DependencyGraph) + :param modelfile: the model file + :type modelfile: str + :return: list (DependencyGraph) with the 'head' and 'rel' information + """ + result = [] + # First load the model + model = pickle.load(open(modelFile, "rb")) + operation = Transition(self._algorithm) + + for depgraph in depgraphs: + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + features = conf.extract_features() + col = [] + row = [] + data = [] + for feature in features: + if feature in self._dictionary: + col.append(self._dictionary[feature]) + row.append(0) + data.append(1.0) + np_col = array(sorted(col)) # NB : index must be sorted + np_row = array(row) + np_data = array(data) + + x_test = sparse.csr_matrix( + (np_data, (np_row, np_col)), shape=(1, len(self._dictionary)) + ) + + # It's best to use decision function as follow BUT it's not supported yet for sparse SVM + # Using decision function to build the votes array + # dec_func = model.decision_function(x_test)[0] + # votes = {} + # k = 0 + # for i in range(len(model.classes_)): + # for j in range(i+1, len(model.classes_)): + # #if dec_func[k] > 0: + # votes.setdefault(i,0) + # votes[i] +=1 + # else: + # votes.setdefault(j,0) + # votes[j] +=1 + # k +=1 + # Sort votes according to the values + # sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True) + + # We will use predict_proba instead of decision_function + prob_dict = {} + pred_prob = model.predict_proba(x_test)[0] + for i in range(len(pred_prob)): + prob_dict[i] = pred_prob[i] + sorted_Prob = sorted(prob_dict.items(), key=itemgetter(1), reverse=True) + + # Note that SHIFT is always a valid operation + for (y_pred_idx, confidence) in sorted_Prob: + # y_pred = model.predict(x_test)[0] + # From the prediction match to the operation + y_pred = model.classes_[y_pred_idx] + + if y_pred in self._match_transition: + strTransition = self._match_transition[y_pred] + baseTransition = strTransition.split(":")[0] + + if baseTransition == Transition.LEFT_ARC: + if ( + operation.left_arc(conf, strTransition.split(":")[1]) + != -1 + ): + break + elif baseTransition == Transition.RIGHT_ARC: + if ( + operation.right_arc(conf, strTransition.split(":")[1]) + != -1 + ): + break + elif baseTransition == Transition.REDUCE: + if operation.reduce(conf) != -1: + break + elif baseTransition == Transition.SHIFT: + if operation.shift(conf) != -1: + break + else: + raise ValueError( + "The predicted transition is not recognized, expected errors" + ) + + # Finish with operations build the dependency graph from Conf.arcs + + new_depgraph = deepcopy(depgraph) + for key in new_depgraph.nodes: + node = new_depgraph.nodes[key] + node["rel"] = "" + # With the default, all the token depend on the Root + node["head"] = 0 + for (head, rel, child) in conf.arcs: + c_node = new_depgraph.nodes[child] + c_node["head"] = head + c_node["rel"] = rel + result.append(new_depgraph) + + return result + + +def demo(): + """ + >>> from nltk.parse import DependencyGraph, DependencyEvaluator + >>> from 
nltk.parse.transitionparser import TransitionParser, Configuration, Transition + >>> gold_sent = DependencyGraph(\""" + ... Economic JJ 2 ATT + ... news NN 3 SBJ + ... has VBD 0 ROOT + ... little JJ 5 ATT + ... effect NN 3 OBJ + ... on IN 5 ATT + ... financial JJ 8 ATT + ... markets NNS 6 PC + ... . . 3 PU + ... \""") + + >>> conf = Configuration(gold_sent) + + ###################### Check the Initial Feature ######################## + + >>> print(', '.join(conf.extract_features())) + STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ + + ###################### Check The Transition ####################### + Check the Initialized Configuration + >>> print(conf) + Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] + + A. Do some transition checks for ARC-STANDARD + + >>> operation = Transition('arc-standard') + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + >>> operation.shift(conf) + >>> operation.left_arc(conf,"SBJ") + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + + Middle Configuration and Features Check + >>> print(conf) + Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] + + >>> print(', '.join(conf.extract_features())) + STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT + + >>> operation.right_arc(conf, "PC") + >>> operation.right_arc(conf, "ATT") + >>> operation.right_arc(conf, "OBJ") + >>> operation.shift(conf) + >>> operation.right_arc(conf, "PU") + >>> operation.right_arc(conf, "ROOT") + >>> operation.shift(conf) + + Terminated Configuration Check + >>> print(conf) + Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] + + + B. Do some transition checks for ARC-EAGER + + >>> conf = Configuration(gold_sent) + >>> operation = Transition('arc-eager') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'SBJ') + >>> operation.right_arc(conf,'ROOT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.right_arc(conf,'OBJ') + >>> operation.right_arc(conf,'ATT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.right_arc(conf,'PC') + >>> operation.reduce(conf) + >>> operation.reduce(conf) + >>> operation.reduce(conf) + >>> operation.right_arc(conf,'PU') + >>> print(conf) + Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] + + ###################### Check The Training Function ####################### + + A. 
Check the ARC-STANDARD training + >>> import tempfile + >>> import os + >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) + + >>> parser_std = TransitionParser('arc-standard') + >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) + Number of training examples : 1 + Number of valid (projective) examples : 1 + SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT + + >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) + Number of training examples : 1 + Number of valid (projective) examples : 1 + >>> input_file.close() + >>> remove(input_file.name) + + B. Check the ARC-EAGER training + + >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) + >>> parser_eager = TransitionParser('arc-eager') + >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) + Number of training examples : 1 + Number of valid (projective) examples : 1 + SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU + + >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False) + Number of training examples : 1 + Number of valid (projective) examples : 1 + + >>> input_file.close() + >>> remove(input_file.name) + + ###################### Check The Parsing Function ######################## + + A. Check the ARC-STANDARD parser + + >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') + >>> de = DependencyEvaluator(result, [gold_sent]) + >>> de.eval() >= (0, 0) + True + + B. Check the ARC-EAGER parser + >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') + >>> de = DependencyEvaluator(result, [gold_sent]) + >>> de.eval() >= (0, 0) + True + + Remove test temporary files + >>> remove('temp.arceager.model') + >>> remove('temp.arcstd.model') + + Note that result is very poor because of only one training example. + """ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/util.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc5bee08fdb9aa237513992a36fa2eaa0aa8219 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/util.py @@ -0,0 +1,234 @@ +# Natural Language Toolkit: Parser Utility Functions +# +# Author: Ewan Klein +# Tom Aarsen <> +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + + +""" +Utility functions for parsers. +""" + +from nltk.data import load +from nltk.grammar import CFG, PCFG, FeatureGrammar +from nltk.parse.chart import Chart, ChartParser +from nltk.parse.featurechart import FeatureChart, FeatureChartParser +from nltk.parse.pchart import InsideChartParser + + +def load_parser( + grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args +): + """ + Load a grammar from a file, and build a parser based on that grammar. + The parser depends on the grammar format, and might also depend + on properties of the grammar itself. 
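+
+ A minimal usage sketch (assuming the sample grammar
+ ``grammars/book_grammars/feat0.fcfg`` is available in your local
+ NLTK data installation; supported formats are listed below)::
+
+ from nltk.parse.util import load_parser
+ cp = load_parser('grammars/book_grammars/feat0.fcfg', trace=0)
+ for tree in cp.parse('Kim likes children'.split()):
+ print(tree)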
+ + The following grammar formats are currently supported: + - ``'cfg'`` (CFGs: ``CFG``) + - ``'pcfg'`` (probabilistic CFGs: ``PCFG``) + - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``) + + :type grammar_url: str + :param grammar_url: A URL specifying where the grammar is located. + The default protocol is ``"nltk:"``, which searches for the file + in the the NLTK data package. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing output. + :param parser: The class used for parsing; should be ``ChartParser`` + or a subclass. + If None, the class depends on the grammar format. + :param chart_class: The class used for storing the chart; + should be ``Chart`` or a subclass. + Only used for CFGs and feature CFGs. + If None, the chart class depends on the grammar format. + :type beam_size: int + :param beam_size: The maximum length for the parser's edge queue. + Only used for probabilistic CFGs. + :param load_args: Keyword parameters used when loading the grammar. + See ``data.load`` for more information. + """ + grammar = load(grammar_url, **load_args) + if not isinstance(grammar, CFG): + raise ValueError("The grammar must be a CFG, " "or a subclass thereof.") + if isinstance(grammar, PCFG): + if parser is None: + parser = InsideChartParser + return parser(grammar, trace=trace, beam_size=beam_size) + + elif isinstance(grammar, FeatureGrammar): + if parser is None: + parser = FeatureChartParser + if chart_class is None: + chart_class = FeatureChart + return parser(grammar, trace=trace, chart_class=chart_class) + + else: # Plain CFG. + if parser is None: + parser = ChartParser + if chart_class is None: + chart_class = Chart + return parser(grammar, trace=trace, chart_class=chart_class) + + +def taggedsent_to_conll(sentence): + """ + A module to convert a single POS tagged sentence into CONLL format. + + >>> from nltk import word_tokenize, pos_tag + >>> text = "This is a foobar sentence." + >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE + ... print(line, end="") + 1 This _ DT DT _ 0 a _ _ + 2 is _ VBZ VBZ _ 0 a _ _ + 3 a _ DT DT _ 0 a _ _ + 4 foobar _ JJ JJ _ 0 a _ _ + 5 sentence _ NN NN _ 0 a _ _ + 6 . _ . . _ 0 a _ _ + + :param sentence: A single input sentence to parse + :type sentence: list(tuple(str, str)) + :rtype: iter(str) + :return: a generator yielding a single sentence in CONLL format. + """ + for (i, (word, tag)) in enumerate(sentence, start=1): + input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"] + input_str = "\t".join(input_str) + "\n" + yield input_str + + +def taggedsents_to_conll(sentences): + """ + A module to convert the a POS tagged document stream + (i.e. list of list of tuples, a list of sentences) and yield lines + in CONLL format. This module yields one line per word and two newlines + for end of sentence. + + >>> from nltk import word_tokenize, sent_tokenize, pos_tag + >>> text = "This is a foobar sentence. Is that right?" + >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] + >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE + ... if line: + ... print(line, end="") + 1 This _ DT DT _ 0 a _ _ + 2 is _ VBZ VBZ _ 0 a _ _ + 3 a _ DT DT _ 0 a _ _ + 4 foobar _ JJ JJ _ 0 a _ _ + 5 sentence _ NN NN _ 0 a _ _ + 6 . _ . . 
_ 0 a _ _ + + + 1 Is _ VBZ VBZ _ 0 a _ _ + 2 that _ IN IN _ 0 a _ _ + 3 right _ NN NN _ 0 a _ _ + 4 ? _ . . _ 0 a _ _ + + + + :param sentences: Input sentences to parse + :type sentence: list(list(tuple(str, str))) + :rtype: iter(str) + :return: a generator yielding sentences in CONLL format. + """ + for sentence in sentences: + yield from taggedsent_to_conll(sentence) + yield "\n\n" + + +###################################################################### +# { Test Suites +###################################################################### + + +class TestGrammar: + """ + Unit tests for CFG. + """ + + def __init__(self, grammar, suite, accept=None, reject=None): + self.test_grammar = grammar + + self.cp = load_parser(grammar, trace=0) + self.suite = suite + self._accept = accept + self._reject = reject + + def run(self, show_trees=False): + """ + Sentences in the test suite are divided into two classes: + + - grammatical (``accept``) and + - ungrammatical (``reject``). + + If a sentence should parse according to the grammar, the value of + ``trees`` will be a non-empty list. If a sentence should be rejected + according to the grammar, then the value of ``trees`` will be None. + """ + for test in self.suite: + print(test["doc"] + ":", end=" ") + for key in ["accept", "reject"]: + for sent in test[key]: + tokens = sent.split() + trees = list(self.cp.parse(tokens)) + if show_trees and trees: + print() + print(sent) + for tree in trees: + print(tree) + if key == "accept": + if trees == []: + raise ValueError("Sentence '%s' failed to parse'" % sent) + else: + accepted = True + else: + if trees: + raise ValueError("Sentence '%s' received a parse'" % sent) + else: + rejected = True + if accepted and rejected: + print("All tests passed!") + + +def extract_test_sentences(string, comment_chars="#%;", encoding=None): + """ + Parses a string with one test sentence per line. + Lines can optionally begin with: + + - a bool, saying if the sentence is grammatical or not, or + - an int, giving the number of parse trees is should have, + + The result information is followed by a colon, and then the sentence. + Empty lines and lines beginning with a comment char are ignored. + + :return: a list of tuple of sentences and expected results, + where a sentence is a list of str, + and a result is None, or bool, or int + + :param comment_chars: ``str`` of possible comment characters. 
+ :param encoding: the encoding of the string, if it is binary + """ + if encoding is not None: + string = string.decode(encoding) + sentences = [] + for sentence in string.split("\n"): + if sentence == "" or sentence[0] in comment_chars: + continue + split_info = sentence.split(":", 1) + result = None + if len(split_info) == 2: + if split_info[0] in ["True", "true", "False", "false"]: + result = split_info[0] in ["True", "true"] + sentence = split_info[1] + else: + result = int(split_info[0]) + sentence = split_info[1] + tokens = sentence.split() + if tokens == []: + continue + sentences += [(tokens, result)] + return sentences diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/parse/viterbi.py b/env-llmeval/lib/python3.10/site-packages/nltk/parse/viterbi.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3e9de30432a65828463e32e6ea7bff27b7c5ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/parse/viterbi.py @@ -0,0 +1,453 @@ +# Natural Language Toolkit: Viterbi Probabilistic Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from functools import reduce + +from nltk.parse.api import ParserI +from nltk.tree import ProbabilisticTree, Tree + +##////////////////////////////////////////////////////// +## Viterbi PCFG Parser +##////////////////////////////////////////////////////// + + +class ViterbiParser(ParserI): + """ + A bottom-up ``PCFG`` parser that uses dynamic programming to find + the single most likely parse for a text. The ``ViterbiParser`` parser + parses texts by filling in a "most likely constituent table". + This table records the most probable tree representation for any + given span and node value. In particular, it has an entry for + every start index, end index, and node value, recording the most + likely subtree that spans from the start index to the end index, + and has the given node value. + + The ``ViterbiParser`` parser fills in this table incrementally. It starts + by filling in all entries for constituents that span one element + of text (i.e., entries where the end index is one greater than the + start index). After it has filled in all table entries for + constituents that span one element of text, it fills in the + entries for constitutants that span two elements of text. It + continues filling in the entries for constituents spanning larger + and larger portions of the text, until the entire table has been + filled. Finally, it returns the table entry for a constituent + spanning the entire text, whose node value is the grammar's start + symbol. + + In order to find the most likely constituent with a given span and + node value, the ``ViterbiParser`` parser considers all productions that + could produce that node value. For each production, it finds all + children that collectively cover the span and have the node values + specified by the production's right hand side. If the probability + of the tree formed by applying the production to the children is + greater than the probability of the current entry in the table, + then the table is updated with this new tree. + + A pseudo-code description of the algorithm used by + ``ViterbiParser`` is: + + | Create an empty most likely constituent table, *MLC*. 
+ | For width in 1...len(text): + | For start in 1...len(text)-width: + | For prod in grammar.productions: + | For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC, + | where t[i].label()==prod.rhs[i], + | and the sequence covers [start:start+width]: + | old_p = MLC[start, start+width, prod.lhs] + | new_p = P(t[1])P(t[1])...P(t[n])P(prod) + | if new_p > old_p: + | new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n]) + | MLC[start, start+width, prod.lhs] = new_tree + | Return MLC[0, len(text), start_symbol] + + :type _grammar: PCFG + :ivar _grammar: The grammar used to parse sentences. + :type _trace: int + :ivar _trace: The level of tracing output that should be generated + when parsing a text. + """ + + def __init__(self, grammar, trace=0): + """ + Create a new ``ViterbiParser`` parser, that uses ``grammar`` to + parse texts. + + :type grammar: PCFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + self._grammar = grammar + self._trace = trace + + def grammar(self): + return self._grammar + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + def parse(self, tokens): + # Inherit docs from ParserI + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # The most likely constituent table. This table specifies the + # most likely constituent for a given span and type. + # Constituents can be either Trees or tokens. For Trees, + # the "type" is the Nonterminal for the tree's root node + # value. For Tokens, the "type" is the token's type. + # The table is stored as a dictionary, since it is sparse. + constituents = {} + + # Initialize the constituents dictionary with the words from + # the text. + if self._trace: + print("Inserting tokens into the most likely" + " constituents table...") + for index in range(len(tokens)): + token = tokens[index] + constituents[index, index + 1, token] = token + if self._trace > 1: + self._trace_lexical_insertion(token, index, len(tokens)) + + # Consider each span of length 1, 2, ..., n; and add any trees + # that might cover that span to the constituents dictionary. + for length in range(1, len(tokens) + 1): + if self._trace: + print( + "Finding the most likely constituents" + + " spanning %d text elements..." % length + ) + for start in range(len(tokens) - length + 1): + span = (start, start + length) + self._add_constituents_spanning(span, constituents, tokens) + + # Return the tree that spans the entire text & have the right cat + tree = constituents.get((0, len(tokens), self._grammar.start())) + if tree is not None: + yield tree + + def _add_constituents_spanning(self, span, constituents, tokens): + """ + Find any constituents that might cover ``span``, and add them + to the most likely constituents table. + + :rtype: None + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find possible constituents. 
The span is + specified as a pair of integers, where the first integer + is the index of the first token that should be included in + the constituent; and the second integer is the index of + the first token that should not be included in the + constituent. I.e., the constituent should cover + ``text[span[0]:span[1]]``, where ``text`` is the text + that we are parsing. + + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. In particular, + ``constituents(s,e,nv)`` is the most likely + ``ProbabilisticTree`` that covers ``text[s:e]`` + and has a node value ``nv.symbol()``, where ``text`` + is the text that we are parsing. When + ``_add_constituents_spanning`` is called, ``constituents`` + should contain all possible constituents that are shorter + than ``span``. + + :type tokens: list of tokens + :param tokens: The text we are parsing. This is only used for + trace output. + """ + # Since some of the grammar productions may be unary, we need to + # repeatedly try all of the productions until none of them add any + # new constituents. + changed = True + while changed: + changed = False + + # Find all ways instantiations of the grammar productions that + # cover the span. + instantiations = self._find_instantiations(span, constituents) + + # For each production instantiation, add a new + # ProbabilisticTree whose probability is the product + # of the childrens' probabilities and the production's + # probability. + for (production, children) in instantiations: + subtrees = [c for c in children if isinstance(c, Tree)] + p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob()) + node = production.lhs().symbol() + tree = ProbabilisticTree(node, children, prob=p) + + # If it's new a constituent, then add it to the + # constituents dictionary. + c = constituents.get((span[0], span[1], production.lhs())) + if self._trace > 1: + if c is None or c != tree: + if c is None or c.prob() < tree.prob(): + print(" Insert:", end=" ") + else: + print(" Discard:", end=" ") + self._trace_production(production, p, span, len(tokens)) + if c is None or c.prob() < tree.prob(): + constituents[span[0], span[1], production.lhs()] = tree + changed = True + + def _find_instantiations(self, span, constituents): + """ + :return: a list of the production instantiations that cover a + given span of the text. A "production instantiation" is + a tuple containing a production and a list of children, + where the production's right hand side matches the list of + children; and the children cover ``span``. :rtype: list + of ``pair`` of ``Production``, (list of + (``ProbabilisticTree`` or token. + + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find production instantiations. The span is + specified as a pair of integers, where the first integer + is the index of the first token that should be covered by + the production instantiation; and the second integer is + the index of the first token that should not be covered by + the production instantiation. + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. See the module + documentation for more information. 
+ """ + rv = [] + for production in self._grammar.productions(): + childlists = self._match_rhs(production.rhs(), span, constituents) + + for childlist in childlists: + rv.append((production, childlist)) + return rv + + def _match_rhs(self, rhs, span, constituents): + """ + :return: a set of all the lists of children that cover ``span`` + and that match ``rhs``. + :rtype: list(list(ProbabilisticTree or token) + + :type rhs: list(Nonterminal or any) + :param rhs: The list specifying what kinds of children need to + cover ``span``. Each nonterminal in ``rhs`` specifies + that the corresponding child should be a tree whose node + value is that nonterminal's symbol. Each terminal in ``rhs`` + specifies that the corresponding child should be a token + whose type is that terminal. + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find child lists. The span is specified as a + pair of integers, where the first integer is the index of + the first token that should be covered by the child list; + and the second integer is the index of the first token + that should not be covered by the child list. + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. See the module + documentation for more information. + """ + (start, end) = span + + # Base case + if start >= end and rhs == (): + return [[]] + if start >= end or rhs == (): + return [] + + # Find everything that matches the 1st symbol of the RHS + childlists = [] + for split in range(start, end + 1): + l = constituents.get((start, split, rhs[0])) + if l is not None: + rights = self._match_rhs(rhs[1:], (split, end), constituents) + childlists += [[l] + r for r in rights] + + return childlists + + def _trace_production(self, production, p, span, width): + """ + Print trace output indicating that a given production has been + applied at a given location. + + :param production: The production that has been applied + :type production: Production + :param p: The probability of the tree produced by the production. + :type p: float + :param span: The span of the production + :type span: tuple + :rtype: None + """ + + str = "|" + "." * span[0] + str += "=" * (span[1] - span[0]) + str += "." * (width - span[1]) + "| " + str += "%s" % production + if self._trace > 2: + str = f"{str:<40} {p:12.10f} " + + print(str) + + def _trace_lexical_insertion(self, token, index, width): + str = " Insert: |" + "." * index + "=" + "." * (width - index - 1) + "| " + str += f"{token}" + print(str) + + def __repr__(self): + return "" % self._grammar + + +##////////////////////////////////////////////////////// +## Test Code +##////////////////////////////////////////////////////// + + +def demo(): + """ + A demonstration of the probabilistic parsers. The user is + prompted to select which demo to run, and how many parses should + be found; and then each parser is run on the same demo, and a + summary of the results are displayed. 
+ """ + import sys + import time + + from nltk import tokenize + from nltk.grammar import PCFG + from nltk.parse import ViterbiParser + + toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + # Define two demos. Each demo has a sentence and a grammar. + demos = [ + ("I saw the man with my telescope", toy_pcfg1), + ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), + ] + + # Ask the user which demo they want to use. + print() + for i in range(len(demos)): + print(f"{i + 1:>3}: {demos[i][0]}") + print(" %r" % demos[i][1]) + print() + print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") + try: + snum = int(sys.stdin.readline().strip()) - 1 + sent, grammar = demos[snum] + except: + print("Bad sentence number") + return + + # Tokenize the sentence. + tokens = sent.split() + + parser = ViterbiParser(grammar) + all_parses = {} + + print(f"\nsent: {sent}\nparser: {parser}\ngrammar: {grammar}") + parser.trace(3) + t = time.time() + parses = parser.parse_all(tokens) + time = time.time() - t + average = ( + reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 + ) + num_parses = len(parses) + for p in parses: + all_parses[p.freeze()] = 1 + + # Print some summary statistics + print() + print("Time (secs) # Parses Average P(parse)") + print("-----------------------------------------") + print("%11.4f%11d%19.14f" % (time, num_parses, average)) + parses = all_parses.keys() + if parses: + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) + else: + p = 0 + print("------------------------------------------") + print("%11s%11d%19.14f" % ("n/a", len(parses), p)) + + # Ask the user if we should draw the parses. + print() + print("Draw parses (y/n)? ", end=" ") + if sys.stdin.readline().strip().lower().startswith("y"): + from nltk.draw.tree import draw_trees + + print(" please wait...") + draw_trees(*parses) + + # Ask the user if we should print the parses. + print() + print("Print parses (y/n)? 
", end=" ") + if sys.stdin.readline().strip().lower().startswith("y"): + for parse in parses: + print(parse) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f537dd6c7a9badc43313a8d2b4c5efed9b1b6ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__init__.py @@ -0,0 +1,184 @@ +# Natural Language Toolkit: Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT +""" +NLTK Taggers + +This package contains classes and interfaces for part-of-speech +tagging, or simply "tagging". + +A "tag" is a case-sensitive string that specifies some property of a token, +such as its part of speech. Tagged tokens are encoded as tuples +``(tag, token)``. For example, the following tagged token combines +the word ``'fly'`` with a noun part of speech tag (``'NN'``): + + >>> tagged_tok = ('fly', 'NN') + +An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset: + + >>> from nltk import pos_tag, word_tokenize + >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE + [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), + ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')] + +A Russian tagger is also available if you specify lang="rus". It uses +the Russian National Corpus tagset: + + >>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP + [('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'), + ('бумажку', 'S'), ('.', 'NONLEX')] + +This package defines several taggers, which take a list of tokens, +assign a tag to each one, and return the resulting list of tagged tokens. +Most of the taggers are built automatically based on a training corpus. +For example, the unigram tagger tags each word *w* by checking what +the most frequent tag for *w* was in a training corpus: + + >>> from nltk.corpus import brown + >>> from nltk.tag import UnigramTagger + >>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500]) + >>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment'] + >>> for word, tag in tagger.tag(sent): + ... print(word, '->', tag) + Mitchell -> NP + decried -> None + the -> AT + high -> JJ + rate -> NN + of -> IN + unemployment -> None + +Note that words that the tagger has not seen during training receive a tag +of ``None``. + +We evaluate a tagger on data that was not seen during training: + + >>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3) + 0.735 + +For more information, please consult chapter 5 of the NLTK Book. 
+ +isort:skip_file +""" + +from nltk.tag.api import TaggerI +from nltk.tag.util import str2tuple, tuple2str, untag +from nltk.tag.sequential import ( + SequentialBackoffTagger, + ContextTagger, + DefaultTagger, + NgramTagger, + UnigramTagger, + BigramTagger, + TrigramTagger, + AffixTagger, + RegexpTagger, + ClassifierBasedTagger, + ClassifierBasedPOSTagger, +) +from nltk.tag.brill import BrillTagger +from nltk.tag.brill_trainer import BrillTaggerTrainer +from nltk.tag.tnt import TnT +from nltk.tag.hunpos import HunposTagger +from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger +from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer +from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger +from nltk.tag.mapping import tagset_mapping, map_tag +from nltk.tag.crf import CRFTagger +from nltk.tag.perceptron import PerceptronTagger + +from nltk.data import load, find + +RUS_PICKLE = ( + "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle" +) + + +def _get_tagger(lang=None): + if lang == "rus": + tagger = PerceptronTagger(False) + ap_russian_model_loc = "file:" + str(find(RUS_PICKLE)) + tagger.load(ap_russian_model_loc) + else: + tagger = PerceptronTagger() + return tagger + + +def _pos_tag(tokens, tagset=None, tagger=None, lang=None): + # Currently only supports English and Russian. + if lang not in ["eng", "rus"]: + raise NotImplementedError( + "Currently, NLTK pos_tag only supports English and Russian " + "(i.e. lang='eng' or lang='rus')" + ) + # Throws Error if tokens is of string type + elif isinstance(tokens, str): + raise TypeError("tokens: expected a list of strings, got a string") + + else: + tagged_tokens = tagger.tag(tokens) + if tagset: # Maps to the specified tagset. + if lang == "eng": + tagged_tokens = [ + (token, map_tag("en-ptb", tagset, tag)) + for (token, tag) in tagged_tokens + ] + elif lang == "rus": + # Note that the new Russian pos tags from the model contains suffixes, + # see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018 + tagged_tokens = [ + (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0])) + for (token, tag) in tagged_tokens + ] + return tagged_tokens + + +def pos_tag(tokens, tagset=None, lang="eng"): + """ + Use NLTK's currently recommended part of speech tagger to + tag the given list of tokens. + + >>> from nltk.tag import pos_tag + >>> from nltk.tokenize import word_tokenize + >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE + [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), + ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')] + >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal') # doctest: +NORMALIZE_WHITESPACE + [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'), + ("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')] + + NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence. + + :param tokens: Sequence of tokens to be tagged + :type tokens: list(str) + :param tagset: the tagset to be used, e.g. universal, wsj, brown + :type tagset: str + :param lang: the ISO 639 code of the language, e.g. 
'eng' for English, 'rus' for Russian + :type lang: str + :return: The tagged tokens + :rtype: list(tuple(str, str)) + """ + tagger = _get_tagger(lang) + return _pos_tag(tokens, tagset, tagger, lang) + + +def pos_tag_sents(sentences, tagset=None, lang="eng"): + """ + Use NLTK's currently recommended part of speech tagger to tag the + given list of sentences, each consisting of a list of tokens. + + :param sentences: List of sentences to be tagged + :type sentences: list(list(str)) + :param tagset: the tagset to be used, e.g. universal, wsj, brown + :type tagset: str + :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian + :type lang: str + :return: The list of tagged sentences + :rtype: list(list(tuple(str, str))) + """ + tagger = _get_tagger(lang) + return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b78ffc67a43c0943465f97e28e5bfff0363300f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..536482e79e3cb550789efc4f04ada76978035f31 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f001b1f3d5466ab53bf78333601ba315f7d6cebb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be45506b7eed95502e5b4de19737e70dbf93d125 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49bbf786576d13674a4fd8bd04d4231b026caa42 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7239bab02b8348e7fff7b65ce19316bd421d4454 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..ae082be8c5063f8b62ee93c4eecee8dfcc14c50c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9eb446981fd4871f8f0a3a5e108360078aa5aeb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c77e03d3a797e106d94c0f96022b47be07bcd3f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b54c11b7c4448d63cd31ce26773eb7e1ef0d27e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..179df09073c6f1b40e8ed3c1b4718e78e8ea79e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21e84eec7c46c005f509f5d21675c5a8f42e2c99 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fec27374a747ea19d4e001576b3c005806c4753 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6441d1513476b904824a6e5a194caab556c040ba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/api.py new file mode 100644 index 0000000000000000000000000000000000000000..27e45026cabe6d747f4b4a7dc108b7c3cec1c6f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/api.py @@ -0,0 +1,296 @@ +# Natural Language Toolkit: Tagger Interface +# +# Copyright (C) 
2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for tagging each token in a sentence with supplementary +information, such as its part of speech. +""" +from abc import ABCMeta, abstractmethod +from functools import lru_cache +from itertools import chain +from typing import Dict + +from nltk.internals import deprecated, overridden +from nltk.metrics import ConfusionMatrix, accuracy +from nltk.tag.util import untag + + +class TaggerI(metaclass=ABCMeta): + """ + A processing interface for assigning a tag to each token in a list. + Tags are case sensitive strings that identify some property of each + token, such as its part of speech or its sense. + + Some taggers require specific types for their tokens. This is + generally indicated by the use of a sub-interface to ``TaggerI``. + For example, featureset taggers, which are subclassed from + ``FeaturesetTagger``, require that each token be a ``featureset``. + + Subclasses must define: + - either ``tag()`` or ``tag_sents()`` (or both) + """ + + @abstractmethod + def tag(self, tokens): + """ + Determine the most appropriate tag sequence for the given + token sequence, and return a corresponding list of tagged + tokens. A tagged token is encoded as a tuple ``(token, tag)``. + + :rtype: list(tuple(str, str)) + """ + if overridden(self.tag_sents): + return self.tag_sents([tokens])[0] + + def tag_sents(self, sentences): + """ + Apply ``self.tag()`` to each element of *sentences*. I.e.:: + + return [self.tag(sent) for sent in sentences] + """ + return [self.tag(sent) for sent in sentences] + + @deprecated("Use accuracy(gold) instead.") + def evaluate(self, gold): + return self.accuracy(gold) + + def accuracy(self, gold): + """ + Score the accuracy of the tagger against the gold standard. + Strip the tags from the gold standard text, retag it using + the tagger, then compute the accuracy score. + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :rtype: float + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = list(chain.from_iterable(gold)) + test_tokens = list(chain.from_iterable(tagged_sents)) + return accuracy(gold_tokens, test_tokens) + + @lru_cache(maxsize=1) + def _confusion_cached(self, gold): + """ + Inner function used after ``gold`` is converted to a + ``tuple(tuple(tuple(str, str)))``. That way, we can use caching on + creating a ConfusionMatrix. + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: tuple(tuple(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = [token for _word, token in chain.from_iterable(gold)] + test_tokens = [token for _word, token in chain.from_iterable(tagged_sents)] + return ConfusionMatrix(gold_tokens, test_tokens) + + def confusion(self, gold): + """ + Return a ConfusionMatrix with the tags from ``gold`` as the reference + values, with the predictions from ``tag_sents`` as the predicted values. 
+ + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.confusion(gold_data)) + | - | + | N | + | O P | + | N J J N N P P R R V V V V V W | + | ' E C C D E I J J J M N N N O R P R B R T V B B B B B D ` | + | ' , - . C D T X N J R S D N P S S P $ B R P O B D G N P Z T ` | + -------+----------------------------------------------------------------------------------------------+ + '' | <1> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + , | .<15> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + -NONE- | . . <.> . . 2 . . . 2 . . . 5 1 . . . . 2 . . . . . . . . . . . | + . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . . . . | + CC | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . . . . | + CD | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . . . . | + DT | . . . . . .<20> . . . . . . . . . . . . . . . . . . . . . . . . | + EX | . . . . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . | + IN | . . . . . . . .<22> . . . . . . . . . . 3 . . . . . . . . . . . | + JJ | . . . . . . . . .<16> . . . . 1 . . . . 1 . . . . . . . . . . . | + JJR | . . . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . | + JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . . | + MD | . . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . | + NN | . . . . . . . . . . . . .<28> 1 1 . . . . . . . . . . . . . . . | + NNP | . . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . . . | + NNS | . . . . . . . . . . . . . . .<19> . . . . . . . . . . . . . . . | + POS | . . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . . . | + PRP | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . . . | + PRP$ | . . . . . . . . . . . . . . . . . . <2> . . . . . . . . . . . . | + RB | . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . | + RBR | . . . . . . . . . . 1 . . . . . . . . . <1> . . . . . . . . . . | + RP | . . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . . . | + TO | . . . . . . . . . . . . . . . . . . . . . . <5> . . . . . . . . | + VB | . . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . . . | + VBD | . . . . . . . . . . . . . 1 . . . . . . . . . . <6> . . . . . . | + VBG | . . . . . . . . . . . . . 1 . . . . . . . . . . . <4> . . . . . | + VBN | . . . . . . . . . . . . . . . . . . . . . . . . 1 . <4> . . . . | + VBP | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . . . | + VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . . . . <7> . . | + WDT | . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . <.> . | + `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <1>| + -------+----------------------------------------------------------------------------------------------+ + (row = reference; col = test) + + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: list(list(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + return self._confusion_cached(tuple(tuple(sent) for sent in gold)) + + def recall(self, gold) -> Dict[str, float]: + """ + Compute the recall for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to recall. 
The recall is defined as: + + - *r* = true positive / (true positive + false positive) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to recall + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.recall(tag) for tag in cm._values} + + def precision(self, gold): + """ + Compute the precision for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to precision. The precision is defined as: + + - *p* = true positive / (true positive + false negative) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.precision(tag) for tag in cm._values} + + def f_measure(self, gold, alpha=0.5): + """ + Compute the f-measure for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to f-measure. The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. + In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false negative) + - *r* = true positive / (true positive + false positive) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. + :type alpha: float + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + cm = self.confusion(gold) + return {tag: cm.f_measure(tag, alpha) for tag in cm._values} + + def evaluate_per_tag(self, gold, alpha=0.5, truncate=None, sort_by_count=False): + """Tabulate the **recall**, **precision** and **f-measure** + for each tag from ``gold`` or from running ``tag`` on the tokenized + sentences from ``gold``. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.evaluate_per_tag(gold_data)) + Tag | Prec. | Recall | F-measure + -------+--------+--------+----------- + '' | 1.0000 | 1.0000 | 1.0000 + , | 1.0000 | 1.0000 | 1.0000 + -NONE- | 0.0000 | 0.0000 | 0.0000 + . 
| 1.0000 | 1.0000 | 1.0000 + CC | 1.0000 | 1.0000 | 1.0000 + CD | 0.7143 | 1.0000 | 0.8333 + DT | 1.0000 | 1.0000 | 1.0000 + EX | 1.0000 | 1.0000 | 1.0000 + IN | 0.9167 | 0.8800 | 0.8980 + JJ | 0.8889 | 0.8889 | 0.8889 + JJR | 0.0000 | 0.0000 | 0.0000 + JJS | 1.0000 | 1.0000 | 1.0000 + MD | 1.0000 | 1.0000 | 1.0000 + NN | 0.8000 | 0.9333 | 0.8615 + NNP | 0.8929 | 1.0000 | 0.9434 + NNS | 0.9500 | 1.0000 | 0.9744 + POS | 1.0000 | 1.0000 | 1.0000 + PRP | 1.0000 | 1.0000 | 1.0000 + PRP$ | 1.0000 | 1.0000 | 1.0000 + RB | 0.4000 | 1.0000 | 0.5714 + RBR | 1.0000 | 0.5000 | 0.6667 + RP | 1.0000 | 1.0000 | 1.0000 + TO | 1.0000 | 1.0000 | 1.0000 + VB | 1.0000 | 1.0000 | 1.0000 + VBD | 0.8571 | 0.8571 | 0.8571 + VBG | 1.0000 | 0.8000 | 0.8889 + VBN | 1.0000 | 0.8000 | 0.8889 + VBP | 1.0000 | 1.0000 | 1.0000 + VBZ | 1.0000 | 1.0000 | 1.0000 + WDT | 0.0000 | 0.0000 | 0.0000 + `` | 1.0000 | 1.0000 | 1.0000 + + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on number of + occurrences of that tag in the ``gold`` data, defaults to False + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + cm = self.confusion(gold) + return cm.evaluate(alpha=alpha, truncate=truncate, sort_by_count=sort_by_count) + + def _check_params(self, train, model): + if (train and model) or (not train and not model): + raise ValueError("Must specify either training data or trained model.") + + +class FeaturesetTaggerI(TaggerI): + """ + A tagger that requires tokens to be ``featuresets``. A featureset + is a dictionary that maps from feature names to feature + values. See ``nltk.classify`` for more information about features + and featuresets. + """ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/brill.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/brill.py new file mode 100644 index 0000000000000000000000000000000000000000..d3bd1cd3b6cb10c4b62b7d23910e2a8ba9568cd2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/brill.py @@ -0,0 +1,449 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from collections import Counter, defaultdict + +from nltk import jsontags +from nltk.tag import TaggerI +from nltk.tbl import Feature, Template + +###################################################################### +# Brill Templates +###################################################################### + + +@jsontags.register_tag +class Word(Feature): + """ + Feature which examines the text (word) of nearby tokens. + """ + + json_tag = "nltk.tag.brill.Word" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's text.""" + return tokens[index][0] + + +@jsontags.register_tag +class Pos(Feature): + """ + Feature which examines the tags of nearby tokens. 
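+
+ For example, ``Pos([-1])`` conditions on the tag of the immediately preceding
+ token; the template sets defined below combine such positional features.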
+ """ + + json_tag = "nltk.tag.brill.Pos" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's tag.""" + return tokens[index][1] + + +def nltkdemo18(): + """ + Return 18 templates, from the original nltk demo, in multi-feature syntax + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-3, -2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-1]), Word([1])), + ] + + +def nltkdemo18plus(): + """ + Return 18 templates, from the original nltk demo, and additionally a few + multi-feature ones (the motivation is easy comparison with nltkdemo18) + """ + return nltkdemo18() + [ + Template(Word([-1]), Pos([1])), + Template(Pos([-1]), Word([1])), + Template(Word([-1]), Word([0]), Pos([1])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-1]), Word([0]), Pos([1])), + ] + + +def fntbl37(): + """ + Return 37 templates taken from the postagging task of the + fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/ + (37 is after excluding a handful which do not condition on Pos[0]; + fntbl can do that but the current nltk implementation cannot.) + """ + return [ + Template(Word([0]), Word([1]), Word([2])), + Template(Word([-1]), Word([0]), Word([1])), + Template(Word([0]), Word([-1])), + Template(Word([0]), Word([1])), + Template(Word([0]), Word([2])), + Template(Word([0]), Word([-2])), + Template(Word([1, 2])), + Template(Word([-2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-3, -2, -1])), + Template(Word([0]), Pos([2])), + Template(Word([0]), Pos([-2])), + Template(Word([0]), Pos([1])), + Template(Word([0]), Pos([-1])), + Template(Word([0])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([1])), + Template(Word([-1])), + Template(Pos([-1]), Pos([1])), + Template(Pos([1]), Pos([2])), + Template(Pos([-1]), Pos([-2])), + Template(Pos([1])), + Template(Pos([-1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([1, 2, 3])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([-2, -1])), + Template(Pos([1]), Word([0]), Word([1])), + Template(Pos([1]), Word([0]), Word([-1])), + Template(Pos([-1]), Word([-1]), Word([0])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Pos([1]), Pos([2]), Word([1])), + ] + + +def brill24(): + """ + Return 24 templates of the seminal TBL paper, Brill (1995) + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-1, 0])), + Template(Word([0, 1])), + Template(Word([0])), + Template(Word([-1]), Pos([-1])), + Template(Word([1]), Pos([1])), + Template(Word([0]), Word([-1]), Pos([-1])), + Template(Word([0]), Word([1]), Pos([1])), + ] + + +def describe_template_sets(): + """ + Print the available template sets in this demo, 
with a short description" + """ + import inspect + import sys + + # a bit of magic to get all functions in this module + templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction) + for (name, obj) in templatesets: + if name == "describe_template_sets": + continue + print(name, obj.__doc__, "\n") + + +###################################################################### +# The Brill Tagger +###################################################################### + + +@jsontags.register_tag +class BrillTagger(TaggerI): + """ + Brill's transformational rule-based tagger. Brill taggers use an + initial tagger (such as ``tag.DefaultTagger``) to assign an initial + tag sequence to a text; and then apply an ordered list of + transformational rules to correct the tags of individual tokens. + These transformation rules are specified by the ``TagRule`` + interface. + + Brill taggers can be created directly, from an initial tagger and + a list of transformational rules; but more often, Brill taggers + are created by learning rules from a training corpus, using one + of the TaggerTrainers available. + """ + + json_tag = "nltk.tag.BrillTagger" + + def __init__(self, initial_tagger, rules, training_stats=None): + """ + :param initial_tagger: The initial tagger + :type initial_tagger: TaggerI + + :param rules: An ordered list of transformation rules that + should be used to correct the initial tagging. + :type rules: list(TagRule) + + :param training_stats: A dictionary of statistics collected + during training, for possible later use + :type training_stats: dict + + """ + self._initial_tagger = initial_tagger + self._rules = tuple(rules) + self._training_stats = training_stats + + def encode_json_obj(self): + return self._initial_tagger, self._rules, self._training_stats + + @classmethod + def decode_json_obj(cls, obj): + _initial_tagger, _rules, _training_stats = obj + return cls(_initial_tagger, _rules, _training_stats) + + def rules(self): + """ + Return the ordered list of transformation rules that this tagger has learnt + + :return: the ordered list of transformation rules that correct the initial tagging + :rtype: list of Rules + """ + return self._rules + + def train_stats(self, statistic=None): + """ + Return a named statistic collected during training, or a dictionary of all + available statistics if no name given + + :param statistic: name of statistic + :type statistic: str + :return: some statistic collected during training of this tagger + :rtype: any (but usually a number) + """ + if statistic is None: + return self._training_stats + else: + return self._training_stats.get(statistic) + + def tag(self, tokens): + # Inherit documentation from TaggerI + + # Run the initial tagger. + tagged_tokens = self._initial_tagger.tag(tokens) + + # Create a dictionary that maps each tag to a list of the + # indices of tokens that have that tag. + tag_to_positions = defaultdict(set) + for i, (token, tag) in enumerate(tagged_tokens): + tag_to_positions[tag].add(i) + + # Apply each rule, in order. Only try to apply rules at + # positions that have the desired original tag. + for rule in self._rules: + # Find the positions where it might apply + positions = tag_to_positions.get(rule.original_tag, []) + # Apply the rule at those positions. + changed = rule.apply(tagged_tokens, positions) + # Update tag_to_positions with the positions of tags that + # were modified. 
+ for i in changed: + tag_to_positions[rule.original_tag].remove(i) + tag_to_positions[rule.replacement_tag].add(i) + + return tagged_tokens + + def print_template_statistics(self, test_stats=None, printunused=True): + """ + Print a list of all templates, ranked according to efficiency. + + If test_stats is available, the templates are ranked according to their + relative contribution (summed for all rules created from a given template, + weighted by score) to the performance on the test set. If no test_stats, then + statistics collected during training are used instead. There is also + an unweighted measure (just counting the rules). This is less informative, + though, as many low-score rules will appear towards end of training. + + :param test_stats: dictionary of statistics collected during testing + :type test_stats: dict of str -> any (but usually numbers) + :param printunused: if True, print a list of all unused templates + :type printunused: bool + :return: None + :rtype: None + """ + tids = [r.templateid for r in self._rules] + train_stats = self.train_stats() + + trainscores = train_stats["rulescores"] + assert len(trainscores) == len( + tids + ), "corrupt statistics: " "{} train scores for {} rules".format( + trainscores, tids + ) + template_counts = Counter(tids) + weighted_traincounts = Counter() + for (tid, score) in zip(tids, trainscores): + weighted_traincounts[tid] += score + tottrainscores = sum(trainscores) + + # det_tplsort() is for deterministic sorting; + # the otherwise convenient Counter.most_common() unfortunately + # does not break ties deterministically + # between python versions and will break cross-version tests + def det_tplsort(tpl_value): + return (tpl_value[1], repr(tpl_value[0])) + + def print_train_stats(): + print( + "TEMPLATE STATISTICS (TRAIN) {} templates, {} rules)".format( + len(template_counts), len(tids) + ) + ) + print( + "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f}".format(**train_stats) + ) + head = "#ID | Score (train) | #Rules | Template" + print(head, "\n", "-" * len(head), sep="") + train_tplscores = sorted( + weighted_traincounts.items(), key=det_tplsort, reverse=True + ) + for (tid, trainscore) in train_tplscores: + s = "{} | {:5d} {:5.3f} |{:4d} {:.3f} | {}".format( + tid, + trainscore, + trainscore / tottrainscores, + template_counts[tid], + template_counts[tid] / len(tids), + Template.ALLTEMPLATES[int(tid)], + ) + print(s) + + def print_testtrain_stats(): + testscores = test_stats["rulescores"] + print( + "TEMPLATE STATISTICS (TEST AND TRAIN) ({} templates, {} rules)".format( + len(template_counts), len(tids) + ) + ) + print( + "TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats) + ) + print( + "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats) + ) + weighted_testcounts = Counter() + for (tid, score) in zip(tids, testscores): + weighted_testcounts[tid] += score + tottestscores = sum(testscores) + head = "#ID | Score (test) | Score (train) | #Rules | Template" + print(head, "\n", "-" * len(head), sep="") + test_tplscores = sorted( + weighted_testcounts.items(), key=det_tplsort, reverse=True + ) + for (tid, testscore) in test_tplscores: + s = "{:s} |{:5d} {:6.3f} | {:4d} {:.3f} |{:4d} {:.3f} | {:s}".format( + tid, + testscore, + testscore / tottestscores, + weighted_traincounts[tid], + 
weighted_traincounts[tid] / tottrainscores, + template_counts[tid], + template_counts[tid] / len(tids), + Template.ALLTEMPLATES[int(tid)], + ) + print(s) + + def print_unused_templates(): + usedtpls = {int(tid) for tid in tids} + unused = [ + (tid, tpl) + for (tid, tpl) in enumerate(Template.ALLTEMPLATES) + if tid not in usedtpls + ] + print(f"UNUSED TEMPLATES ({len(unused)})") + + for (tid, tpl) in unused: + print(f"{tid:03d} {str(tpl):s}") + + if test_stats is None: + print_train_stats() + else: + print_testtrain_stats() + print() + if printunused: + print_unused_templates() + print() + + def batch_tag_incremental(self, sequences, gold): + """ + Tags by applying each rule to the entire corpus (rather than all rules to a + single sequence). The point is to collect statistics on the test set for + individual rules. + + NOTE: This is inefficient (does not build any index, so will traverse the entire + corpus N times for N rules) -- usually you would not care about statistics for + individual rules and thus use batch_tag() instead + + :param sequences: lists of token sequences (sentences, in some applications) to be tagged + :type sequences: list of list of strings + :param gold: the gold standard + :type gold: list of list of strings + :returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule)) + """ + + def counterrors(xs): + return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair)) + + testing_stats = {} + testing_stats["tokencount"] = sum(len(t) for t in sequences) + testing_stats["sequencecount"] = len(sequences) + tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences] + testing_stats["initialerrors"] = counterrors(tagged_tokenses) + testing_stats["initialacc"] = ( + 1 - testing_stats["initialerrors"] / testing_stats["tokencount"] + ) + # Apply each rule to the entire corpus, in order + errors = [testing_stats["initialerrors"]] + for rule in self._rules: + for tagged_tokens in tagged_tokenses: + rule.apply(tagged_tokens) + errors.append(counterrors(tagged_tokenses)) + testing_stats["rulescores"] = [ + err0 - err1 for (err0, err1) in zip(errors, errors[1:]) + ] + testing_stats["finalerrors"] = errors[-1] + testing_stats["finalacc"] = ( + 1 - testing_stats["finalerrors"] / testing_stats["tokencount"] + ) + return (tagged_tokenses, testing_stats) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/brill_trainer.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/brill_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..236fd9858e755b501f3a8f384b68a383b6902f99 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/brill_trainer.py @@ -0,0 +1,629 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import bisect +import textwrap +from collections import defaultdict + +from nltk.tag import BrillTagger, untag + +###################################################################### +# Brill Tagger Trainer +###################################################################### + + +class BrillTaggerTrainer: + """ + A trainer for tbl taggers. 
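+
+ In outline, a trainer is constructed from a baseline tagger and a list of
+ templates, and ``train()`` returns a ``BrillTagger``. The sketch below uses a
+ placeholder ``train_sents`` (any list of tagged sentences) and is skipped; the
+ doctest in ``train`` below walks through a complete run:
+
+ >>> from nltk.tbl.template import Template
+ >>> from nltk.tag.brill import Pos
+ >>> from nltk.tag import DefaultTagger, BrillTaggerTrainer
+ >>> trainer = BrillTaggerTrainer(DefaultTagger('NN'), [Template(Pos([-1]))]) # doctest: +SKIP
+ >>> brill_tagger = trainer.train(train_sents, max_rules=10) # doctest: +SKIP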
+ """ + + def __init__( + self, initial_tagger, templates, trace=0, deterministic=None, ruleformat="str" + ): + """ + Construct a Brill tagger from a baseline tagger and a + set of templates + + :param initial_tagger: the baseline tagger + :type initial_tagger: Tagger + :param templates: templates to be used in training + :type templates: list of Templates + :param trace: verbosity level + :type trace: int + :param deterministic: if True, adjudicate ties deterministically + :type deterministic: bool + :param ruleformat: format of reported Rules + :type ruleformat: str + :return: An untrained BrillTagger + :rtype: BrillTagger + """ + + if deterministic is None: + deterministic = trace > 0 + self._initial_tagger = initial_tagger + self._templates = templates + self._trace = trace + self._deterministic = deterministic + self._ruleformat = ruleformat + + self._tag_positions = None + """Mapping from tags to lists of positions that use that tag.""" + + self._rules_by_position = None + """Mapping from positions to the set of rules that are known + to occur at that position. Position is (sentnum, wordnum). + Initially, this will only contain positions where each rule + applies in a helpful way; but when we examine a rule, we'll + extend this list to also include positions where each rule + applies in a harmful or neutral way.""" + + self._positions_by_rule = None + """Mapping from rule to position to effect, specifying the + effect that each rule has on the overall score, at each + position. Position is (sentnum, wordnum); and effect is + -1, 0, or 1. As with _rules_by_position, this mapping starts + out only containing rules with positive effects; but when + we examine a rule, we'll extend this mapping to include + the positions where the rule is harmful or neutral.""" + + self._rules_by_score = None + """Mapping from scores to the set of rules whose effect on the + overall score is upper bounded by that score. Invariant: + rulesByScore[s] will contain r iff the sum of + _positions_by_rule[r] is s.""" + + self._rule_scores = None + """Mapping from rules to upper bounds on their effects on the + overall score. This is the inverse mapping to _rules_by_score. + Invariant: ruleScores[r] = sum(_positions_by_rule[r])""" + + self._first_unknown_position = None + """Mapping from rules to the first position where we're unsure + if the rule applies. This records the next position we + need to check to see if the rule messed anything up.""" + + # Training + + def train(self, train_sents, max_rules=200, min_score=2, min_acc=None): + r""" + Trains the Brill tagger on the corpus *train_sents*, + producing at most *max_rules* transformations, each of which + reduces the net number of errors in the corpus by at least + *min_score*, and each of which has accuracy not lower than + *min_acc*. + + >>> # Relevant imports + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Pos, Word + >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer + + >>> # Load some data + >>> from nltk.corpus import treebank + >>> training_data = treebank.tagged_sents()[:100] + >>> baseline_data = treebank.tagged_sents()[100:200] + >>> gold_data = treebank.tagged_sents()[200:300] + >>> testing_data = [untag(s) for s in gold_data] + + >>> backoff = RegexpTagger([ + ... (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers + ... (r'(The|the|A|a|An|an)$', 'AT'), # articles + ... (r'.*able$', 'JJ'), # adjectives + ... (r'.*ness$', 'NN'), # nouns formed from adjectives + ... (r'.*ly$', 'RB'), # adverbs + ... 
(r'.*s$', 'NNS'), # plural nouns + ... (r'.*ing$', 'VBG'), # gerunds + ... (r'.*ed$', 'VBD'), # past tense verbs + ... (r'.*', 'NN') # nouns (default) + ... ]) + + >>> baseline = backoff #see NOTE1 + >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS + 0.243... + + >>> # Set up templates + >>> Template._cleartemplates() #clear any templates created in earlier tests + >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))] + + >>> # Construct a BrillTaggerTrainer + >>> tt = BrillTaggerTrainer(baseline, templates, trace=3) + + >>> tagger1 = tt.train(training_data, max_rules=10) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 47 63 16 162 | NN->IN if Pos:NNS@[-1] + 33 33 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | IN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | IN->, if Pos:NNS@[-1] & Word:,@[0] + 22 27 5 24 | NN->-NONE- if Pos:VBD@[-1] + 17 17 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> tagger1.rules()[1:3] + (Rule('001', 'NN', ',', [(Pos([-1]),'NN'), (Word([0]),',')]), Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')])) + + >>> train_stats = tagger1.train_stats() + >>> [train_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1776, 1270, [132, 85, 69, 51, 47, 33, 26, 24, 22, 17]] + + >>> tagger1.print_template_statistics(printunused=False) + TEMPLATE STATISTICS (TRAIN) 2 templates, 10 rules) + TRAIN ( 2417 tokens) initial 1776 0.2652 final: 1270 0.4746 + #ID | Score (train) | #Rules | Template + -------------------------------------------- + 001 | 305 0.603 | 7 0.700 | Template(Pos([-1]),Word([0])) + 000 | 201 0.397 | 3 0.300 | Template(Pos([-1])) + + + + >>> round(tagger1.accuracy(gold_data),5) + 0.43834 + + >>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data) + + >>> tagged[33][12:] == [('foreign', 'IN'), ('debt', 'NN'), ('of', 'IN'), ('$', 'NN'), ('64', 'CD'), + ... ('billion', 'NN'), ('*U*', 'NN'), ('--', 'NN'), ('the', 'DT'), ('third-highest', 'NN'), ('in', 'NN'), + ... ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')] + True + + >>> [test_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1859, 1380, [100, 85, 67, 58, 27, 36, 27, 16, 31, 32]] + + >>> # A high-accuracy tagger + >>> tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: 0.99) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. 
if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 36 36 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | NN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | NN->, if Pos:NNS@[-1] & Word:,@[0] + 19 19 0 6 | NN->VB if Pos:TO@[-1] + 18 18 0 0 | CD->-NONE- if Pos:NN@[-1] & Word:0@[0] + 18 18 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> round(tagger2.accuracy(gold_data), 8) + 0.43996744 + + >>> tagger2.rules()[2:4] + (Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]), Rule('001', 'NN', 'IN', [(Pos([-1]),'NN'), (Word([0]),'of')])) + + # NOTE1: (!!FIXME) A far better baseline uses nltk.tag.UnigramTagger, + # with a RegexpTagger only as backoff. For instance, + # >>> baseline = UnigramTagger(baseline_data, backoff=backoff) + # However, as of Nov 2013, nltk.tag.UnigramTagger does not yield consistent results + # between python versions. The simplistic backoff above is a workaround to make doctests + # get consistent input. + + :param train_sents: training data + :type train_sents: list(list(tuple)) + :param max_rules: output at most max_rules rules + :type max_rules: int + :param min_score: stop training when no rules better than min_score can be found + :type min_score: int + :param min_acc: discard any rule with lower accuracy than min_acc + :type min_acc: float or None + :return: the learned tagger + :rtype: BrillTagger + """ + # FIXME: several tests are a bit too dependent on tracing format + # FIXME: tests in trainer.fast and trainer.brillorig are exact duplicates + + # Basic idea: Keep track of the rules that apply at each position. + # And keep track of the positions to which each rule applies. + + # Create a new copy of the training corpus, and run the + # initial tagger on it. We will progressively update this + # test corpus to look more like the training corpus. + test_sents = [ + list(self._initial_tagger.tag(untag(sent))) for sent in train_sents + ] + + # Collect some statistics on the training process + trainstats = {} + trainstats["min_acc"] = min_acc + trainstats["min_score"] = min_score + trainstats["tokencount"] = sum(len(t) for t in test_sents) + trainstats["sequencecount"] = len(test_sents) + trainstats["templatecount"] = len(self._templates) + trainstats["rulescores"] = [] + trainstats["initialerrors"] = sum( + tag[1] != truth[1] + for paired in zip(test_sents, train_sents) + for (tag, truth) in zip(*paired) + ) + trainstats["initialacc"] = ( + 1 - trainstats["initialerrors"] / trainstats["tokencount"] + ) + if self._trace > 0: + print( + "TBL train (fast) (seqs: {sequencecount}; tokens: {tokencount}; " + "tpls: {templatecount}; min score: {min_score}; min acc: {min_acc})".format( + **trainstats + ) + ) + + # Initialize our mappings. This will find any errors made + # by the initial tagger, and use those to generate repair + # rules, which are added to the rule mappings. + if self._trace: + print("Finding initial useful rules...") + self._init_mappings(test_sents, train_sents) + if self._trace: + print(f" Found {len(self._rule_scores)} useful rules.") + + # Let the user know what we're up to. + if self._trace > 2: + self._trace_header() + elif self._trace == 1: + print("Selecting rules...") + + # Repeatedly select the best rule, and add it to `rules`. + rules = [] + try: + while len(rules) < max_rules: + # Find the best rule, and add it to our rule list. 
+ rule = self._best_rule(train_sents, test_sents, min_score, min_acc) + if rule: + rules.append(rule) + score = self._rule_scores[rule] + trainstats["rulescores"].append(score) + else: + break # No more good rules left! + + # Report the rule that we found. + if self._trace > 1: + self._trace_rule(rule) + + # Apply the new rule at the relevant sites + self._apply_rule(rule, test_sents) + + # Update _tag_positions[rule.original_tag] and + # _tag_positions[rule.replacement_tag] for the affected + # positions (i.e., self._positions_by_rule[rule]). + self._update_tag_positions(rule) + + # Update rules that were affected by the change. + self._update_rules(rule, train_sents, test_sents) + + # The user can cancel training manually: + except KeyboardInterrupt: + print(f"Training stopped manually -- {len(rules)} rules found") + + # Discard our tag position mapping & rule mappings. + self._clean() + trainstats["finalerrors"] = trainstats["initialerrors"] - sum( + trainstats["rulescores"] + ) + trainstats["finalacc"] = ( + 1 - trainstats["finalerrors"] / trainstats["tokencount"] + ) + # Create and return a tagger from the rules we found. + return BrillTagger(self._initial_tagger, rules, trainstats) + + def _init_mappings(self, test_sents, train_sents): + """ + Initialize the tag position mapping & the rule related + mappings. For each error in test_sents, find new rules that + would correct them, and add them to the rule mappings. + """ + self._tag_positions = defaultdict(list) + self._rules_by_position = defaultdict(set) + self._positions_by_rule = defaultdict(dict) + self._rules_by_score = defaultdict(set) + self._rule_scores = defaultdict(int) + self._first_unknown_position = defaultdict(int) + # Scan through the corpus, initializing the tag_positions + # mapping and all the rule-related mappings. + for sentnum, sent in enumerate(test_sents): + for wordnum, (word, tag) in enumerate(sent): + + # Initialize tag_positions + self._tag_positions[tag].append((sentnum, wordnum)) + + # If it's an error token, update the rule-related mappings. + correct_tag = train_sents[sentnum][wordnum][1] + if tag != correct_tag: + for rule in self._find_rules(sent, wordnum, correct_tag): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + + def _clean(self): + self._tag_positions = None + self._rules_by_position = None + self._positions_by_rule = None + self._rules_by_score = None + self._rule_scores = None + self._first_unknown_position = None + + def _find_rules(self, sent, wordnum, new_tag): + """ + Use the templates to find rules that apply at index *wordnum* + in the sentence *sent* and generate the tag *new_tag*. + """ + for template in self._templates: + yield from template.applicable_rules(sent, wordnum, new_tag) + + def _update_rule_applies(self, rule, sentnum, wordnum, train_sents): + """ + Update the rule data tables to reflect the fact that + *rule* applies at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # If the rule is already known to apply here, ignore. + # (This only happens if the position's tag hasn't changed.) + if pos in self._positions_by_rule[rule]: + return + + # Update self._positions_by_rule. 
+ correct_tag = train_sents[sentnum][wordnum][1] + if rule.replacement_tag == correct_tag: + self._positions_by_rule[rule][pos] = 1 + elif rule.original_tag == correct_tag: + self._positions_by_rule[rule][pos] = -1 + else: # was wrong, remains wrong + self._positions_by_rule[rule][pos] = 0 + + # Update _rules_by_position + self._rules_by_position[pos].add(rule) + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] += self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + def _update_rule_not_applies(self, rule, sentnum, wordnum): + """ + Update the rule data tables to reflect the fact that *rule* + does not apply at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] -= self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + # Update _positions_by_rule + del self._positions_by_rule[rule][pos] + self._rules_by_position[pos].remove(rule) + + # Optional addition: if the rule now applies nowhere, delete + # all its dictionary entries. + + def _best_rule(self, train_sents, test_sents, min_score, min_acc): + """ + Find the next best rule. This is done by repeatedly taking a + rule with the highest score and stepping through the corpus to + see where it applies. When it makes an error (decreasing its + score) it's bumped down, and we try a new rule with the + highest score. When we find a rule which has the highest + score *and* which has been tested against the entire corpus, we + can conclude that it's the next best rule. + """ + for max_score in sorted(self._rules_by_score.keys(), reverse=True): + if len(self._rules_by_score) == 0: + return None + if max_score < min_score or max_score <= 0: + return None + best_rules = list(self._rules_by_score[max_score]) + if self._deterministic: + best_rules.sort(key=repr) + for rule in best_rules: + positions = self._tag_positions[rule.original_tag] + + unk = self._first_unknown_position.get(rule, (0, -1)) + start = bisect.bisect_left(positions, unk) + + for i in range(start, len(positions)): + sentnum, wordnum = positions[i] + if rule.applies(test_sents[sentnum], wordnum): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + if self._rule_scores[rule] < max_score: + self._first_unknown_position[rule] = (sentnum, wordnum + 1) + break # The update demoted the rule. + + if self._rule_scores[rule] == max_score: + self._first_unknown_position[rule] = (len(train_sents) + 1, 0) + # optimization: if no min_acc threshold given, don't bother computing accuracy + if min_acc is None: + return rule + else: + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + # acc here is fixed/(fixed+broken); could also be + # fixed/(fixed+broken+other) == num_fixed/len(changes) + acc = num_fixed / (num_fixed + num_broken) + if acc >= min_acc: + return rule + # else: rule too inaccurate, discard and try next + + # We demoted (or skipped due to < min_acc, if that was given) + # all the rules with score==max_score. 
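# A minimal, self-contained sketch (not part of this diff) of the lazy
# "demote and retry" search that _best_rule implements above: candidate
# scores start out as optimistic upper bounds and are only verified when a
# candidate reaches the top of the ranking.  All names below are invented
# for illustration.

def lazy_argmax(upper_bounds, true_score, min_score=1):
    # upper_bounds: dict mapping candidate -> optimistic score
    # true_score:   callable returning the verified score of a candidate
    bounds = dict(upper_bounds)
    while bounds:
        # deterministic tie-breaking, as with the trainer's `deterministic` flag
        best = max(bounds, key=lambda c: (bounds[c], repr(c)))
        if bounds[best] < min_score:
            return None                 # nothing good enough is left
        verified = true_score(best)
        if verified >= bounds[best]:
            return best                 # the bound was tight: this is the winner
        bounds[best] = verified         # demote and try the next candidate
    return None

# "b" looks best until it is verified and demoted, so "a" wins:
print(lazy_argmax({"a": 3, "b": 5}, {"a": 3, "b": 2}.get))   # a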
+ + assert min_acc is not None or not self._rules_by_score[max_score] + if not self._rules_by_score[max_score]: + del self._rules_by_score[max_score] + + def _apply_rule(self, rule, test_sents): + """ + Update *test_sents* by applying *rule* everywhere where its + conditions are met. + """ + update_positions = set(self._positions_by_rule[rule]) + new_tag = rule.replacement_tag + + if self._trace > 3: + self._trace_apply(len(update_positions)) + + # Update test_sents. + for (sentnum, wordnum) in update_positions: + text = test_sents[sentnum][wordnum][0] + test_sents[sentnum][wordnum] = (text, new_tag) + + def _update_tag_positions(self, rule): + """ + Update _tag_positions to reflect the changes to tags that are + made by *rule*. + """ + # Update the tag index. + for pos in self._positions_by_rule[rule]: + # Delete the old tag. + old_tag_positions = self._tag_positions[rule.original_tag] + old_index = bisect.bisect_left(old_tag_positions, pos) + del old_tag_positions[old_index] + # Insert the new tag. + new_tag_positions = self._tag_positions[rule.replacement_tag] + bisect.insort_left(new_tag_positions, pos) + + def _update_rules(self, rule, train_sents, test_sents): + """ + Check if we should add or remove any rules from consideration, + given the changes made by *rule*. + """ + # Collect a list of all positions that might be affected. + neighbors = set() + for sentnum, wordnum in self._positions_by_rule[rule]: + for template in self._templates: + n = template.get_neighborhood(test_sents[sentnum], wordnum) + neighbors.update([(sentnum, i) for i in n]) + + # Update the rules at each position. + num_obsolete = num_new = num_unseen = 0 + for sentnum, wordnum in neighbors: + test_sent = test_sents[sentnum] + correct_tag = train_sents[sentnum][wordnum][1] + + # Check if the change causes any rule at this position to + # stop matching; if so, then update our rule mappings + # accordingly. + old_rules = set(self._rules_by_position[sentnum, wordnum]) + for old_rule in old_rules: + if not old_rule.applies(test_sent, wordnum): + num_obsolete += 1 + self._update_rule_not_applies(old_rule, sentnum, wordnum) + + # Check if the change causes our templates to propose any + # new rules for this position. + for template in self._templates: + for new_rule in template.applicable_rules( + test_sent, wordnum, correct_tag + ): + if new_rule not in old_rules: + num_new += 1 + if new_rule not in self._rule_scores: + num_unseen += 1 + old_rules.add(new_rule) + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + # We may have caused other rules to match here, that are + # not proposed by our templates -- in particular, rules + # that are harmful or neutral. We therefore need to + # update any rule whose first_unknown_position is past + # this rule. 
+ for new_rule, pos in self._first_unknown_position.items(): + if pos > (sentnum, wordnum): + if new_rule not in old_rules: + num_new += 1 + if new_rule.applies(test_sent, wordnum): + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + if self._trace > 3: + self._trace_update_rules(num_obsolete, num_new, num_unseen) + + # Tracing + + def _trace_header(self): + print( + """ + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e +------------------+------------------------------------------------------- + """.rstrip() + ) + + def _trace_rule(self, rule): + assert self._rule_scores[rule] == sum(self._positions_by_rule[rule].values()) + + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + num_other = len([c for c in changes if c == 0]) + score = self._rule_scores[rule] + + rulestr = rule.format(self._ruleformat) + if self._trace > 2: + print( + "{:4d}{:4d}{:4d}{:4d} |".format( + score, num_fixed, num_broken, num_other + ), + end=" ", + ) + print( + textwrap.fill( + rulestr, + initial_indent=" " * 20, + width=79, + subsequent_indent=" " * 18 + "| ", + ).strip() + ) + else: + print(rulestr) + + def _trace_apply(self, num_updates): + prefix = " " * 18 + "|" + print(prefix) + print(prefix, f"Applying rule to {num_updates} positions.") + + def _trace_update_rules(self, num_obsolete, num_new, num_unseen): + prefix = " " * 18 + "|" + print(prefix, "Updated rule tables:") + print(prefix, (f" - {num_obsolete} rule applications removed")) + print( + prefix, + (f" - {num_new} rule applications added ({num_unseen} novel)"), + ) + print(prefix) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/crf.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/crf.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc728c8d55c5eecadd7dc214f756f5224b7f017 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/crf.py @@ -0,0 +1,207 @@ +# Natural Language Toolkit: Interface to the CRFSuite Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Long Duong +# URL: +# For license information, see LICENSE.TXT + +""" +A module for POS tagging using CRFSuite +""" + +import re +import unicodedata + +from nltk.tag.api import TaggerI + +try: + import pycrfsuite +except ImportError: + pass + + +class CRFTagger(TaggerI): + """ + A module for POS tagging using CRFSuite https://pypi.python.org/pypi/python-crfsuite + + >>> from nltk.tag import CRFTagger + >>> ct = CRFTagger() # doctest: +SKIP + + >>> train_data = [[('University','Noun'), ('is','Verb'), ('a','Det'), ('good','Adj'), ('place','Noun')], + ... 
[('dog','Noun'),('eat','Verb'),('meat','Noun')]] + + >>> ct.train(train_data,'model.crf.tagger') # doctest: +SKIP + >>> ct.tag_sents([['dog','is','good'], ['Cat','eat','meat']]) # doctest: +SKIP + [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]] + + >>> gold_sentences = [[('dog','Noun'),('is','Verb'),('good','Adj')] , [('Cat','Noun'),('eat','Verb'), ('meat','Noun')]] + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + + Setting learned model file + >>> ct = CRFTagger() # doctest: +SKIP + >>> ct.set_model_file('model.crf.tagger') # doctest: +SKIP + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + """ + + def __init__(self, feature_func=None, verbose=False, training_opt={}): + """ + Initialize the CRFSuite tagger + + :param feature_func: The function that extracts features for each token of a sentence. This function should take + 2 parameters: tokens and index which extract features at index position from tokens list. See the build in + _get_features function for more detail. + :param verbose: output the debugging messages during training. + :type verbose: boolean + :param training_opt: python-crfsuite training options + :type training_opt: dictionary + + Set of possible training options (using LBFGS training algorithm). + :'feature.minfreq': The minimum frequency of features. + :'feature.possible_states': Force to generate possible state features. + :'feature.possible_transitions': Force to generate possible transition features. + :'c1': Coefficient for L1 regularization. + :'c2': Coefficient for L2 regularization. + :'max_iterations': The maximum number of iterations for L-BFGS optimization. + :'num_memories': The number of limited memories for approximating the inverse hessian matrix. + :'epsilon': Epsilon for testing the convergence of the objective. + :'period': The duration of iterations to test the stopping criterion. + :'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the + improvement of the log likelihood over the last ${period} iterations is no greater than this threshold. + :'linesearch': The line search algorithm used in L-BFGS updates: + + - 'MoreThuente': More and Thuente's method, + - 'Backtracking': Backtracking method with regular Wolfe condition, + - 'StrongBacktracking': Backtracking method with strong Wolfe condition + :'max_linesearch': The maximum number of trials for the line search algorithm. + """ + + self._model_file = "" + self._tagger = pycrfsuite.Tagger() + + if feature_func is None: + self._feature_func = self._get_features + else: + self._feature_func = feature_func + + self._verbose = verbose + self._training_options = training_opt + self._pattern = re.compile(r"\d") + + def set_model_file(self, model_file): + self._model_file = model_file + self._tagger.open(self._model_file) + + def _get_features(self, tokens, idx): + """ + Extract basic features about this word including + - Current word + - is it capitalized? + - Does it have punctuation? + - Does it have a number? + - Suffixes up to length 3 + + Note that : we might include feature over previous word, next word etc. 
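# A small sketch (not part of this module) of a user-supplied feature
# function that adds the previous and next word, as the note above
# suggests.  The function name and feature labels are invented; only the
# (tokens, index) -> list-of-strings contract comes from the constructor
# documentation above.

def window_features(tokens, idx):
    token = tokens[idx]
    feats = ["WORD_" + token, "SUF_" + token[-3:]]
    if token[:1].isupper():
        feats.append("CAPITALIZATION")
    # context features over the neighbouring words
    feats.append("PREV_" + (tokens[idx - 1] if idx > 0 else "<BOS>"))
    feats.append("NEXT_" + (tokens[idx + 1] if idx < len(tokens) - 1 else "<EOS>"))
    return feats

print(window_features(["dog", "eat", "meat"], 1))
# ['WORD_eat', 'SUF_eat', 'PREV_dog', 'NEXT_meat']

# Hypothetical usage (requires python-crfsuite and training data):
#     ct = CRFTagger(feature_func=window_features)
#     ct.train(train_data, "model.crf.tagger")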
+ + :return: a list which contains the features + :rtype: list(str) + """ + token = tokens[idx] + + feature_list = [] + + if not token: + return feature_list + + # Capitalization + if token[0].isupper(): + feature_list.append("CAPITALIZATION") + + # Number + if re.search(self._pattern, token) is not None: + feature_list.append("HAS_NUM") + + # Punctuation + punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} + if all(unicodedata.category(x) in punc_cat for x in token): + feature_list.append("PUNCTUATION") + + # Suffix up to length 3 + if len(token) > 1: + feature_list.append("SUF_" + token[-1:]) + if len(token) > 2: + feature_list.append("SUF_" + token[-2:]) + if len(token) > 3: + feature_list.append("SUF_" + token[-3:]) + + feature_list.append("WORD_" + token) + + return feature_list + + def tag_sents(self, sents): + """ + Tag a list of sentences. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params sentences: list of sentences needed to tag. + :type sentences: list(list(str)) + :return: list of tagged sentences. + :rtype: list(list(tuple(str,str))) + """ + if self._model_file == "": + raise Exception( + " No model file is found !! Please use train or set_model_file function" + ) + + # We need the list of sentences instead of the list generator for matching the input and output + result = [] + for tokens in sents: + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + labels = self._tagger.tag(features) + + if len(labels) != len(tokens): + raise Exception(" Predicted Length Not Matched, Expect Errors !") + + tagged_sent = list(zip(tokens, labels)) + result.append(tagged_sent) + + return result + + def train(self, train_data, model_file): + """ + Train the CRF tagger using CRFSuite + :params train_data : is the list of annotated sentences. + :type train_data : list (list(tuple(str,str))) + :params model_file : the model will be saved to this file. + + """ + trainer = pycrfsuite.Trainer(verbose=self._verbose) + trainer.set_params(self._training_options) + + for sent in train_data: + tokens, labels = zip(*sent) + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + trainer.append(features, labels) + + # Now train the model, the output should be model_file + trainer.train(model_file) + # Save the model file + self.set_model_file(model_file) + + def tag(self, tokens): + """ + Tag a sentence using Python CRFSuite Tagger. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params tokens: list of tokens needed to tag. + :type tokens: list(str) + :return: list of tagged tokens. 
+ :rtype: list(tuple(str,str)) + """ + + return self.tag_sents([tokens])[0] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/hmm.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/hmm.py new file mode 100644 index 0000000000000000000000000000000000000000..6577789b883828ce01e84c0864de57eead81f12b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/hmm.py @@ -0,0 +1,1329 @@ +# Natural Language Toolkit: Hidden Markov Model +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Philip Blunsom +# Tiago Tresoldi (fixes) +# Steven Bird (fixes) +# Joseph Frazee (fixes) +# Steven Xu (fixes) +# URL: +# For license information, see LICENSE.TXT + +""" +Hidden Markov Models (HMMs) largely used to assign the correct label sequence +to sequential data or assess the probability of a given label and data +sequence. These models are finite state machines characterised by a number of +states, transitions between these states, and output symbols emitted while in +each state. The HMM is an extension to the Markov chain, where each state +corresponds deterministically to a given event. In the HMM the observation is +a probabilistic function of the state. HMMs share the Markov chain's +assumption, being that the probability of transition from one state to another +only depends on the current state - i.e. the series of states that led to the +current state are not used. They are also time invariant. + +The HMM is a directed graph, with probability weighted edges (representing the +probability of a transition between the source and sink states) where each +vertex emits an output symbol when entered. The symbol (or observation) is +non-deterministically generated. For this reason, knowing that a sequence of +output observations was generated by a given HMM does not mean that the +corresponding sequence of states (and what the current state is) is known. +This is the 'hidden' in the hidden markov model. + +Formally, a HMM can be characterised by: + +- the output observation alphabet. This is the set of symbols which may be + observed as output of the system. +- the set of states. +- the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These + represent the probability of transition to each state from a given state. +- the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These + represent the probability of observing each symbol in a given state. +- the initial state distribution. This gives the probability of starting + in each state. + +To ground this discussion, take a common NLP application, part-of-speech (POS) +tagging. An HMM is desirable for this task as the highest probability tag +sequence can be calculated for a given sequence of word forms. This differs +from other tagging techniques which often tag each word individually, seeking +to optimise each individual tagging greedily without regard to the optimal +combination of tags for a larger unit, such as a sentence. The HMM does this +with the Viterbi algorithm, which efficiently computes the optimal path +through the graph given the sequence of words forms. + +In POS tagging the states usually have a 1:1 correspondence with the tag +alphabet - i.e. each state represents a single tag. The output observation +alphabet is the set of word forms (the lexicon), and the remaining three +parameters are derived by a training regime. With this information the +probability of a given sentence can be easily derived, by simply summing the +probability of each distinct path through the model. 
Similarly, the highest +probability tagging sequence can be derived with the Viterbi algorithm, +yielding a state sequence which can be mapped into a tag sequence. + +This discussion assumes that the HMM has been trained. This is probably the +most difficult task with the model, and requires either MLE estimates of the +parameters or unsupervised learning using the Baum-Welch algorithm, a variant +of EM. + +For more information, please consult the source code for this module, +which includes extensive demonstration code. +""" + +import itertools +import re + +try: + import numpy as np +except ImportError: + pass + +from nltk.metrics import accuracy +from nltk.probability import ( + ConditionalFreqDist, + ConditionalProbDist, + DictionaryConditionalProbDist, + DictionaryProbDist, + FreqDist, + LidstoneProbDist, + MLEProbDist, + MutableProbDist, + RandomProbDist, +) +from nltk.tag.api import TaggerI +from nltk.util import LazyMap, unique_list + +_TEXT = 0 # index of text in a tuple +_TAG = 1 # index of tag in a tuple + + +def _identity(labeled_symbols): + return labeled_symbols + + +class HiddenMarkovModelTagger(TaggerI): + """ + Hidden Markov model class, a generative model for labelling sequence data. + These models define the joint probability of a sequence of symbols and + their labels (state transitions) as the product of the starting state + probability, the probability of each state transition, and the probability + of each observation being generated from each state. This is described in + more detail in the module documentation. + + This implementation is based on the HMM description in Chapter 8, Huang, + Acero and Hon, Spoken Language Processing and includes an extension for + training shallow HMM parsers or specialized HMMs as in Molina et. + al, 2002. A specialized HMM modifies training data by applying a + specialization function to create a new training set that is more + appropriate for sequential tagging with an HMM. A typical use case is + chunking. + + :param symbols: the set of output symbols (alphabet) + :type symbols: seq of any + :param states: a set of states representing state space + :type states: seq of any + :param transitions: transition probabilities; Pr(s_i | s_j) is the + probability of transition from state i given the model is in + state_j + :type transitions: ConditionalProbDistI + :param outputs: output probabilities; Pr(o_k | s_i) is the probability + of emitting symbol k when entering state i + :type outputs: ConditionalProbDistI + :param priors: initial state distribution; Pr(s_i) is the probability + of starting in state i + :type priors: ProbDistI + :param transform: an optional function for transforming training + instances, defaults to the identity function. 
+ :type transform: callable + """ + + def __init__( + self, symbols, states, transitions, outputs, priors, transform=_identity + ): + self._symbols = unique_list(symbols) + self._states = unique_list(states) + self._transitions = transitions + self._outputs = outputs + self._priors = priors + self._cache = None + self._transform = transform + + @classmethod + def _train( + cls, + labeled_sequence, + test_sequence=None, + unlabeled_sequence=None, + transform=_identity, + estimator=None, + **kwargs, + ): + + if estimator is None: + + def estimator(fd, bins): + return LidstoneProbDist(fd, 0.1, bins) + + labeled_sequence = LazyMap(transform, labeled_sequence) + symbols = unique_list(word for sent in labeled_sequence for word, tag in sent) + tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent) + + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised(labeled_sequence, estimator=estimator) + hmm = cls( + hmm._symbols, + hmm._states, + hmm._transitions, + hmm._outputs, + hmm._priors, + transform=transform, + ) + + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + if unlabeled_sequence: + max_iterations = kwargs.get("max_iterations", 5) + hmm = trainer.train_unsupervised( + unlabeled_sequence, model=hmm, max_iterations=max_iterations + ) + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + return hmm + + @classmethod + def train( + cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs + ): + """ + Train a new HiddenMarkovModelTagger using the given labeled and + unlabeled training instances. Testing will be performed if test + instances are provided. + + :return: a hidden markov model tagger + :rtype: HiddenMarkovModelTagger + :param labeled_sequence: a sequence of labeled training instances, + i.e. a list of sentences represented as tuples + :type labeled_sequence: list(list) + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param unlabeled_sequence: a sequence of unlabeled training instances, + i.e. a list of sentences represented as words + :type unlabeled_sequence: list(list) + :param transform: an optional function for transforming training + instances, defaults to the identity function, see ``transform()`` + :type transform: function + :param estimator: an optional function or class that maps a + condition's frequency distribution to its probability + distribution, defaults to a Lidstone distribution with gamma = 0.1 + :type estimator: class or function + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + :param max_iterations: number of Baum-Welch iterations to perform + :type max_iterations: int + """ + return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs) + + def probability(self, sequence): + """ + Returns the probability of the given symbol sequence. If the sequence + is labelled, then returns the joint probability of the symbol, state + sequence. Otherwise, uses the forward algorithm to find the + probability over all label sequences. 
+ + :return: the probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + return 2 ** (self.log_probability(self._transform(sequence))) + + def log_probability(self, sequence): + """ + Returns the log-probability of the given symbol sequence. If the + sequence is labelled, then returns the joint log-probability of the + symbol, state sequence. Otherwise, uses the forward algorithm to find + the log-probability over all label sequences. + + :return: the log-probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + sequence = self._transform(sequence) + + T = len(sequence) + + if T > 0 and sequence[0][_TAG]: + last_state = sequence[0][_TAG] + p = self._priors.logprob(last_state) + self._output_logprob( + last_state, sequence[0][_TEXT] + ) + for t in range(1, T): + state = sequence[t][_TAG] + p += self._transitions[last_state].logprob( + state + ) + self._output_logprob(state, sequence[t][_TEXT]) + last_state = state + return p + else: + alpha = self._forward_probability(sequence) + p = logsumexp2(alpha[T - 1]) + return p + + def tag(self, unlabeled_sequence): + """ + Tags the sequence with the highest probability state sequence. This + uses the best_path method to find the Viterbi path. + + :return: a labelled sequence of symbols + :rtype: list + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._tag(unlabeled_sequence) + + def _tag(self, unlabeled_sequence): + path = self._best_path(unlabeled_sequence) + return list(zip(unlabeled_sequence, path)) + + def _output_logprob(self, state, symbol): + """ + :return: the log probability of the symbol being observed in the given + state + :rtype: float + """ + return self._outputs[state].logprob(symbol) + + def _create_cache(self): + """ + The cache is a tuple (P, O, X, S) where: + + - S maps symbols to integers. 
I.e., it is the inverse + mapping from self._symbols; for each symbol s in + self._symbols, the following is true:: + + self._symbols[S[s]] == s + + - O is the log output probabilities:: + + O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) ) + + - X is the log transition probabilities:: + + X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) ) + + - P is the log prior probabilities:: + + P[i] = log( P(tag[0]=state[i]) ) + """ + if not self._cache: + N = len(self._states) + M = len(self._symbols) + P = np.zeros(N, np.float32) + X = np.zeros((N, N), np.float32) + O = np.zeros((N, M), np.float32) + for i in range(N): + si = self._states[i] + P[i] = self._priors.logprob(si) + for j in range(N): + X[i, j] = self._transitions[si].logprob(self._states[j]) + for k in range(M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + S = {} + for k in range(M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def _update_cache(self, symbols): + # add new symbols to the symbol table and repopulate the output + # probabilities and symbol table mapping + if symbols: + self._create_cache() + P, O, X, S = self._cache + for symbol in symbols: + if symbol not in self._symbols: + self._cache = None + self._symbols.append(symbol) + # don't bother with the work if there aren't any new symbols + if not self._cache: + N = len(self._states) + M = len(self._symbols) + Q = O.shape[1] + # add new columns to the output probability table without + # destroying the old probabilities + O = np.hstack([O, np.zeros((N, M - Q), np.float32)]) + for i in range(N): + si = self._states[i] + # only calculate probabilities for new symbols + for k in range(Q, M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + # only create symbol mappings for new symbols + for k in range(Q, M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def reset_cache(self): + self._cache = None + + def best_path(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. + + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path(unlabeled_sequence) + + def _best_path(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + self._create_cache() + self._update_cache(unlabeled_sequence) + P, O, X, S = self._cache + + V = np.zeros((T, N), np.float32) + B = -np.ones((T, N), int) + + V[0] = P + O[:, S[unlabeled_sequence[0]]] + for t in range(1, T): + for j in range(N): + vs = V[t - 1, :] + X[:, j] + best = np.argmax(vs) + V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]] + B[t, j] = best + + current = np.argmax(V[T - 1, :]) + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return list(map(self._states.__getitem__, sequence)) + + def best_path_simple(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. This uses a simple, direct method, and is included for + teaching purposes. 
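# A self-contained toy (not part of this module) that mirrors the Viterbi
# recursion used by _best_path above, with hand-picked log2 probabilities;
# the transition, output and prior numbers are made up.
import numpy as np

X = np.log2([[0.7, 0.3],      # X[i, j] = log2 P(next state j | state i)
             [0.4, 0.6]])
O = np.log2([[0.9, 0.1],      # O[i, k] = log2 P(symbol k | state i)
             [0.2, 0.8]])
P = np.log2([0.5, 0.5])       # P[i]    = log2 P(start in state i)
obs = [0, 1, 1]               # observed symbol indices

T, N = len(obs), len(P)
V = np.zeros((T, N))
B = -np.ones((T, N), int)
V[0] = P + O[:, obs[0]]
for t in range(1, T):
    for j in range(N):
        vs = V[t - 1] + X[:, j]
        B[t, j] = np.argmax(vs)
        V[t, j] = vs[B[t, j]] + O[j, obs[t]]

# follow the back-pointers from the best final state
state = int(np.argmax(V[T - 1]))
path = [state]
for t in range(T - 1, 0, -1):
    state = int(B[t, state])
    path.append(state)
print(path[::-1])   # [0, 1, 1] for these numbers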
+ + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path_simple(unlabeled_sequence) + + def _best_path_simple(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + V = np.zeros((T, N), np.float64) + B = {} + + # find the starting log probabilities for each state + symbol = unlabeled_sequence[0] + for i, state in enumerate(self._states): + V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol) + B[0, state] = None + + # find the maximum log probabilities for reaching each state at time t + for t in range(1, T): + symbol = unlabeled_sequence[t] + for j in range(N): + sj = self._states[j] + best = None + for i in range(N): + si = self._states[i] + va = V[t - 1, i] + self._transitions[si].logprob(sj) + if not best or va > best[0]: + best = (va, si) + V[t, j] = best[0] + self._output_logprob(sj, symbol) + B[t, sj] = best[1] + + # find the highest probability final state + best = None + for i in range(N): + val = V[T - 1, i] + if not best or val > best[0]: + best = (val, self._states[i]) + + # traverse the back-pointers B to find the state sequence + current = best[1] + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return sequence + + def random_sample(self, rng, length): + """ + Randomly sample the HMM to generate a sentence of a given length. This + samples the prior distribution then the observation distribution and + transition distribution for each subsequent observation and state. + This will mostly generate unintelligible garbage, but can provide some + amusement. + + :return: the randomly created state/observation sequence, + generated according to the HMM's probability + distributions. The SUBTOKENS have TEXT and TAG + properties containing the observation and state + respectively. + :rtype: list + :param rng: random number generator + :type rng: Random (or any object with a random() method) + :param length: desired output length + :type length: int + """ + + # sample the starting state and symbol prob dists + tokens = [] + state = self._sample_probdist(self._priors, rng.random(), self._states) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + for i in range(1, length): + # sample the state transition and symbol prob dists + state = self._sample_probdist( + self._transitions[state], rng.random(), self._states + ) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + return tokens + + def _sample_probdist(self, probdist, p, samples): + cum_p = 0 + for sample in samples: + add_p = probdist.prob(sample) + if cum_p <= p <= cum_p + add_p: + return sample + cum_p += add_p + raise Exception("Invalid probability distribution - " "does not sum to one") + + def entropy(self, unlabeled_sequence): + """ + Returns the entropy over labellings of the given sequence. This is + given by:: + + H(O) = - sum_S Pr(S | O) log Pr(S | O) + + where the summation ranges over all state sequences, S. Let + *Z = Pr(O) = sum_S Pr(S, O)}* where the summation ranges over all state + sequences and O is the observation sequence. 
As such the entropy can + be re-expressed as:: + + H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ] + = log Z - sum_S Pr(S | O) log Pr(S, 0) + = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t Pr(S_t | S_{t-1}) + sum_t Pr(O_t | S_t) ] + + The order of summation for the log terms can be flipped, allowing + dynamic programming to be used to calculate the entropy. Specifically, + we use the forward and backward probabilities (alpha, beta) giving:: + + H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0) + + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si) + + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st) + + This simply uses alpha and beta to find the probabilities of partial + sequences, constrained to include the given state(s) at some point in + time. + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropy = normalisation + + # starting state, t = 0 + for i, state in enumerate(self._states): + p = 2 ** (alpha[0, i] + beta[0, i] - normalisation) + entropy -= p * self._priors.logprob(state) + # print('p(s_0 = %s) =' % state, p) + + # state transitions + for t0 in range(T - 1): + t1 = t0 + 1 + for i0, s0 in enumerate(self._states): + for i1, s1 in enumerate(self._states): + p = 2 ** ( + alpha[t0, i0] + + self._transitions[s0].logprob(s1) + + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT]) + + beta[t1, i1] + - normalisation + ) + entropy -= p * self._transitions[s0].logprob(s1) + # print('p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p) + + # symbol emissions + for t in range(T): + for i, state in enumerate(self._states): + p = 2 ** (alpha[t, i] + beta[t, i] - normalisation) + entropy -= p * self._outputs[state].logprob( + unlabeled_sequence[t][_TEXT] + ) + # print('p(s_%d = %s) =' % (t, state), p) + + return entropy + + def point_entropy(self, unlabeled_sequence): + """ + Returns the pointwise entropy over the possible states at each + position in the chain, given the observation sequence. 
+ """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropies = np.zeros(T, np.float64) + probs = np.zeros(N, np.float64) + for t in range(T): + for s in range(N): + probs[s] = alpha[t, s] + beta[t, s] - normalisation + + for s in range(N): + entropies[t] -= 2 ** (probs[s]) * probs[s] + + return entropies + + def _exhaustive_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labeled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labeled_sequence[t] = (labeled_sequence[t][_TEXT], label) + lp = self.log_probability(labeled_sequence) + log_probs.append(lp) + normalisation = _log_add(*log_probs) + + entropy = 0 + for lp in log_probs: + lp -= normalisation + entropy -= 2 ** (lp) * lp + + return entropy + + def _exhaustive_point_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labelled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labelled_sequence[t] = (labelled_sequence[t][_TEXT], label) + lp = self.log_probability(labelled_sequence) + log_probs.append(lp) + + normalisation = _log_add(*log_probs) + + probabilities = _ninf_array((T, N)) + + for labelling, lp in zip(labellings, log_probs): + lp -= normalisation + for t, label in enumerate(labelling): + index = self._states.index(label) + probabilities[t, index] = _log_add(probabilities[t, index], lp) + + entropies = np.zeros(T, np.float64) + for t in range(T): + for s in range(N): + entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s] + + return entropies + + def _transitions_matrix(self): + """Return a matrix of transition log probabilities.""" + trans_iter = ( + self._transitions[sj].logprob(si) + for sj in self._states + for si in self._states + ) + + transitions_logprob = np.fromiter(trans_iter, dtype=np.float64) + N = len(self._states) + return transitions_logprob.reshape((N, N)).T + + def _outputs_vector(self, symbol): + """ + Return a vector with log probabilities of emitting a symbol + when entering states. + """ + out_iter = (self._output_logprob(sj, symbol) for sj in self._states) + return np.fromiter(out_iter, dtype=np.float64) + + def _forward_probability(self, unlabeled_sequence): + """ + Return the forward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence up to + and including t. 
+ + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + :return: the forward log probability matrix + :rtype: array + """ + T = len(unlabeled_sequence) + N = len(self._states) + alpha = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix() + + # Initialization + symbol = unlabeled_sequence[0][_TEXT] + for i, state in enumerate(self._states): + alpha[0, i] = self._priors.logprob(state) + self._output_logprob( + state, symbol + ) + + # Induction + for t in range(1, T): + symbol = unlabeled_sequence[t][_TEXT] + output_logprob = self._outputs_vector(symbol) + + for i in range(N): + summand = alpha[t - 1] + transitions_logprob[i] + alpha[t, i] = logsumexp2(summand) + output_logprob[i] + + return alpha + + def _backward_probability(self, unlabeled_sequence): + """ + Return the backward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence from t + .. T. + + :return: the backward log probability matrix + :rtype: array + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + T = len(unlabeled_sequence) + N = len(self._states) + beta = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix().T + + # initialise the backward values; + # "1" is an arbitrarily chosen value from Rabiner tutorial + beta[T - 1, :] = np.log2(1) + + # inductively calculate remaining backward values + for t in range(T - 2, -1, -1): + symbol = unlabeled_sequence[t + 1][_TEXT] + outputs = self._outputs_vector(symbol) + + for i in range(N): + summand = transitions_logprob[i] + beta[t + 1] + outputs + beta[t, i] = logsumexp2(summand) + + return beta + + def test(self, test_sequence, verbose=False, **kwargs): + """ + Tests the HiddenMarkovModelTagger instance. + + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + """ + + def words(sent): + return [word for (word, tag) in sent] + + def tags(sent): + return [tag for (word, tag) in sent] + + def flatten(seq): + return list(itertools.chain(*seq)) + + test_sequence = self._transform(test_sequence) + predicted_sequence = list(map(self._tag, map(words, test_sequence))) + + if verbose: + for test_sent, predicted_sent in zip(test_sequence, predicted_sequence): + print( + "Test:", + " ".join(f"{token}/{tag}" for (token, tag) in test_sent), + ) + print() + print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent)) + print() + print( + "HMM-tagged:", + " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent), + ) + print() + print( + "Entropy:", + self.entropy([(token, None) for (token, tag) in predicted_sent]), + ) + print() + print("-" * 60) + + test_tags = flatten(map(tags, test_sequence)) + predicted_tags = flatten(map(tags, predicted_sequence)) + + acc = accuracy(test_tags, predicted_tags) + count = sum(len(sent) for sent in test_sequence) + print("accuracy over %d tokens: %.2f" % (count, acc * 100)) + + def __repr__(self): + return "<HiddenMarkovModelTagger %d states and %d output symbols>" % ( + len(self._states), + len(self._symbols), + ) + + +class HiddenMarkovModelTrainer: + """ + Algorithms for learning HMM parameters from training data. These include + both supervised learning (MLE) and unsupervised learning (Baum-Welch).
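# A minimal usage sketch (not part of this diff): supervised (MLE) training
# with HiddenMarkovModelTrainer on a tiny hand-made corpus.  It assumes
# nltk and numpy are installed; the sentences, tags and expected output are
# only illustrative.
from nltk.tag.hmm import HiddenMarkovModelTrainer

tagged = [
    [("the", "DT"), ("dog", "NN"), ("barks", "VB")],
    [("the", "DT"), ("cat", "NN"), ("sleeps", "VB")],
]
states = ["DT", "NN", "VB"]
symbols = sorted({w for sent in tagged for (w, _) in sent})

trainer = HiddenMarkovModelTrainer(states, symbols)
tagger = trainer.train_supervised(tagged)    # MLE estimates by default
print(tagger.tag(["the", "cat", "barks"]))
# [('the', 'DT'), ('cat', 'NN'), ('barks', 'VB')] with these counts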
+ + Creates an HMM trainer to induce an HMM with the given states and + output symbol alphabet. A supervised and unsupervised training + method may be used. If either of the states or symbols are not given, + these may be derived from supervised training. + + :param states: the set of state labels + :type states: sequence of any + :param symbols: the set of observation symbols + :type symbols: sequence of any + """ + + def __init__(self, states=None, symbols=None): + self._states = states if states else [] + self._symbols = symbols if symbols else [] + + def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs): + """ + Trains the HMM using both (or either of) supervised and unsupervised + techniques. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the supervised training data, a set of + labelled sequences of observations + ex: [ (word_1, tag_1),...,(word_n,tag_n) ] + :type labelled_sequences: list + :param unlabeled_sequences: the unsupervised training data, a set of + sequences of observations + ex: [ word_1, ..., word_n ] + :type unlabeled_sequences: list + :param kwargs: additional arguments to pass to the training methods + """ + assert labeled_sequences or unlabeled_sequences + model = None + if labeled_sequences: + model = self.train_supervised(labeled_sequences, **kwargs) + if unlabeled_sequences: + if model: + kwargs["model"] = model + model = self.train_unsupervised(unlabeled_sequences, **kwargs) + return model + + def _baum_welch_step(self, sequence, model, symbol_to_number): + + N = len(model._states) + M = len(model._symbols) + T = len(sequence) + + # compute forward and backward probabilities + alpha = model._forward_probability(sequence) + beta = model._backward_probability(sequence) + + # find the log probability of the sequence + lpk = logsumexp2(alpha[T - 1]) + + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + transitions_logprob = model._transitions_matrix().T + + for t in range(T): + symbol = sequence[t][_TEXT] # not found? FIXME + next_symbol = None + if t < T - 1: + next_symbol = sequence[t + 1][_TEXT] # not found? FIXME + xi = symbol_to_number[symbol] + + next_outputs_logprob = model._outputs_vector(next_symbol) + alpha_plus_beta = alpha[t] + beta[t] + + if t < T - 1: + numer_add = ( + transitions_logprob + + next_outputs_logprob + + beta[t + 1] + + alpha[t].reshape(N, 1) + ) + A_numer = np.logaddexp2(A_numer, numer_add) + A_denom = np.logaddexp2(A_denom, alpha_plus_beta) + else: + B_denom = np.logaddexp2(A_denom, alpha_plus_beta) + + B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta) + + return lpk, A_numer, A_denom, B_numer, B_denom + + def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs): + """ + Trains the HMM using the Baum-Welch algorithm to maximise the + probability of the data sequence. This is a variant of the EM + algorithm, and is unsupervised in that it doesn't need the state + sequences for the symbols. The code is based on 'A Tutorial on Hidden + Markov Models and Selected Applications in Speech Recognition', + Lawrence Rabiner, IEEE, 1989. 
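# A sketch (not part of this diff) of unsupervised (Baum-Welch) training.
# Because _baum_welch_step reads token[_TEXT], each unlabeled token is
# passed as a (symbol, None) pair here.  States, symbols and the tiny
# corpus are invented, and max_iterations is kept small because EM can be
# slow; each iteration prints its log probability.
from nltk.tag.hmm import HiddenMarkovModelTrainer

states = ["S1", "S2"]
symbols = ["up", "down"]
sequences = [
    [("up", None), ("up", None), ("down", None)],
    [("down", None), ("up", None), ("up", None)],
]

trainer = HiddenMarkovModelTrainer(states, symbols)
hmm = trainer.train_unsupervised(sequences, max_iterations=5)
print(hmm.best_path([s for (s, _) in sequences[0]]))   # most likely hidden states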
+ + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param unlabeled_sequences: the training data, a set of + sequences of observations + :type unlabeled_sequences: list + + kwargs may include following parameters: + + :param model: a HiddenMarkovModelTagger instance used to begin + the Baum-Welch algorithm + :param max_iterations: the maximum number of EM iterations + :param convergence_logprob: the maximum change in log probability to + allow convergence + """ + + # create a uniform HMM, which will be iteratively refined, unless + # given an existing model + model = kwargs.get("model") + if not model: + priors = RandomProbDist(self._states) + transitions = DictionaryConditionalProbDist( + {state: RandomProbDist(self._states) for state in self._states} + ) + outputs = DictionaryConditionalProbDist( + {state: RandomProbDist(self._symbols) for state in self._states} + ) + model = HiddenMarkovModelTagger( + self._symbols, self._states, transitions, outputs, priors + ) + + self._states = model._states + self._symbols = model._symbols + + N = len(self._states) + M = len(self._symbols) + symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)} + + # update model prob dists so that they can be modified + # model._priors = MutableProbDist(model._priors, self._states) + + model._transitions = DictionaryConditionalProbDist( + { + s: MutableProbDist(model._transitions[s], self._states) + for s in self._states + } + ) + + if update_outputs: + model._outputs = DictionaryConditionalProbDist( + { + s: MutableProbDist(model._outputs[s], self._symbols) + for s in self._states + } + ) + + model.reset_cache() + + # iterate until convergence + converged = False + last_logprob = None + iteration = 0 + max_iterations = kwargs.get("max_iterations", 1000) + epsilon = kwargs.get("convergence_logprob", 1e-6) + + while not converged and iteration < max_iterations: + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + logprob = 0 + for sequence in unlabeled_sequences: + sequence = list(sequence) + if not sequence: + continue + + ( + lpk, + seq_A_numer, + seq_A_denom, + seq_B_numer, + seq_B_denom, + ) = self._baum_welch_step(sequence, model, symbol_numbers) + + # add these sums to the global A and B values + for i in range(N): + A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk) + B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk) + + A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk) + B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk) + + logprob += lpk + + # use the calculated values to update the transition and output + # probability values + for i in range(N): + logprob_Ai = A_numer[i] - A_denom[i] + logprob_Bi = B_numer[i] - B_denom[i] + + # We should normalize all probabilities (see p.391 Huang et al) + # Let sum(P) be K. + # We can divide each Pi by K to make sum(P) == 1. + # Pi' = Pi/K + # log2(Pi') = log2(Pi) - log2(K) + logprob_Ai -= logsumexp2(logprob_Ai) + logprob_Bi -= logsumexp2(logprob_Bi) + + # update output and transition probabilities + si = self._states[i] + + for j in range(N): + sj = self._states[j] + model._transitions[si].update(sj, logprob_Ai[j]) + + if update_outputs: + for k in range(M): + ok = self._symbols[k] + model._outputs[si].update(ok, logprob_Bi[k]) + + # Rabiner says the priors don't need to be updated. I don't + # believe him. 
FIXME + + # test for convergence + if iteration > 0 and abs(logprob - last_logprob) < epsilon: + converged = True + + print("iteration", iteration, "logprob", logprob) + iteration += 1 + last_logprob = logprob + + return model + + def train_supervised(self, labelled_sequences, estimator=None): + """ + Supervised training maximising the joint probability of the symbol and + state sequences. This is done via collecting frequencies of + transitions between states, symbol observations while within each + state and which states start a sentence. These frequency distributions + are then normalised into probability estimates, which can be + smoothed if desired. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the training data, a set of + labelled sequences of observations + :type labelled_sequences: list + :param estimator: a function taking + a FreqDist and a number of bins and returning a CProbDistI; + otherwise a MLE estimate is used + """ + + # default to the MLE estimate + if estimator is None: + estimator = lambda fdist, bins: MLEProbDist(fdist) + + # count occurrences of starting states, transitions out of each state + # and output symbols observed in each state + known_symbols = set(self._symbols) + known_states = set(self._states) + + starting = FreqDist() + transitions = ConditionalFreqDist() + outputs = ConditionalFreqDist() + for sequence in labelled_sequences: + lasts = None + for token in sequence: + state = token[_TAG] + symbol = token[_TEXT] + if lasts is None: + starting[state] += 1 + else: + transitions[lasts][state] += 1 + outputs[state][symbol] += 1 + lasts = state + + # update the state and symbol lists + if state not in known_states: + self._states.append(state) + known_states.add(state) + + if symbol not in known_symbols: + self._symbols.append(symbol) + known_symbols.add(symbol) + + # create probability distributions (with smoothing) + N = len(self._states) + pi = estimator(starting, N) + A = ConditionalProbDist(transitions, estimator, N) + B = ConditionalProbDist(outputs, estimator, len(self._symbols)) + + return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi) + + +def _ninf_array(shape): + res = np.empty(shape, np.float64) + res.fill(-np.inf) + return res + + +def logsumexp2(arr): + max_ = arr.max() + return np.log2(np.sum(2 ** (arr - max_))) + max_ + + +def _log_add(*values): + """ + Adds the logged values, returning the logarithm of the addition. 
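+    Logarithms here are base 2, as elsewhere in this module.  For example,
+    ``_log_add(-2.0, -2.0)`` adds two probabilities of 0.25 in log space and
+    returns ``-1.0``, i.e. ``log2(0.25 + 0.25)``.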
+ """ + x = max(values) + if x > -np.inf: + sum_diffs = 0 + for value in values: + sum_diffs += 2 ** (value - x) + return x + np.log2(sum_diffs) + else: + return x + + +def _create_hmm_tagger(states, symbols, A, B, pi): + def pd(values, samples): + d = dict(zip(samples, values)) + return DictionaryProbDist(d) + + def cpd(array, conditions, samples): + d = {} + for values, condition in zip(array, conditions): + d[condition] = pd(values, samples) + return DictionaryConditionalProbDist(d) + + A = cpd(A, states, states) + B = cpd(B, states, symbols) + pi = pd(pi, states) + return HiddenMarkovModelTagger( + symbols=symbols, states=states, transitions=A, outputs=B, priors=pi + ) + + +def _market_hmm_example(): + """ + Return an example HMM (described at page 381, Huang et al) + """ + states = ["bull", "bear", "static"] + symbols = ["up", "down", "unchanged"] + A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64) + B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64) + pi = np.array([0.5, 0.2, 0.3], np.float64) + + model = _create_hmm_tagger(states, symbols, A, B, pi) + return model, states, symbols + + +def demo(): + # demonstrates HMM probability calculation + + print() + print("HMM probability calculation demo") + print() + + model, states, symbols = _market_hmm_example() + + print("Testing", model) + + for test in [ + ["up", "up"], + ["up", "down", "up"], + ["down"] * 5, + ["unchanged"] * 5 + ["up"], + ]: + + sequence = [(t, None) for t in test] + + print("Testing with state sequence", test) + print("probability =", model.probability(sequence)) + print("tagging = ", model.tag([word for (word, tag) in sequence])) + print("p(tagged) = ", model.probability(sequence)) + print("H = ", model.entropy(sequence)) + print("H_exh = ", model._exhaustive_entropy(sequence)) + print("H(point) = ", model.point_entropy(sequence)) + print("H_exh(point)=", model._exhaustive_point_entropy(sequence)) + print() + + +def load_pos(num_sents): + from nltk.corpus import brown + + sentences = brown.tagged_sents(categories="news")[:num_sents] + + tag_re = re.compile(r"[*]|--|[^+*-]+") + tag_set = set() + symbols = set() + + cleaned_sentences = [] + for sentence in sentences: + for i in range(len(sentence)): + word, tag = sentence[i] + word = word.lower() # normalize + symbols.add(word) # log this word + # Clean up the tag. + tag = tag_re.match(tag).group() + tag_set.add(tag) + sentence[i] = (word, tag) # store cleaned-up tagged token + cleaned_sentences += [sentence] + + return cleaned_sentences, list(tag_set), list(symbols) + + +def demo_pos(): + # demonstrates POS tagging using supervised training + + print() + print("HMM POS tagging demo") + print() + + print("Training HMM...") + labelled_sequences, tag_set, symbols = load_pos(20000) + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised( + labelled_sequences[10:], + estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), + ) + + print("Testing...") + hmm.test(labelled_sequences[:10], verbose=True) + + +def _untag(sentences): + unlabeled = [] + for sentence in sentences: + unlabeled.append([(token[_TEXT], None) for token in sentence]) + return unlabeled + + +def demo_pos_bw( + test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5 +): + # demonstrates the Baum-Welch algorithm in POS tagging + + print() + print("Baum-Welch demo for POS tagging") + print() + + print("Training HMM (supervised, %d sentences)..." 
% supervised) + + sentences, tag_set, symbols = load_pos(test + supervised + unsupervised) + + symbols = set() + for sentence in sentences: + for token in sentence: + symbols.add(token[_TEXT]) + + trainer = HiddenMarkovModelTrainer(tag_set, list(symbols)) + hmm = trainer.train_supervised( + sentences[test : test + supervised], + estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), + ) + + hmm.test(sentences[:test], verbose=verbose) + + print("Training (unsupervised, %d sentences)..." % unsupervised) + # it's rather slow - so only use 10 samples by default + unlabeled = _untag(sentences[test + supervised :]) + hmm = trainer.train_unsupervised( + unlabeled, model=hmm, max_iterations=max_iterations + ) + hmm.test(sentences[:test], verbose=verbose) + + +def demo_bw(): + # demo Baum Welch by generating some sequences and then performing + # unsupervised training on them + + print() + print("Baum-Welch demo for market example") + print() + + model, states, symbols = _market_hmm_example() + + # generate some random sequences + training = [] + import random + + rng = random.Random() + rng.seed(0) + for i in range(10): + item = model.random_sample(rng, 5) + training.append([(i[0], None) for i in item]) + + # train on those examples, starting with the model that generated them + trainer = HiddenMarkovModelTrainer(states, symbols) + hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/hunpos.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/hunpos.py new file mode 100644 index 0000000000000000000000000000000000000000..e001c6d6dbc1257515ed1149abe6bab06f1c7337 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/hunpos.py @@ -0,0 +1,142 @@ +# Natural Language Toolkit: Interface to the HunPos POS-tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Ljunglöf +# Dávid Márk Nemeskey (modifications) +# Attila Zséder (modifications) +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the HunPos open-source POS-tagger. +""" + +import os +from subprocess import PIPE, Popen + +from nltk.internals import find_binary, find_file +from nltk.tag.api import TaggerI + +_hunpos_url = "https://code.google.com/p/hunpos/" + +_hunpos_charset = "ISO-8859-1" +"""The default encoding used by hunpos: ISO-8859-1.""" + + +class HunposTagger(TaggerI): + """ + A class for pos tagging with HunPos. The input is the paths to: + - a model trained on training data + - (optionally) the path to the hunpos-tag binary + - (optionally) the encoding of the training data (default: ISO-8859-1) + + Check whether the required "hunpos-tag" binary is available: + + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('hunpos-tag') + + Example: + >>> from nltk.tag import HunposTagger + >>> ht = HunposTagger('en_wsj.model') + >>> ht.tag('What is the airspeed of an unladen swallow ?'.split()) + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + >>> ht.close() + + This class communicates with the hunpos-tag binary via pipes. When the + tagger object is no longer needed, the close() method should be called to + free system resources. The class supports the context manager interface; if + used in a with statement, the close() method is invoked automatically: + + >>> with HunposTagger('en_wsj.model') as ht: + ... 
ht.tag('What is the airspeed of an unladen swallow ?'.split()) + ... + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + """ + + def __init__( + self, path_to_model, path_to_bin=None, encoding=_hunpos_charset, verbose=False + ): + """ + Starts the hunpos-tag executable and establishes a connection with it. + + :param path_to_model: The model file. + :param path_to_bin: The hunpos-tag binary. + :param encoding: The encoding used by the model. Unicode tokens + passed to the tag() and tag_sents() methods are converted to + this charset when they are sent to hunpos-tag. + The default is ISO-8859-1 (Latin-1). + + This parameter is ignored for str tokens, which are sent as-is. + The caller must ensure that tokens are encoded in the right charset. + """ + self._closed = True + hunpos_paths = [ + ".", + "/usr/bin", + "/usr/local/bin", + "/opt/local/bin", + "/Applications/bin", + "~/bin", + "~/Applications/bin", + ] + hunpos_paths = list(map(os.path.expanduser, hunpos_paths)) + + self._hunpos_bin = find_binary( + "hunpos-tag", + path_to_bin, + env_vars=("HUNPOS_TAGGER",), + searchpath=hunpos_paths, + url=_hunpos_url, + verbose=verbose, + ) + + self._hunpos_model = find_file( + path_to_model, env_vars=("HUNPOS_TAGGER",), verbose=verbose + ) + self._encoding = encoding + self._hunpos = Popen( + [self._hunpos_bin, self._hunpos_model], + shell=False, + stdin=PIPE, + stdout=PIPE, + stderr=PIPE, + ) + self._closed = False + + def __del__(self): + self.close() + + def close(self): + """Closes the pipe to the hunpos executable.""" + if not self._closed: + self._hunpos.communicate() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def tag(self, tokens): + """Tags a single sentence: a list of words. + The tokens should not contain any newline characters. + """ + for token in tokens: + assert "\n" not in token, "Tokens should not contain newlines" + if isinstance(token, str): + token = token.encode(self._encoding) + self._hunpos.stdin.write(token + b"\n") + # We write a final empty line to tell hunpos that the sentence is finished: + self._hunpos.stdin.write(b"\n") + self._hunpos.stdin.flush() + + tagged_tokens = [] + for token in tokens: + tagged = self._hunpos.stdout.readline().strip().split(b"\t") + tag = tagged[1] if len(tagged) > 1 else None + tagged_tokens.append((token, tag)) + # We have to read (and dismiss) the final empty line: + self._hunpos.stdout.readline() + + return tagged_tokens diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/mapping.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..0af1a0eef945b3cfb2bb3a5860b223a42dbaeae7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/mapping.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: Tagset Mapping +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nathan Schneider +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for converting POS tags from various treebanks +to the universal tagset of Petrov, Das, & McDonald. 
+ +The tagset consists of the following 12 coarse tags: + +VERB - verbs (all tenses and modes) +NOUN - nouns (common and proper) +PRON - pronouns +ADJ - adjectives +ADV - adverbs +ADP - adpositions (prepositions and postpositions) +CONJ - conjunctions +DET - determiners +NUM - cardinal numbers +PRT - particles or other function words +X - other: foreign words, typos, abbreviations +. - punctuation + +@see: https://arxiv.org/abs/1104.2086 and https://code.google.com/p/universal-pos-tags/ + +""" + +from collections import defaultdict +from os.path import join + +from nltk.data import load + +_UNIVERSAL_DATA = "taggers/universal_tagset" +_UNIVERSAL_TAGS = ( + "VERB", + "NOUN", + "PRON", + "ADJ", + "ADV", + "ADP", + "CONJ", + "DET", + "NUM", + "PRT", + "X", + ".", +) + +# _MAPPINGS = defaultdict(lambda: defaultdict(dict)) +# the mapping between tagset T1 and T2 returns UNK if applied to an unrecognized tag +_MAPPINGS = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK"))) + + +def _load_universal_map(fileid): + contents = load(join(_UNIVERSAL_DATA, fileid + ".map"), format="text") + + # When mapping to the Universal Tagset, + # map unknown inputs to 'X' not 'UNK' + _MAPPINGS[fileid]["universal"].default_factory = lambda: "X" + + for line in contents.splitlines(): + line = line.strip() + if line == "": + continue + fine, coarse = line.split("\t") + + assert coarse in _UNIVERSAL_TAGS, f"Unexpected coarse tag: {coarse}" + assert ( + fine not in _MAPPINGS[fileid]["universal"] + ), f"Multiple entries for original tag: {fine}" + + _MAPPINGS[fileid]["universal"][fine] = coarse + + +def tagset_mapping(source, target): + """ + Retrieve the mapping dictionary between tagsets. + + >>> tagset_mapping('ru-rnc', 'universal') == {'!': '.', 'A': 'ADJ', 'C': 'CONJ', 'AD': 'ADV',\ + 'NN': 'NOUN', 'VG': 'VERB', 'COMP': 'CONJ', 'NC': 'NUM', 'VP': 'VERB', 'P': 'ADP',\ + 'IJ': 'X', 'V': 'VERB', 'Z': 'X', 'VI': 'VERB', 'YES_NO_SENT': 'X', 'PTCL': 'PRT'} + True + """ + + if source not in _MAPPINGS or target not in _MAPPINGS[source]: + if target == "universal": + _load_universal_map(source) + # Added the new Russian National Corpus mappings because the + # Russian model for nltk.pos_tag() uses it. + _MAPPINGS["ru-rnc-new"]["universal"] = { + "A": "ADJ", + "A-PRO": "PRON", + "ADV": "ADV", + "ADV-PRO": "PRON", + "ANUM": "ADJ", + "CONJ": "CONJ", + "INTJ": "X", + "NONLEX": ".", + "NUM": "NUM", + "PARENTH": "PRT", + "PART": "PRT", + "PR": "ADP", + "PRAEDIC": "PRT", + "PRAEDIC-PRO": "PRON", + "S": "NOUN", + "S-PRO": "PRON", + "V": "VERB", + } + + return _MAPPINGS[source][target] + + +def map_tag(source, target, source_tag): + """ + Maps the tag from the source tagset to the target tagset. + + >>> map_tag('en-ptb', 'universal', 'VBZ') + 'VERB' + >>> map_tag('en-ptb', 'universal', 'VBP') + 'VERB' + >>> map_tag('en-ptb', 'universal', '``') + '.' 
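+
+    Mappings for other tagsets work the same way; for example, using the
+    Russian National Corpus mapping shown above (requires the
+    ``universal_tagset`` data):
+
+    >>> map_tag('ru-rnc', 'universal', 'A')  # doctest: +SKIP
+    'ADJ'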
+ """ + + # we need a systematic approach to naming + if target == "universal": + if source == "wsj": + source = "en-ptb" + if source == "brown": + source = "en-brown" + + return tagset_mapping(source, target)[source_tag] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/perceptron.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..9afe08f0c8d6a9d5852a225e6c9569a291fb1e3d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/perceptron.py @@ -0,0 +1,371 @@ +# This module is a port of the Textblob Averaged Perceptron Tagger +# Author: Matthew Honnibal , +# Long Duong (NLTK port) +# URL: +# +# Copyright 2013 Matthew Honnibal +# NLTK modifications Copyright 2015 The NLTK Project +# +# This module is provided under the terms of the MIT License. + +import logging +import pickle +import random +from collections import defaultdict + +from nltk import jsontags +from nltk.data import find, load +from nltk.tag.api import TaggerI + +try: + import numpy as np +except ImportError: + pass + +PICKLE = "averaged_perceptron_tagger.pickle" + + +@jsontags.register_tag +class AveragedPerceptron: + + """An averaged perceptron, as implemented by Matthew Honnibal. + + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + """ + + json_tag = "nltk.tag.perceptron.AveragedPerceptron" + + def __init__(self, weights=None): + # Each feature gets its own weight vector, so weights is a dict-of-dicts + self.weights = weights if weights else {} + self.classes = set() + # The accumulated values, for the averaging. These will be keyed by + # feature/clas tuples + self._totals = defaultdict(int) + # The last time the feature was changed, for the averaging. 
Also + # keyed by feature/clas tuples + # (tstamps is short for timestamps) + self._tstamps = defaultdict(int) + # Number of instances seen + self.i = 0 + + def _softmax(self, scores): + s = np.fromiter(scores.values(), dtype=float) + exps = np.exp(s) + return exps / np.sum(exps) + + def predict(self, features, return_conf=False): + """Dot-product the features and current weights and return the best label.""" + scores = defaultdict(float) + for feat, value in features.items(): + if feat not in self.weights or value == 0: + continue + weights = self.weights[feat] + for label, weight in weights.items(): + scores[label] += value * weight + + # Do a secondary alphabetic sort, for stability + best_label = max(self.classes, key=lambda label: (scores[label], label)) + # compute the confidence + conf = max(self._softmax(scores)) if return_conf == True else None + + return best_label, conf + + def update(self, truth, guess, features): + """Update the feature weights.""" + + def upd_feat(c, f, w, v): + param = (f, c) + self._totals[param] += (self.i - self._tstamps[param]) * w + self._tstamps[param] = self.i + self.weights[f][c] = w + v + + self.i += 1 + if truth == guess: + return None + for f in features: + weights = self.weights.setdefault(f, {}) + upd_feat(truth, f, weights.get(truth, 0.0), 1.0) + upd_feat(guess, f, weights.get(guess, 0.0), -1.0) + + def average_weights(self): + """Average weights from all iterations.""" + for feat, weights in self.weights.items(): + new_feat_weights = {} + for clas, weight in weights.items(): + param = (feat, clas) + total = self._totals[param] + total += (self.i - self._tstamps[param]) * weight + averaged = round(total / self.i, 3) + if averaged: + new_feat_weights[clas] = averaged + self.weights[feat] = new_feat_weights + + def save(self, path): + """Save the pickled model weights.""" + with open(path, "wb") as fout: + return pickle.dump(dict(self.weights), fout) + + def load(self, path): + """Load the pickled model weights.""" + self.weights = load(path) + + def encode_json_obj(self): + return self.weights + + @classmethod + def decode_json_obj(cls, obj): + return cls(obj) + + +@jsontags.register_tag +class PerceptronTagger(TaggerI): + + """ + Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal. + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + + >>> from nltk.tag.perceptron import PerceptronTagger + + Train the model + + >>> tagger = PerceptronTagger(load=False) + + >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')], + ... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]]) + + >>> tagger.tag(['today','is','a','beautiful','day']) + [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')] + + Use the pretrain model (the default constructor) + + >>> pretrain = PerceptronTagger() + + >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split()) + [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')] + + >>> pretrain.tag("The red cat".split()) + [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')] + """ + + json_tag = "nltk.tag.sequential.PerceptronTagger" + + START = ["-START-", "-START2-"] + END = ["-END-", "-END2-"] + + def __init__(self, load=True): + """ + :param load: Load the pickled model upon instantiation. 
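+            If the pickled model is not present locally, it can usually be
+            fetched first (the package name is assumed here from the
+            ``taggers/averaged_perceptron_tagger`` path used below):
+
+            >>> import nltk; nltk.download('averaged_perceptron_tagger')  # doctest: +SKIP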
+ """ + self.model = AveragedPerceptron() + self.tagdict = {} + self.classes = set() + if load: + AP_MODEL_LOC = "file:" + str( + find("taggers/averaged_perceptron_tagger/" + PICKLE) + ) + self.load(AP_MODEL_LOC) + + def tag(self, tokens, return_conf=False, use_tagdict=True): + """ + Tag tokenized sentences. + :params tokens: list of word + :type tokens: list(str) + """ + prev, prev2 = self.START + output = [] + + context = self.START + [self.normalize(w) for w in tokens] + self.END + for i, word in enumerate(tokens): + tag, conf = ( + (self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None) + ) + if not tag: + features = self._get_features(i, word, context, prev, prev2) + tag, conf = self.model.predict(features, return_conf) + output.append((word, tag, conf) if return_conf == True else (word, tag)) + + prev2 = prev + prev = tag + + return output + + def train(self, sentences, save_loc=None, nr_iter=5): + """Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` + controls the number of Perceptron training iterations. + + :param sentences: A list or iterator of sentences, where each sentence + is a list of (words, tags) tuples. + :param save_loc: If not ``None``, saves a pickled model in this location. + :param nr_iter: Number of training iterations. + """ + # We'd like to allow ``sentences`` to be either a list or an iterator, + # the latter being especially important for a large training dataset. + # Because ``self._make_tagdict(sentences)`` runs regardless, we make + # it populate ``self._sentences`` (a list) with all the sentences. + # This saves the overheard of just iterating through ``sentences`` to + # get the list by ``sentences = list(sentences)``. + + self._sentences = list() # to be populated by self._make_tagdict... + self._make_tagdict(sentences) + self.model.classes = self.classes + for iter_ in range(nr_iter): + c = 0 + n = 0 + for sentence in self._sentences: + words, tags = zip(*sentence) + + prev, prev2 = self.START + context = self.START + [self.normalize(w) for w in words] + self.END + for i, word in enumerate(words): + guess = self.tagdict.get(word) + if not guess: + feats = self._get_features(i, word, context, prev, prev2) + guess, _ = self.model.predict(feats) + self.model.update(tags[i], guess, feats) + prev2 = prev + prev = guess + c += guess == tags[i] + n += 1 + random.shuffle(self._sentences) + logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}") + + # We don't need the training sentences anymore, and we don't want to + # waste space on them when we pickle the trained tagger. + self._sentences = None + + self.model.average_weights() + # Pickle as a binary file + if save_loc is not None: + with open(save_loc, "wb") as fout: + # changed protocol from -1 to 2 to make pickling Python 2 compatible + pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2) + + def load(self, loc): + """ + :param loc: Load a pickled model at location. + :type loc: str + """ + + self.model.weights, self.tagdict, self.classes = load(loc) + self.model.classes = self.classes + + def encode_json_obj(self): + return self.model.weights, self.tagdict, list(self.classes) + + @classmethod + def decode_json_obj(cls, obj): + tagger = cls(load=False) + tagger.model.weights, tagger.tagdict, tagger.classes = obj + tagger.classes = set(tagger.classes) + tagger.model.classes = tagger.classes + return tagger + + def normalize(self, word): + """ + Normalization used in pre-processing. 
+ - All words are lower cased + - Groups of digits of length 4 are represented as !YEAR; + - Other digits are represented as !DIGITS + + :rtype: str + """ + if "-" in word and word[0] != "-": + return "!HYPHEN" + if word.isdigit() and len(word) == 4: + return "!YEAR" + if word and word[0].isdigit(): + return "!DIGITS" + return word.lower() + + def _get_features(self, i, word, context, prev, prev2): + """Map tokens into a feature representation, implemented as a + {hashable: int} dict. If the features change, a new model must be + trained. + """ + + def add(name, *args): + features[" ".join((name,) + tuple(args))] += 1 + + i += len(self.START) + features = defaultdict(int) + # It's useful to have a constant feature, which acts sort of like a prior + add("bias") + add("i suffix", word[-3:]) + add("i pref1", word[0] if word else "") + add("i-1 tag", prev) + add("i-2 tag", prev2) + add("i tag+i-2 tag", prev, prev2) + add("i word", context[i]) + add("i-1 tag+i word", prev, context[i]) + add("i-1 word", context[i - 1]) + add("i-1 suffix", context[i - 1][-3:]) + add("i-2 word", context[i - 2]) + add("i+1 word", context[i + 1]) + add("i+1 suffix", context[i + 1][-3:]) + add("i+2 word", context[i + 2]) + return features + + def _make_tagdict(self, sentences): + """ + Make a tag dictionary for single-tag words. + :param sentences: A list of list of (word, tag) tuples. + """ + counts = defaultdict(lambda: defaultdict(int)) + for sentence in sentences: + self._sentences.append(sentence) + for word, tag in sentence: + counts[word][tag] += 1 + self.classes.add(tag) + freq_thresh = 20 + ambiguity_thresh = 0.97 + for word, tag_freqs in counts.items(): + tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) + n = sum(tag_freqs.values()) + # Don't add rare words to the tag dictionary + # Only add quite unambiguous words + if n >= freq_thresh and (mode / n) >= ambiguity_thresh: + self.tagdict[word] = tag + + +def _pc(n, d): + return (n / d) * 100 + + +def _load_data_conll_format(filename): + print("Read from file: ", filename) + with open(filename, "rb") as fin: + sentences = [] + sentence = [] + for line in fin.readlines(): + line = line.strip() + # print line + if len(line) == 0: + sentences.append(sentence) + sentence = [] + continue + tokens = line.split("\t") + word = tokens[1] + tag = tokens[4] + sentence.append((word, tag)) + return sentences + + +def _get_pretrain_model(): + # Train and test on English part of ConLL data (WSJ part of Penn Treebank) + # Train: section 2-11 + # Test : section 23 + tagger = PerceptronTagger() + training = _load_data_conll_format("english_ptb_train.conll") + testing = _load_data_conll_format("english_ptb_test.conll") + print("Size of training and testing (sentence)", len(training), len(testing)) + # Train and save the model + tagger.train(training, PICKLE) + print("Accuracy : ", tagger.accuracy(testing)) + + +if __name__ == "__main__": + # _get_pretrain_model() + pass diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/senna.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/senna.py new file mode 100644 index 0000000000000000000000000000000000000000..7b52b7ee0a7bc01614c3a2a397a6ffce47835999 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/senna.py @@ -0,0 +1,134 @@ +# Natural Language Toolkit: Senna POS Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +Senna POS tagger, NER Tagger, Chunk Tagger + +The input is: + +- path to the directory that 
contains SENNA executables. If the path is incorrect, + SennaTagger will automatically search for executable file specified in SENNA environment variable +- (optionally) the encoding of the input data (default:utf-8) + +Note: Unit tests for this module can be found in test/unit/test_senna.py + +>>> from nltk.tag import SennaTagger +>>> tagger = SennaTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> tagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), +('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'NN'), ('?', '.')] + +>>> from nltk.tag import SennaChunkTagger +>>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> chktagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), +('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), +('?', 'O')] + +>>> from nltk.tag import SennaNERTagger +>>> nertagger = SennaNERTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> nertagger.tag('Shakespeare theatre was in London .'.split()) # doctest: +SKIP +[('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'), ('in', 'O'), +('London', 'B-LOC'), ('.', 'O')] +>>> nertagger.tag('UN headquarters are in NY , USA .'.split()) # doctest: +SKIP +[('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), +('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')] +""" + +from nltk.classify import Senna + + +class SennaTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["pos"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["pos"]) + return tagged_sents + + +class SennaChunkTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["chk"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["chk"]) + return tagged_sents + + def bio_to_chunks(self, tagged_sent, chunk_type): + """ + Extracts the chunks in a BIO chunk-tagged sentence. + + >>> from nltk.tag import SennaChunkTagger + >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP + >>> sent = 'What is the airspeed of an unladen swallow ?'.split() + >>> tagged_sent = chktagger.tag(sent) # doctest: +SKIP + >>> tagged_sent # doctest: +SKIP + [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), + ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), + ('?', 'O')] + >>> list(chktagger.bio_to_chunks(tagged_sent, chunk_type='NP')) # doctest: +SKIP + [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')] + + :param tagged_sent: A list of tuples of word and BIO chunk tag. + :type tagged_sent: list(tuple) + :param tagged_sent: The chunk tag that users want to extract, e.g. 
'NP' or 'VP' + :type tagged_sent: str + + :return: An iterable of tuples of chunks that users want to extract + and their corresponding indices. + :rtype: iter(tuple(str)) + """ + current_chunk = [] + current_chunk_position = [] + for idx, word_pos in enumerate(tagged_sent): + word, pos = word_pos + if "-" + chunk_type in pos: # Append the word to the current_chunk. + current_chunk.append(word) + current_chunk_position.append(idx) + else: + if current_chunk: # Flush the full chunk when out of an NP. + _chunk_str = " ".join(current_chunk) + _chunk_pos_str = "-".join(map(str, current_chunk_position)) + yield _chunk_str, _chunk_pos_str + current_chunk = [] + current_chunk_position = [] + if current_chunk: # Flush the last chunk. + yield " ".join(current_chunk), "-".join(map(str, current_chunk_position)) + + +class SennaNERTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["ner"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["ner"]) + return tagged_sents diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/sequential.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..3fb85c9fade8079ad5fd4ba7a517939741cb2440 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/sequential.py @@ -0,0 +1,755 @@ +# Natural Language Toolkit: Sequential Backoff Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tiago Tresoldi (original affix tagger) +# URL: +# For license information, see LICENSE.TXT + +""" +Classes for tagging sentences sequentially, left to right. The +abstract base class SequentialBackoffTagger serves as the base +class for all the taggers in this module. Tagging of individual words +is performed by the method ``choose_tag()``, which is defined by +subclasses of SequentialBackoffTagger. If a tagger is unable to +determine a tag for the specified token, then its backoff tagger is +consulted instead. Any SequentialBackoffTagger may serve as a +backoff tagger for any other SequentialBackoffTagger. +""" +import ast +import re +from abc import abstractmethod +from typing import List, Optional, Tuple + +from nltk import jsontags +from nltk.classify import NaiveBayesClassifier +from nltk.probability import ConditionalFreqDist +from nltk.tag.api import FeaturesetTaggerI, TaggerI + + +###################################################################### +# Abstract Base Classes +###################################################################### +class SequentialBackoffTagger(TaggerI): + """ + An abstract base class for taggers that tags words sequentially, + left to right. Tagging of individual words is performed by the + ``choose_tag()`` method, which should be defined by subclasses. If + a tagger is unable to determine a tag for the specified token, + then its backoff tagger is consulted. + + :ivar _taggers: A list of all the taggers that should be tried to + tag a token (i.e., self and its backoff taggers). 
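+
+    An illustrative sketch of a two-tagger backoff chain, built from classes
+    defined later in this module (assumes the Brown corpus data is installed):
+
+    >>> from nltk.corpus import brown
+    >>> from nltk.tag import DefaultTagger, UnigramTagger
+    >>> train_sents = brown.tagged_sents(categories='news')[:500]          # doctest: +SKIP
+    >>> tagger = UnigramTagger(train_sents, backoff=DefaultTagger('NN'))   # doctest: +SKIP
+    >>> tagger.tag('The cat sat on the mat'.split())                       # doctest: +SKIP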
+ """ + + def __init__(self, backoff=None): + if backoff is None: + self._taggers = [self] + else: + self._taggers = [self] + backoff._taggers + + @property + def backoff(self): + """The backoff tagger for this tagger.""" + return self._taggers[1] if len(self._taggers) > 1 else None + + def tag(self, tokens): + # docs inherited from TaggerI + tags = [] + for i in range(len(tokens)): + tags.append(self.tag_one(tokens, i, tags)) + return list(zip(tokens, tags)) + + def tag_one(self, tokens, index, history): + """ + Determine an appropriate tag for the specified token, and + return that tag. If this tagger is unable to determine a tag + for the specified token, then its backoff tagger is consulted. + + :rtype: str + :type tokens: list + :param tokens: The list of words that are being tagged. + :type index: int + :param index: The index of the word whose tag should be + returned. + :type history: list(str) + :param history: A list of the tags for all words before *index*. + """ + tag = None + for tagger in self._taggers: + tag = tagger.choose_tag(tokens, index, history) + if tag is not None: + break + return tag + + @abstractmethod + def choose_tag(self, tokens, index, history): + """ + Decide which tag should be used for the specified token, and + return that tag. If this tagger is unable to determine a tag + for the specified token, return None -- do not consult + the backoff tagger. This method should be overridden by + subclasses of SequentialBackoffTagger. + + :rtype: str + :type tokens: list + :param tokens: The list of words that are being tagged. + :type index: int + :param index: The index of the word whose tag should be + returned. + :type history: list(str) + :param history: A list of the tags for all words before *index*. + """ + + +class ContextTagger(SequentialBackoffTagger): + """ + An abstract base class for sequential backoff taggers that choose + a tag for a token based on the value of its "context". Different + subclasses are used to define different contexts. + + A ContextTagger chooses the tag for a token by calculating the + token's context, and looking up the corresponding tag in a table. + This table can be constructed manually; or it can be automatically + constructed based on a training corpus, using the ``_train()`` + factory method. + + :ivar _context_to_tag: Dictionary mapping contexts to tags. + """ + + def __init__(self, context_to_tag, backoff=None): + """ + :param context_to_tag: A dictionary mapping contexts to tags. + :param backoff: The backoff tagger that should be used for this tagger. + """ + super().__init__(backoff) + self._context_to_tag = context_to_tag if context_to_tag else {} + + @abstractmethod + def context(self, tokens, index, history): + """ + :return: the context that should be used to look up the tag + for the specified token; or None if the specified token + should not be handled by this tagger. + :rtype: (hashable) + """ + + def choose_tag(self, tokens, index, history): + context = self.context(tokens, index, history) + return self._context_to_tag.get(context) + + def size(self): + """ + :return: The number of entries in the table used by this + tagger to map from contexts to tags. + """ + return len(self._context_to_tag) + + def __repr__(self): + return f"<{self.__class__.__name__}: size={self.size()}>" + + def _train(self, tagged_corpus, cutoff=0, verbose=False): + """ + Initialize this ContextTagger's ``_context_to_tag`` table + based on the given training data. 
In particular, for each + context ``c`` in the training data, set + ``_context_to_tag[c]`` to the most frequent tag for that + context. However, exclude any contexts that are already + tagged perfectly by the backoff tagger(s). + + The old value of ``self._context_to_tag`` (if any) is discarded. + + :param tagged_corpus: A tagged corpus. Each item should be + a list of (word, tag tuples. + :param cutoff: If the most likely tag for a context occurs + fewer than cutoff times, then exclude it from the + context-to-tag table for the new tagger. + """ + + token_count = hit_count = 0 + + # A context is considered 'useful' if it's not already tagged + # perfectly by the backoff tagger. + useful_contexts = set() + + # Count how many times each tag occurs in each context. + fd = ConditionalFreqDist() + for sentence in tagged_corpus: + tokens, tags = zip(*sentence) + for index, (token, tag) in enumerate(sentence): + # Record the event. + token_count += 1 + context = self.context(tokens, index, tags[:index]) + if context is None: + continue + fd[context][tag] += 1 + # If the backoff got it wrong, this context is useful: + if self.backoff is None or tag != self.backoff.tag_one( + tokens, index, tags[:index] + ): + useful_contexts.add(context) + + # Build the context_to_tag table -- for each context, figure + # out what the most likely tag is. Only include contexts that + # we've seen at least `cutoff` times. + for context in useful_contexts: + best_tag = fd[context].max() + hits = fd[context][best_tag] + if hits > cutoff: + self._context_to_tag[context] = best_tag + hit_count += hits + + # Display some stats, if requested. + if verbose: + size = len(self._context_to_tag) + backoff = 100 - (hit_count * 100.0) / token_count + pruning = 100 - (size * 100.0) / len(fd.conditions()) + print("[Trained Unigram tagger:", end=" ") + print( + "size={}, backoff={:.2f}%, pruning={:.2f}%]".format( + size, backoff, pruning + ) + ) + + +###################################################################### +# Tagger Classes +###################################################################### + + +@jsontags.register_tag +class DefaultTagger(SequentialBackoffTagger): + """ + A tagger that assigns the same tag to every token. + + >>> from nltk.tag import DefaultTagger + >>> default_tagger = DefaultTagger('NN') + >>> list(default_tagger.tag('This is a test'.split())) + [('This', 'NN'), ('is', 'NN'), ('a', 'NN'), ('test', 'NN')] + + This tagger is recommended as a backoff tagger, in cases where + a more powerful tagger is unable to assign a tag to the word + (e.g. because the word was not seen during training). + + :param tag: The tag to assign to each token + :type tag: str + """ + + json_tag = "nltk.tag.sequential.DefaultTagger" + + def __init__(self, tag): + self._tag = tag + super().__init__(None) + + def encode_json_obj(self): + return self._tag + + @classmethod + def decode_json_obj(cls, obj): + tag = obj + return cls(tag) + + def choose_tag(self, tokens, index, history): + return self._tag # ignore token and history + + def __repr__(self): + return f"" + + +@jsontags.register_tag +class NgramTagger(ContextTagger): + """ + A tagger that chooses a token's tag based on its word string and + on the preceding n word's tags. In particular, a tuple + (tags[i-n:i-1], words[i]) is looked up in a table, and the + corresponding tag is returned. N-gram taggers are typically + trained on a tagged corpus. + + Train a new NgramTagger using the given training data or + the supplied model. 
In particular, construct a new tagger + whose table maps from each context (tag[i-n:i-1], word[i]) + to the most frequent tag for that context. But exclude any + contexts that are already tagged perfectly by the backoff + tagger. + + :param train: A tagged corpus consisting of a list of tagged + sentences, where each sentence is a list of (word, tag) tuples. + :param backoff: A backoff tagger, to be used by the new + tagger if it encounters an unknown context. + :param cutoff: If the most likely tag for a context occurs + fewer than *cutoff* times, then exclude it from the + context-to-tag table for the new tagger. + """ + + json_tag = "nltk.tag.sequential.NgramTagger" + + def __init__( + self, n, train=None, model=None, backoff=None, cutoff=0, verbose=False + ): + self._n = n + self._check_params(train, model) + + super().__init__(model, backoff) + + if train: + self._train(train, cutoff, verbose) + + def encode_json_obj(self): + _context_to_tag = {repr(k): v for k, v in self._context_to_tag.items()} + if "NgramTagger" in self.__class__.__name__: + return self._n, _context_to_tag, self.backoff + else: + return _context_to_tag, self.backoff + + @classmethod + def decode_json_obj(cls, obj): + try: + _n, _context_to_tag, backoff = obj + except ValueError: + _context_to_tag, backoff = obj + + if not _context_to_tag: + return backoff + + _context_to_tag = {ast.literal_eval(k): v for k, v in _context_to_tag.items()} + + if "NgramTagger" in cls.__name__: + return cls(_n, model=_context_to_tag, backoff=backoff) + else: + return cls(model=_context_to_tag, backoff=backoff) + + def context(self, tokens, index, history): + tag_context = tuple(history[max(0, index - self._n + 1) : index]) + return tag_context, tokens[index] + + +@jsontags.register_tag +class UnigramTagger(NgramTagger): + """ + Unigram Tagger + + The UnigramTagger finds the most likely tag for each word in a training + corpus, and then uses that information to assign tags to new tokens. + + >>> from nltk.corpus import brown + >>> from nltk.tag import UnigramTagger + >>> test_sent = brown.sents(categories='news')[0] + >>> unigram_tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500]) + >>> for tok, tag in unigram_tagger.tag(test_sent): + ... print("({}, {}), ".format(tok, tag)) # doctest: +NORMALIZE_WHITESPACE + (The, AT), (Fulton, NP-TL), (County, NN-TL), (Grand, JJ-TL), + (Jury, NN-TL), (said, VBD), (Friday, NR), (an, AT), + (investigation, NN), (of, IN), (Atlanta's, NP$), (recent, JJ), + (primary, NN), (election, NN), (produced, VBD), (``, ``), + (no, AT), (evidence, NN), ('', ''), (that, CS), (any, DTI), + (irregularities, NNS), (took, VBD), (place, NN), (., .), + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.UnigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(1, train, model, backoff, cutoff, verbose) + + def context(self, tokens, index, history): + return tokens[index] + + +@jsontags.register_tag +class BigramTagger(NgramTagger): + """ + A tagger that chooses a token's tag based its word string and on + the preceding words' tag. 
In particular, a tuple consisting + of the previous tag and the word is looked up in a table, and + the corresponding tag is returned. + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.BigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(2, train, model, backoff, cutoff, verbose) + + +@jsontags.register_tag +class TrigramTagger(NgramTagger): + """ + A tagger that chooses a token's tag based its word string and on + the preceding two words' tags. In particular, a tuple consisting + of the previous two tags and the word is looked up in a table, and + the corresponding tag is returned. + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.TrigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(3, train, model, backoff, cutoff, verbose) + + +@jsontags.register_tag +class AffixTagger(ContextTagger): + """ + A tagger that chooses a token's tag based on a leading or trailing + substring of its word string. (It is important to note that these + substrings are not necessarily "true" morphological affixes). In + particular, a fixed-length substring of the word is looked up in a + table, and the corresponding tag is returned. Affix taggers are + typically constructed by training them on a tagged corpus. + + Construct a new affix tagger. + + :param affix_length: The length of the affixes that should be + considered during training and tagging. Use negative + numbers for suffixes. + :param min_stem_length: Any words whose length is less than + min_stem_length+abs(affix_length) will be assigned a + tag of None by this tagger. 
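+
+    For example, with the defaults ``affix_length=-3`` and
+    ``min_stem_length=2``, the lookup context for ``"running"`` is the suffix
+    ``"ing"``, while any word shorter than 5 characters yields no context and
+    is left to the backoff tagger (see ``context()`` below).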
+ """ + + json_tag = "nltk.tag.sequential.AffixTagger" + + def __init__( + self, + train=None, + model=None, + affix_length=-3, + min_stem_length=2, + backoff=None, + cutoff=0, + verbose=False, + ): + + self._check_params(train, model) + + super().__init__(model, backoff) + + self._affix_length = affix_length + self._min_word_length = min_stem_length + abs(affix_length) + + if train: + self._train(train, cutoff, verbose) + + def encode_json_obj(self): + return ( + self._affix_length, + self._min_word_length, + self._context_to_tag, + self.backoff, + ) + + @classmethod + def decode_json_obj(cls, obj): + _affix_length, _min_word_length, _context_to_tag, backoff = obj + return cls( + affix_length=_affix_length, + min_stem_length=_min_word_length - abs(_affix_length), + model=_context_to_tag, + backoff=backoff, + ) + + def context(self, tokens, index, history): + token = tokens[index] + if len(token) < self._min_word_length: + return None + elif self._affix_length > 0: + return token[: self._affix_length] + else: + return token[self._affix_length :] + + +@jsontags.register_tag +class RegexpTagger(SequentialBackoffTagger): + r""" + Regular Expression Tagger + + The RegexpTagger assigns tags to tokens by comparing their + word strings to a series of regular expressions. The following tagger + uses word suffixes to make guesses about the correct Brown Corpus part + of speech tag: + + >>> from nltk.corpus import brown + >>> from nltk.tag import RegexpTagger + >>> test_sent = brown.sents(categories='news')[0] + >>> regexp_tagger = RegexpTagger( + ... [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers + ... (r'(The|the|A|a|An|an)$', 'AT'), # articles + ... (r'.*able$', 'JJ'), # adjectives + ... (r'.*ness$', 'NN'), # nouns formed from adjectives + ... (r'.*ly$', 'RB'), # adverbs + ... (r'.*s$', 'NNS'), # plural nouns + ... (r'.*ing$', 'VBG'), # gerunds + ... (r'.*ed$', 'VBD'), # past tense verbs + ... (r'.*', 'NN') # nouns (default) + ... ]) + >>> regexp_tagger + + >>> regexp_tagger.tag(test_sent) # doctest: +NORMALIZE_WHITESPACE + [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'), ('Grand', 'NN'), ('Jury', 'NN'), + ('said', 'NN'), ('Friday', 'NN'), ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'), + ("Atlanta's", 'NNS'), ('recent', 'NN'), ('primary', 'NN'), ('election', 'NN'), + ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'), ('evidence', 'NN'), ("''", 'NN'), + ('that', 'NN'), ('any', 'NN'), ('irregularities', 'NNS'), ('took', 'NN'), + ('place', 'NN'), ('.', 'NN')] + + :type regexps: list(tuple(str, str)) + :param regexps: A list of ``(regexp, tag)`` pairs, each of + which indicates that a word matching ``regexp`` should + be tagged with ``tag``. The pairs will be evaluated in + order. If none of the regexps match a word, then the + optional backoff tagger is invoked, else it is + assigned the tag None. 
+ """ + + json_tag = "nltk.tag.sequential.RegexpTagger" + + def __init__( + self, regexps: List[Tuple[str, str]], backoff: Optional[TaggerI] = None + ): + super().__init__(backoff) + self._regexps = [] + for regexp, tag in regexps: + try: + self._regexps.append((re.compile(regexp), tag)) + except Exception as e: + raise Exception( + f"Invalid RegexpTagger regexp: {e}\n- regexp: {regexp!r}\n- tag: {tag!r}" + ) from e + + def encode_json_obj(self): + return [(regexp.pattern, tag) for regexp, tag in self._regexps], self.backoff + + @classmethod + def decode_json_obj(cls, obj): + regexps, backoff = obj + return cls(regexps, backoff) + + def choose_tag(self, tokens, index, history): + for regexp, tag in self._regexps: + if re.match(regexp, tokens[index]): + return tag + return None + + def __repr__(self): + return f"" + + +class ClassifierBasedTagger(SequentialBackoffTagger, FeaturesetTaggerI): + """ + A sequential tagger that uses a classifier to choose the tag for + each token in a sentence. The featureset input for the classifier + is generated by a feature detector function:: + + feature_detector(tokens, index, history) -> featureset + + Where tokens is the list of unlabeled tokens in the sentence; + index is the index of the token for which feature detection + should be performed; and history is list of the tags for all + tokens before index. + + Construct a new classifier-based sequential tagger. + + :param feature_detector: A function used to generate the + featureset input for the classifier:: + feature_detector(tokens, index, history) -> featureset + + :param train: A tagged corpus consisting of a list of tagged + sentences, where each sentence is a list of (word, tag) tuples. + + :param backoff: A backoff tagger, to be used by the new tagger + if it encounters an unknown context. + + :param classifier_builder: A function used to train a new + classifier based on the data in *train*. It should take + one argument, a list of labeled featuresets (i.e., + (featureset, label) tuples). + + :param classifier: The classifier that should be used by the + tagger. This is only useful if you want to manually + construct the classifier; normally, you would use *train* + instead. + + :param backoff: A backoff tagger, used if this tagger is + unable to determine a tag for a given token. + + :param cutoff_prob: If specified, then this tagger will fall + back on its backoff tagger if the probability of the most + likely tag is less than *cutoff_prob*. + """ + + def __init__( + self, + feature_detector=None, + train=None, + classifier_builder=NaiveBayesClassifier.train, + classifier=None, + backoff=None, + cutoff_prob=None, + verbose=False, + ): + self._check_params(train, classifier) + + super().__init__(backoff) + + if (train and classifier) or (not train and not classifier): + raise ValueError( + "Must specify either training data or " "trained classifier." + ) + + if feature_detector is not None: + self._feature_detector = feature_detector + # The feature detector function, used to generate a featureset + # or each token: feature_detector(tokens, index, history) -> featureset + + self._cutoff_prob = cutoff_prob + """Cutoff probability for tagging -- if the probability of the + most likely tag is less than this, then use backoff.""" + + self._classifier = classifier + """The classifier used to choose a tag for each token.""" + + if train: + self._train(train, classifier_builder, verbose) + + def choose_tag(self, tokens, index, history): + # Use our feature detector to get the featureset. 
+ featureset = self.feature_detector(tokens, index, history) + + # Use the classifier to pick a tag. If a cutoff probability + # was specified, then check that the tag's probability is + # higher than that cutoff first; otherwise, return None. + if self._cutoff_prob is None: + return self._classifier.classify(featureset) + + pdist = self._classifier.prob_classify(featureset) + tag = pdist.max() + return tag if pdist.prob(tag) >= self._cutoff_prob else None + + def _train(self, tagged_corpus, classifier_builder, verbose): + """ + Build a new classifier, based on the given training data + *tagged_corpus*. + """ + + classifier_corpus = [] + if verbose: + print("Constructing training corpus for classifier.") + + for sentence in tagged_corpus: + history = [] + untagged_sentence, tags = zip(*sentence) + for index in range(len(sentence)): + featureset = self.feature_detector(untagged_sentence, index, history) + classifier_corpus.append((featureset, tags[index])) + history.append(tags[index]) + + if verbose: + print(f"Training classifier ({len(classifier_corpus)} instances)") + self._classifier = classifier_builder(classifier_corpus) + + def __repr__(self): + return f"" + + def feature_detector(self, tokens, index, history): + """ + Return the feature detector that this tagger uses to generate + featuresets for its classifier. The feature detector is a + function with the signature:: + + feature_detector(tokens, index, history) -> featureset + + See ``classifier()`` + """ + return self._feature_detector(tokens, index, history) + + def classifier(self): + """ + Return the classifier that this tagger uses to choose a tag + for each word in a sentence. The input for this classifier is + generated using this tagger's feature detector. + See ``feature_detector()`` + """ + return self._classifier + + +class ClassifierBasedPOSTagger(ClassifierBasedTagger): + """ + A classifier based part of speech tagger. 
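+
+    The feature detector defined below looks at the word itself, its
+    lower-cased suffixes, a coarse "shape" class (number, punctuation,
+    capitalised, ...), and the two preceding words and tags.  A rough usage
+    sketch (the training slice and the resulting tags are illustrative, so
+    the doctest is skipped):
+
+    >>> from nltk.corpus import treebank
+    >>> train_sents = treebank.tagged_sents()[:200]  # doctest: +SKIP
+    >>> tagger = ClassifierBasedPOSTagger(train=train_sents)  # doctest: +SKIP
+    >>> tagger.tag('The quick brown fox jumped over the lazy dog .'.split())  # doctest: +SKIP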
+ """ + + def feature_detector(self, tokens, index, history): + word = tokens[index] + if index == 0: + prevword = prevprevword = None + prevtag = prevprevtag = None + elif index == 1: + prevword = tokens[index - 1].lower() + prevprevword = None + prevtag = history[index - 1] + prevprevtag = None + else: + prevword = tokens[index - 1].lower() + prevprevword = tokens[index - 2].lower() + prevtag = history[index - 1] + prevprevtag = history[index - 2] + + if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word): + shape = "number" + elif re.match(r"\W+$", word): + shape = "punct" + elif re.match("[A-Z][a-z]+$", word): + shape = "upcase" + elif re.match("[a-z]+$", word): + shape = "downcase" + elif re.match(r"\w+$", word): + shape = "mixedcase" + else: + shape = "other" + + features = { + "prevtag": prevtag, + "prevprevtag": prevprevtag, + "word": word, + "word.lower": word.lower(), + "suffix3": word.lower()[-3:], + "suffix2": word.lower()[-2:], + "suffix1": word.lower()[-1:], + "prevprevword": prevprevword, + "prevword": prevword, + "prevtag+word": f"{prevtag}+{word.lower()}", + "prevprevtag+word": f"{prevprevtag}+{word.lower()}", + "prevword+word": f"{prevword}+{word.lower()}", + "shape": shape, + } + return features diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/stanford.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..7c21e2dd20dec5c3b242d0e5007a4bf51d8ef8f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/stanford.py @@ -0,0 +1,236 @@ +# Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nitin Madnani +# Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the Stanford taggers. + +Tagger models need to be downloaded from https://nlp.stanford.edu/software +and the STANFORD_MODELS environment variable set (a colon-separated +list of paths). + +For more details see the documentation for StanfordPOSTagger and StanfordNERTagger. +""" + +import os +import tempfile +import warnings +from abc import abstractmethod +from subprocess import PIPE + +from nltk.internals import _java_options, config_java, find_file, find_jar, java +from nltk.tag.api import TaggerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordTagger(TaggerI): + """ + An interface to Stanford taggers. Subclasses must define: + + - ``_cmd`` property: A property that returns the command that will be + executed. + - ``_SEPARATOR``: Class constant that represents that character that + is used to separate the tokens from their tags. + - ``_JAR`` file: Class constant that represents the jar file name. + """ + + _SEPARATOR = "" + _JAR = "" + + def __init__( + self, + model_filename, + path_to_jar=None, + encoding="utf8", + verbose=False, + java_options="-mx1000m", + ): + # Raise deprecation warning. + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.6.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead." + ), + DeprecationWarning, + stacklevel=2, + ) + + if not self._JAR: + warnings.warn( + "The StanfordTagger class is not meant to be " + "instantiated directly. Did you mean " + "StanfordPOSTagger or StanfordNERTagger?" 
+ ) + self._stanford_jar = find_jar( + self._JAR, path_to_jar, searchpath=(), url=_stanford_url, verbose=verbose + ) + + self._stanford_model = find_file( + model_filename, env_vars=("STANFORD_MODELS",), verbose=verbose + ) + + self._encoding = encoding + self.java_options = java_options + + @property + @abstractmethod + def _cmd(self): + """ + A property that returns the command that will be executed. + """ + + def tag(self, tokens): + # This function should return list of tuple rather than list of list + return sum(self.tag_sents([tokens]), []) + + def tag_sents(self, sentences): + encoding = self._encoding + default_options = " ".join(_java_options) + config_java(options=self.java_options, verbose=False) + + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + cmd = list(self._cmd) + cmd.extend(["-encoding", encoding]) + + # Write the actual sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + # Run the tagger and get the output + stanpos_output, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stanpos_output = stanpos_output.decode(encoding) + + # Delete the temporary file + os.unlink(self._input_file_path) + + # Return java configurations to their default values + config_java(options=default_options, verbose=False) + + return self.parse_output(stanpos_output, sentences) + + def parse_output(self, text, sentences=None): + # Output the tagged sentences + tagged_sentences = [] + for tagged_sentence in text.strip().split("\n"): + sentence = [] + for tagged_word in tagged_sentence.strip().split(): + word_tags = tagged_word.strip().split(self._SEPARATOR) + sentence.append( + ("".join(word_tags[:-1]), word_tags[-1].replace("0", "").upper()) + ) + tagged_sentences.append(sentence) + return tagged_sentences + + +class StanfordPOSTagger(StanfordTagger): + """ + A class for pos tagging with Stanford Tagger. The input is the paths to: + - a model trained on training data + - (optionally) the path to the stanford tagger jar file. If not specified here, + then this jar file must be specified in the CLASSPATH environment variable. + - (optionally) the encoding of the training data (default: UTF-8) + + Example: + + >>> from nltk.tag import StanfordPOSTagger + >>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP + >>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')] + """ + + _SEPARATOR = "_" + _JAR = "stanford-postagger.jar" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def _cmd(self): + return [ + "edu.stanford.nlp.tagger.maxent.MaxentTagger", + "-model", + self._stanford_model, + "-textFile", + self._input_file_path, + "-tokenize", + "false", + "-outputFormatOptions", + "keepEmptySentences", + ] + + +class StanfordNERTagger(StanfordTagger): + """ + A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to: + + - a model trained on training data + - (optionally) the path to the stanford tagger jar file. If not specified here, + then this jar file must be specified in the CLASSPATH environment variable. 
+ - (optionally) the encoding of the training data (default: UTF-8) + + Example: + + >>> from nltk.tag import StanfordNERTagger + >>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP + >>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP + [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), + ('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'), + ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')] + """ + + _SEPARATOR = "/" + _JAR = "stanford-ner.jar" + _FORMAT = "slashTags" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def _cmd(self): + # Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer + return [ + "edu.stanford.nlp.ie.crf.CRFClassifier", + "-loadClassifier", + self._stanford_model, + "-textFile", + self._input_file_path, + "-outputFormat", + self._FORMAT, + "-tokenizerFactory", + "edu.stanford.nlp.process.WhitespaceTokenizer", + "-tokenizerOptions", + '"tokenizeNLs=false"', + ] + + def parse_output(self, text, sentences): + if self._FORMAT == "slashTags": + # Joint together to a big list + tagged_sentences = [] + for tagged_sentence in text.strip().split("\n"): + for tagged_word in tagged_sentence.strip().split(): + word_tags = tagged_word.strip().split(self._SEPARATOR) + tagged_sentences.append(("".join(word_tags[:-1]), word_tags[-1])) + + # Separate it according to the input + result = [] + start = 0 + for sent in sentences: + result.append(tagged_sentences[start : start + len(sent)]) + start += len(sent) + return result + + raise NotImplementedError diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/tnt.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/tnt.py new file mode 100644 index 0000000000000000000000000000000000000000..a505104d812532af561ee3d3d9d80611f78db2cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/tnt.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: TnT Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sam Huston +# +# URL: +# For license information, see LICENSE.TXT + +""" +Implementation of 'TnT - A Statisical Part of Speech Tagger' +by Thorsten Brants + +https://aclanthology.org/A00-1031.pdf +""" + +from math import log +from operator import itemgetter + +from nltk.probability import ConditionalFreqDist, FreqDist +from nltk.tag.api import TaggerI + + +class TnT(TaggerI): + """ + TnT - Statistical POS tagger + + IMPORTANT NOTES: + + * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS + + - It is possible to provide an untrained POS tagger to + create tags for unknown words, see __init__ function + + * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT + + - Due to the nature of this tagger, it works best when + trained over sentence delimited input. + - However it still produces good results if the training + data and testing data are separated on all punctuation eg: [,.?!] 
+ - Input for training is expected to be a list of sentences + where each sentence is a list of (word, tag) tuples + - Input for tag function is a single sentence + Input for tagdata function is a list of sentences + Output is of a similar form + + * Function provided to process text that is unsegmented + + - Please see basic_sent_chop() + + + TnT uses a second order Markov model to produce tags for + a sequence of input, specifically: + + argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T) + + IE: the maximum projection of a set of probabilities + + The set of possible tags for a given word is derived + from the training data. It is the set of all tags + that exact word has been assigned. + + To speed up and get more precision, we can use log addition + to instead multiplication, specifically: + + argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] + + log(P(t_T+1|t_T)) + + The probability of a tag for a given word is the linear + interpolation of 3 markov models; a zero-order, first-order, + and a second order model. + + P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) + + l3*P(t_i| t_i-1, t_i-2) + + A beam search is used to limit the memory usage of the algorithm. + The degree of the beam can be changed using N in the initialization. + N represents the maximum number of possible solutions to maintain + while tagging. + + It is possible to differentiate the tags which are assigned to + capitalized words. However this does not result in a significant + gain in the accuracy of the results. + """ + + def __init__(self, unk=None, Trained=False, N=1000, C=False): + """ + Construct a TnT statistical tagger. Tagger must be trained + before being used to tag input. + + :param unk: instance of a POS tagger, conforms to TaggerI + :type unk: TaggerI + :param Trained: Indication that the POS tagger is trained or not + :type Trained: bool + :param N: Beam search degree (see above) + :type N: int + :param C: Capitalization flag + :type C: bool + + Initializer, creates frequency distributions to be used + for tagging + + _lx values represent the portion of the tri/bi/uni taggers + to be used to calculate the probability + + N value is the number of possible solutions to maintain + while tagging. A good value for this is 1000 + + C is a boolean value which specifies to use or + not use the Capitalization of the word as additional + information for tagging. + NOTE: using capitalization may not increase the accuracy + of the tagger + """ + + self._uni = FreqDist() + self._bi = ConditionalFreqDist() + self._tri = ConditionalFreqDist() + self._wd = ConditionalFreqDist() + self._eos = ConditionalFreqDist() + self._l1 = 0.0 + self._l2 = 0.0 + self._l3 = 0.0 + self._N = N + self._C = C + self._T = Trained + + self._unk = unk + + # statistical tools (ignore or delete me) + self.unknown = 0 + self.known = 0 + + def train(self, data): + """ + Uses a set of tagged data to train the tagger. + If an unknown word tagger is specified, + it is trained on the same data. 
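+
+        A minimal training and tagging sketch (assumes the brown corpus data
+        is installed; skipped here because training is slow):
+
+        >>> from nltk.corpus import brown                    # doctest: +SKIP
+        >>> tnt_tagger = TnT()                               # doctest: +SKIP
+        >>> tnt_tagger.train(brown.tagged_sents()[:500])     # doctest: +SKIP
+        >>> tnt_tagger.tag(['Now', 'is', 'the', 'time'])     # doctest: +SKIP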
+ + :param data: List of lists of (word, tag) tuples + :type data: tuple(str) + """ + + # Ensure that local C flag is initialized before use + C = False + + if self._unk is not None and self._T == False: + self._unk.train(data) + + for sent in data: + history = [("BOS", False), ("BOS", False)] + for w, t in sent: + + # if capitalization is requested, + # and the word begins with a capital + # set local flag C to True + if self._C and w[0].isupper(): + C = True + + self._wd[w][t] += 1 + self._uni[(t, C)] += 1 + self._bi[history[1]][(t, C)] += 1 + self._tri[tuple(history)][(t, C)] += 1 + + history.append((t, C)) + history.pop(0) + + # set local flag C to false for the next word + C = False + + self._eos[t]["EOS"] += 1 + + # compute lambda values from the trained frequency distributions + self._compute_lambda() + + def _compute_lambda(self): + """ + creates lambda values based upon training data + + NOTE: no need to explicitly reference C, + it is contained within the tag variable :: tag == (tag,C) + + for each tag trigram (t1, t2, t3) + depending on the maximum value of + - f(t1,t2,t3)-1 / f(t1,t2)-1 + - f(t2,t3)-1 / f(t2)-1 + - f(t3)-1 / N-1 + + increment l3,l2, or l1 by f(t1,t2,t3) + + ISSUES -- Resolutions: + if 2 values are equal, increment both lambda values + by (f(t1,t2,t3) / 2) + """ + + # temporary lambda variables + tl1 = 0.0 + tl2 = 0.0 + tl3 = 0.0 + + # for each t1,t2 in system + for history in self._tri.conditions(): + (h1, h2) = history + + # for each t3 given t1,t2 in system + # (NOTE: tag actually represents (tag,C)) + # However no effect within this function + for tag in self._tri[history].keys(): + + # if there has only been 1 occurrence of this tag in the data + # then ignore this trigram. + if self._uni[tag] == 1: + continue + + # safe_div provides a safe floating point division + # it returns -1 if the denominator is 0 + c3 = self._safe_div( + (self._tri[history][tag] - 1), (self._tri[history].N() - 1) + ) + c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1)) + c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1)) + + # if c1 is the maximum value: + if (c1 > c3) and (c1 > c2): + tl1 += self._tri[history][tag] + + # if c2 is the maximum value + elif (c2 > c3) and (c2 > c1): + tl2 += self._tri[history][tag] + + # if c3 is the maximum value + elif (c3 > c2) and (c3 > c1): + tl3 += self._tri[history][tag] + + # if c3, and c2 are equal and larger than c1 + elif (c3 == c2) and (c3 > c1): + tl2 += self._tri[history][tag] / 2.0 + tl3 += self._tri[history][tag] / 2.0 + + # if c1, and c2 are equal and larger than c3 + # this might be a dumb thing to do....(not sure yet) + elif (c2 == c1) and (c1 > c3): + tl1 += self._tri[history][tag] / 2.0 + tl2 += self._tri[history][tag] / 2.0 + + # otherwise there might be a problem + # eg: all values = 0 + else: + pass + + # Lambda normalisation: + # ensures that l1+l2+l3 = 1 + self._l1 = tl1 / (tl1 + tl2 + tl3) + self._l2 = tl2 / (tl1 + tl2 + tl3) + self._l3 = tl3 / (tl1 + tl2 + tl3) + + def _safe_div(self, v1, v2): + """ + Safe floating point division function, does not allow division by 0 + returns -1 if the denominator is 0 + """ + if v2 == 0: + return -1 + else: + return v1 / v2 + + def tagdata(self, data): + """ + Tags each sentence in a list of sentences + + :param data:list of list of words + :type data: [[string,],] + :return: list of list of (word, tag) tuples + + Invokes tag(sent) function for each sentence + compiles the results into a list of tagged sentences + each tagged sentence is a list of (word, 
tag) tuples + """ + res = [] + for sent in data: + res1 = self.tag(sent) + res.append(res1) + return res + + def tag(self, data): + """ + Tags a single sentence + + :param data: list of words + :type data: [string,] + + :return: [(word, tag),] + + Calls recursive function '_tagword' + to produce a list of tags + + Associates the sequence of returned tags + with the correct words in the input sequence + + returns a list of (word, tag) tuples + """ + + current_state = [(["BOS", "BOS"], 0.0)] + + sent = list(data) + + tags = self._tagword(sent, current_state) + + res = [] + for i in range(len(sent)): + # unpack and discard the C flags + (t, C) = tags[i + 2] + res.append((sent[i], t)) + + return res + + def _tagword(self, sent, current_states): + """ + :param sent : List of words remaining in the sentence + :type sent : [word,] + :param current_states : List of possible tag combinations for + the sentence so far, and the log probability + associated with each tag combination + :type current_states : [([tag, ], logprob), ] + + Tags the first word in the sentence and + recursively tags the reminder of sentence + + Uses formula specified above to calculate the probability + of a particular tag + """ + + # if this word marks the end of the sentence, + # return the most probable tag + if sent == []: + (h, logp) = current_states[0] + return h + + # otherwise there are more words to be tagged + word = sent[0] + sent = sent[1:] + new_states = [] + + # if the Capitalisation is requested, + # initialise the flag for this word + C = False + if self._C and word[0].isupper(): + C = True + + # if word is known + # compute the set of possible tags + # and their associated log probabilities + if word in self._wd: + self.known += 1 + + for (history, curr_sent_logprob) in current_states: + logprobs = [] + + for t in self._wd[word].keys(): + tC = (t, C) + p_uni = self._uni.freq(tC) + p_bi = self._bi[history[-1]].freq(tC) + p_tri = self._tri[tuple(history[-2:])].freq(tC) + p_wd = self._wd[word][t] / self._uni[tC] + p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri + p2 = log(p, 2) + log(p_wd, 2) + + # compute the result of appending each tag to this history + new_states.append((history + [tC], curr_sent_logprob + p2)) + + # otherwise a new word, set of possible tags is unknown + else: + self.unknown += 1 + + # since a set of possible tags, + # and the probability of each specific tag + # can not be returned from most classifiers: + # specify that any unknown words are tagged with certainty + p = 1 + + # if no unknown word tagger has been specified + # then use the tag 'Unk' + if self._unk is None: + tag = ("Unk", C) + + # otherwise apply the unknown word tagger + else: + [(_w, t)] = list(self._unk.tag([word])) + tag = (t, C) + + for (history, logprob) in current_states: + history.append(tag) + + new_states = current_states + + # now have computed a set of possible new_states + + # sort states by log prob + # set is now ordered greatest to least log probability + new_states.sort(reverse=True, key=itemgetter(1)) + + # del everything after N (threshold) + # this is the beam search cut + if len(new_states) > self._N: + new_states = new_states[: self._N] + + # compute the tags for the rest of the sentence + # return the best list of tags for the sentence + return self._tagword(sent, new_states) + + +######################################## +# helper function -- basic sentence tokenizer +######################################## + + +def basic_sent_chop(data, raw=True): + """ + Basic method for tokenizing input 
into sentences + for this tagger: + + :param data: list of tokens (words or (word, tag) tuples) + :type data: str or tuple(str, str) + :param raw: boolean flag marking the input data + as a list of words or a list of tagged words + :type raw: bool + :return: list of sentences + sentences are a list of tokens + tokens are the same as the input + + Function takes a list of tokens and separates the tokens into lists + where each list represents a sentence fragment + This function can separate both tagged and raw sequences into + basic sentences. + + Sentence markers are the set of [,.!?] + + This is a simple method which enhances the performance of the TnT + tagger. Better sentence tokenization will further enhance the results. + """ + + new_data = [] + curr_sent = [] + sent_mark = [",", ".", "?", "!"] + + if raw: + for word in data: + if word in sent_mark: + curr_sent.append(word) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append(word) + + else: + for (word, tag) in data: + if word in sent_mark: + curr_sent.append((word, tag)) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append((word, tag)) + return new_data + + +def demo(): + from nltk.corpus import brown + + sents = list(brown.tagged_sents()) + test = list(brown.sents()) + + tagger = TnT() + tagger.train(sents[200:1000]) + + tagged_data = tagger.tagdata(test[100:120]) + + for j in range(len(tagged_data)): + s = tagged_data[j] + t = sents[j + 100] + for i in range(len(s)): + print(s[i], "--", t[i]) + print() + + +def demo2(): + from nltk.corpus import treebank + + d = list(treebank.tagged_sents()) + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=True) + t.train(d[(11) * 100 :]) + s.train(d[(11) * 100 :]) + + for i in range(10): + tacc = t.accuracy(d[i * 100 : ((i + 1) * 100)]) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + t.unknown = 0 + t.known = 0 + + print("Capitalization off:") + print("Accuracy:", tacc) + print("Percentage known:", tp_kn) + print("Percentage unknown:", tp_un) + print("Accuracy over known words:", (tacc / tp_kn)) + + sacc = s.accuracy(d[i * 100 : ((i + 1) * 100)]) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + s.unknown = 0 + s.known = 0 + + print("Capitalization on:") + print("Accuracy:", sacc) + print("Percentage known:", sp_kn) + print("Percentage unknown:", sp_un) + print("Accuracy over known words:", (sacc / sp_kn)) + + +def demo3(): + from nltk.corpus import brown, treebank + + d = list(treebank.tagged_sents()) + e = list(brown.tagged_sents()) + + d = d[:1000] + e = e[:1000] + + d10 = int(len(d) * 0.1) + e10 = int(len(e) * 0.1) + + tknacc = 0 + sknacc = 0 + tallacc = 0 + sallacc = 0 + tknown = 0 + sknown = 0 + + for i in range(10): + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=False) + + dtest = d[(i * d10) : ((i + 1) * d10)] + etest = e[(i * e10) : ((i + 1) * e10)] + + dtrain = d[: (i * d10)] + d[((i + 1) * d10) :] + etrain = e[: (i * e10)] + e[((i + 1) * e10) :] + + t.train(dtrain) + s.train(etrain) + + tacc = t.accuracy(dtest) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + tknown += tp_kn + t.unknown = 0 + t.known = 0 + + sacc = s.accuracy(etest) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + sknown += sp_kn + s.unknown = 0 + s.known = 0 + + tknacc += tacc / tp_kn + sknacc += sacc / tp_kn + tallacc += tacc + sallacc += sacc + + # print(i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc) + + 
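+    # Note on the figures printed below: each accumulator holds the *sum*
+    # over the 10 cross-validation folds, so multiplying by 10 reports the
+    # mean as a percentage (10 * sum == 100 * (sum / 10)).  Two apparent
+    # quirks in this demo are left as-is: the "brown"/"treebank" labels look
+    # swapped relative to the corpora the taggers were trained on (t/tknacc
+    # use treebank, s/sknacc use brown), and sknacc is accumulated with
+    # tp_kn where sp_kn was presumably intended.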
print("brown: acc over words known:", 10 * tknacc) + print(" : overall accuracy:", 10 * tallacc) + print(" : words known:", 10 * tknown) + print("treebank: acc over words known:", 10 * sknacc) + print(" : overall accuracy:", 10 * sallacc) + print(" : words known:", 10 * sknown) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tag/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/tag/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e35b98195f2b7b448775a49795e0f34d612624a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tag/util.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Tagger Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + + +def str2tuple(s, sep="/"): + """ + Given the string representation of a tagged token, return the + corresponding tuple representation. The rightmost occurrence of + *sep* in *s* will be used to divide *s* into a word string and + a tag string. If *sep* does not occur in *s*, return (s, None). + + >>> from nltk.tag.util import str2tuple + >>> str2tuple('fly/NN') + ('fly', 'NN') + + :type s: str + :param s: The string representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + loc = s.rfind(sep) + if loc >= 0: + return (s[:loc], s[loc + len(sep) :].upper()) + else: + return (s, None) + + +def tuple2str(tagged_token, sep="/"): + """ + Given the tuple representation of a tagged token, return the + corresponding string representation. This representation is + formed by concatenating the token's word string, followed by the + separator, followed by the token's tag. (If the tag is None, + then just return the bare word string.) + + >>> from nltk.tag.util import tuple2str + >>> tagged_token = ('fly', 'NN') + >>> tuple2str(tagged_token) + 'fly/NN' + + :type tagged_token: tuple(str, str) + :param tagged_token: The tuple representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + word, tag = tagged_token + if tag is None: + return word + else: + assert sep not in tag, "tag may not contain sep!" + return f"{word}{sep}{tag}" + + +def untag(tagged_sentence): + """ + Given a tagged sentence, return an untagged version of that + sentence. I.e., return a list containing the first element + of each tuple in *tagged_sentence*. 
+ + >>> from nltk.tag.util import untag + >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) + ['John', 'saw', 'Mary'] + + """ + return [w for (w, t) in tagged_sentence] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfa4c111dc3d6058932f336510fcdb8f92a5d9d2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2f70b1824e9a7339f968a2d81ada037b6bc344d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77f0f74e3ce2045bfa64ab81324fbac379aea8bf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py new file mode 100644 index 0000000000000000000000000000000000000000..f28b361cb76121f76d633d709aca6b5e32acb14d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py @@ -0,0 +1,116 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT + +import unittest + +import pytest + +from nltk import FreqDist +from nltk.lm import NgramCounter +from nltk.util import everygrams + + +class TestNgramCounter: + """Tests for NgramCounter that only involve lookup, no modification.""" + + @classmethod + def setup_class(self): + text = [list("abcd"), list("egdbe")] + self.trigram_counter = NgramCounter( + everygrams(sent, max_len=3) for sent in text + ) + self.bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text) + self.case = unittest.TestCase() + + def test_N(self): + assert self.bigram_counter.N() == 16 + assert self.trigram_counter.N() == 21 + + def test_counter_len_changes_with_lookup(self): + assert len(self.bigram_counter) == 2 + self.bigram_counter[50] + assert len(self.bigram_counter) == 3 + + def test_ngram_order_access_unigrams(self): + assert self.bigram_counter[1] == self.bigram_counter.unigrams + + def test_ngram_conditional_freqdist(self): + case = unittest.TestCase() + expected_trigram_contexts = [ + ("a", "b"), + ("b", "c"), + ("e", "g"), + ("g", "d"), + ("d", "b"), + ] + expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)] + + bigrams = self.trigram_counter[2] + trigrams = self.trigram_counter[3] + + self.case.assertCountEqual(expected_bigram_contexts, bigrams.conditions()) + self.case.assertCountEqual(expected_trigram_contexts, trigrams.conditions()) + + def 
test_bigram_counts_seen_ngrams(self): + assert self.bigram_counter[["a"]]["b"] == 1 + assert self.bigram_counter[["b"]]["c"] == 1 + + def test_bigram_counts_unseen_ngrams(self): + assert self.bigram_counter[["b"]]["z"] == 0 + + def test_unigram_counts_seen_words(self): + assert self.bigram_counter["b"] == 2 + + def test_unigram_counts_completely_unseen_words(self): + assert self.bigram_counter["z"] == 0 + + +class TestNgramCounterTraining: + @classmethod + def setup_class(self): + self.counter = NgramCounter() + self.case = unittest.TestCase() + + @pytest.mark.parametrize("case", ["", [], None]) + def test_empty_inputs(self, case): + test = NgramCounter(case) + assert 2 not in test + assert test[1] == FreqDist() + + def test_train_on_unigrams(self): + words = list("abcd") + counter = NgramCounter([[(w,) for w in words]]) + + assert not counter[3] + assert not counter[2] + self.case.assertCountEqual(words, counter[1].keys()) + + def test_train_on_illegal_sentences(self): + str_sent = ["Check", "this", "out", "!"] + list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]] + + with pytest.raises(TypeError): + NgramCounter([str_sent]) + + with pytest.raises(TypeError): + NgramCounter([list_sent]) + + def test_train_on_bigrams(self): + bigram_sent = [("a", "b"), ("c", "d")] + counter = NgramCounter([bigram_sent]) + assert not bool(counter[3]) + + def test_train_on_mix(self): + mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)] + counter = NgramCounter([mixed_sent]) + unigrams = ["h"] + bigram_contexts = [("a",), ("c",)] + trigram_contexts = [("e", "f")] + + self.case.assertCountEqual(unigrams, counter[1].keys()) + self.case.assertCountEqual(bigram_contexts, counter[2].keys()) + self.case.assertCountEqual(trigram_contexts, counter[3].keys()) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0059c1e19003bc946f699ca5895f9932ed4ec341 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__init__.py @@ -0,0 +1,32 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird , Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Experimental features for machine translation. +These interfaces are prone to change. 
+ +isort:skip_file +""" + +from nltk.translate.api import AlignedSent, Alignment, PhraseTable +from nltk.translate.ibm_model import IBMModel +from nltk.translate.ibm1 import IBMModel1 +from nltk.translate.ibm2 import IBMModel2 +from nltk.translate.ibm3 import IBMModel3 +from nltk.translate.ibm4 import IBMModel4 +from nltk.translate.ibm5 import IBMModel5 +from nltk.translate.bleu_score import sentence_bleu as bleu +from nltk.translate.ribes_score import sentence_ribes as ribes +from nltk.translate.meteor_score import meteor_score as meteor +from nltk.translate.metrics import alignment_error_rate +from nltk.translate.stack_decoder import StackDecoder +from nltk.translate.nist_score import sentence_nist as nist +from nltk.translate.chrf_score import sentence_chrf as chrf +from nltk.translate.gale_church import trace +from nltk.translate.gdfa import grow_diag_final_and +from nltk.translate.gleu_score import sentence_gleu as gleu +from nltk.translate.phrase_based import extract diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b80b8e42bd4619de4e0ac338edbf95f0a0545b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8bae6ef20dafb21205a8659735a0578624043fd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ce46ff4d9bf1f7dd73e5a35e99a55656a294aee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cf00f2b52f00cd7bf6df82d9b8d4557bb0592079 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/api.py @@ -0,0 +1,334 @@ +# Natural Language Toolkit: API for alignment and translation objects +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Zhang +# Guan Gui +# Steven Bird +# Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +import subprocess +from collections import namedtuple + + +class AlignedSent: + """ + Return an aligned sentence object, which encapsulates two sentences + along with an ``Alignment`` between them. + + Typically used in machine translation to represent a sentence and + its translation. + + >>> from nltk.translate import AlignedSent, Alignment + >>> algnsent = AlignedSent(['klein', 'ist', 'das', 'Haus'], + ... 
['the', 'house', 'is', 'small'], Alignment.fromstring('0-3 1-2 2-0 3-1')) + >>> algnsent.words + ['klein', 'ist', 'das', 'Haus'] + >>> algnsent.mots + ['the', 'house', 'is', 'small'] + >>> algnsent.alignment + Alignment([(0, 3), (1, 2), (2, 0), (3, 1)]) + >>> from nltk.corpus import comtrans + >>> print(comtrans.aligned_sents()[54]) + 'So why should EU arm...'> + >>> print(comtrans.aligned_sents()[54].alignment) + 0-0 0-1 1-0 2-2 3-4 3-5 4-7 5-8 6-3 7-9 8-9 9-10 9-11 10-12 11-6 12-6 13-13 + + :param words: Words in the target language sentence + :type words: list(str) + :param mots: Words in the source language sentence + :type mots: list(str) + :param alignment: Word-level alignments between ``words`` and ``mots``. + Each alignment is represented as a 2-tuple (words_index, mots_index). + :type alignment: Alignment + """ + + def __init__(self, words, mots, alignment=None): + self._words = words + self._mots = mots + if alignment is None: + self.alignment = Alignment([]) + else: + assert type(alignment) is Alignment + self.alignment = alignment + + @property + def words(self): + return self._words + + @property + def mots(self): + return self._mots + + def _get_alignment(self): + return self._alignment + + def _set_alignment(self, alignment): + _check_alignment(len(self.words), len(self.mots), alignment) + self._alignment = alignment + + alignment = property(_get_alignment, _set_alignment) + + def __repr__(self): + """ + Return a string representation for this ``AlignedSent``. + + :rtype: str + """ + words = "[%s]" % (", ".join("'%s'" % w for w in self._words)) + mots = "[%s]" % (", ".join("'%s'" % w for w in self._mots)) + + return f"AlignedSent({words}, {mots}, {self._alignment!r})" + + def _to_dot(self): + """ + Dot representation of the aligned sentence + """ + s = "graph align {\n" + s += "node[shape=plaintext]\n" + + # Declare node + for w in self._words: + s += f'"{w}_source" [label="{w}"] \n' + + for w in self._mots: + s += f'"{w}_target" [label="{w}"] \n' + + # Alignment + for u, v in self._alignment: + s += f'"{self._words[u]}_source" -- "{self._mots[v]}_target" \n' + + # Connect the source words + for i in range(len(self._words) - 1): + s += '"{}_source" -- "{}_source" [style=invis]\n'.format( + self._words[i], + self._words[i + 1], + ) + + # Connect the target words + for i in range(len(self._mots) - 1): + s += '"{}_target" -- "{}_target" [style=invis]\n'.format( + self._mots[i], + self._mots[i + 1], + ) + + # Put it in the same rank + s += "{rank = same; %s}\n" % (" ".join('"%s_source"' % w for w in self._words)) + s += "{rank = same; %s}\n" % (" ".join('"%s_target"' % w for w in self._mots)) + + s += "}" + + return s + + def _repr_svg_(self): + """ + Ipython magic : show SVG representation of this ``AlignedSent``. + """ + dot_string = self._to_dot().encode("utf8") + output_format = "svg" + try: + process = subprocess.Popen( + ["dot", "-T%s" % output_format], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + except OSError as e: + raise Exception("Cannot find the dot binary from Graphviz package") from e + out, err = process.communicate(dot_string) + + return out.decode("utf8") + + def __str__(self): + """ + Return a human-readable string representation for this ``AlignedSent``. + + :rtype: str + """ + source = " ".join(self._words)[:20] + "..." + target = " ".join(self._mots)[:20] + "..." 
+ return f" '{target}'>" + + def invert(self): + """ + Return the aligned sentence pair, reversing the directionality + + :rtype: AlignedSent + """ + return AlignedSent(self._mots, self._words, self._alignment.invert()) + + +class Alignment(frozenset): + """ + A storage class for representing alignment between two sequences, s1, s2. + In general, an alignment is a set of tuples of the form (i, j, ...) + representing an alignment between the i-th element of s1 and the + j-th element of s2. Tuples are extensible (they might contain + additional data, such as a boolean to indicate sure vs possible alignments). + + >>> from nltk.translate import Alignment + >>> a = Alignment([(0, 0), (0, 1), (1, 2), (2, 2)]) + >>> a.invert() + Alignment([(0, 0), (1, 0), (2, 1), (2, 2)]) + >>> print(a.invert()) + 0-0 1-0 2-1 2-2 + >>> a[0] + [(0, 1), (0, 0)] + >>> a.invert()[2] + [(2, 1), (2, 2)] + >>> b = Alignment([(0, 0), (0, 1)]) + >>> b.issubset(a) + True + >>> c = Alignment.fromstring('0-0 0-1') + >>> b == c + True + """ + + def __new__(cls, pairs): + self = frozenset.__new__(cls, pairs) + self._len = max(p[0] for p in self) if self != frozenset([]) else 0 + self._index = None + return self + + @classmethod + def fromstring(cls, s): + """ + Read a giza-formatted string and return an Alignment object. + + >>> Alignment.fromstring('0-0 2-1 9-2 21-3 10-4 7-5') + Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)]) + + :type s: str + :param s: the positional alignments in giza format + :rtype: Alignment + :return: An Alignment object corresponding to the string representation ``s``. + """ + + return Alignment([_giza2pair(a) for a in s.split()]) + + def __getitem__(self, key): + """ + Look up the alignments that map from a given index or slice. + """ + if not self._index: + self._build_index() + return self._index.__getitem__(key) + + def invert(self): + """ + Return an Alignment object, being the inverted mapping. + """ + return Alignment(((p[1], p[0]) + p[2:]) for p in self) + + def range(self, positions=None): + """ + Work out the range of the mapping from the given positions. + If no positions are specified, compute the range of the entire mapping. + """ + image = set() + if not self._index: + self._build_index() + if not positions: + positions = list(range(len(self._index))) + for p in positions: + image.update(f for _, f in self._index[p]) + return sorted(image) + + def __repr__(self): + """ + Produce a Giza-formatted string representing the alignment. + """ + return "Alignment(%r)" % sorted(self) + + def __str__(self): + """ + Produce a Giza-formatted string representing the alignment. + """ + return " ".join("%d-%d" % p[:2] for p in sorted(self)) + + def _build_index(self): + """ + Build a list self._index such that self._index[i] is a list + of the alignments originating from word i. + """ + self._index = [[] for _ in range(self._len + 1)] + for p in self: + self._index[p[0]].append(p) + + +def _giza2pair(pair_string): + i, j = pair_string.split("-") + return int(i), int(j) + + +def _naacl2pair(pair_string): + i, j, p = pair_string.split("-") + return int(i), int(j) + + +def _check_alignment(num_words, num_mots, alignment): + """ + Check whether the alignments are legal. 
+ + :param num_words: the number of source language words + :type num_words: int + :param num_mots: the number of target language words + :type num_mots: int + :param alignment: alignment to be checked + :type alignment: Alignment + :raise IndexError: if alignment falls outside the sentence + """ + + assert type(alignment) is Alignment + + if not all(0 <= pair[0] < num_words for pair in alignment): + raise IndexError("Alignment is outside boundary of words") + if not all(pair[1] is None or 0 <= pair[1] < num_mots for pair in alignment): + raise IndexError("Alignment is outside boundary of mots") + + +PhraseTableEntry = namedtuple("PhraseTableEntry", ["trg_phrase", "log_prob"]) + + +class PhraseTable: + """ + In-memory store of translations for a given phrase, and the log + probability of the those translations + """ + + def __init__(self): + self.src_phrases = dict() + + def translations_for(self, src_phrase): + """ + Get the translations for a source language phrase + + :param src_phrase: Source language phrase of interest + :type src_phrase: tuple(str) + + :return: A list of target language phrases that are translations + of ``src_phrase``, ordered in decreasing order of + likelihood. Each list element is a tuple of the target + phrase and its log probability. + :rtype: list(PhraseTableEntry) + """ + return self.src_phrases[src_phrase] + + def add(self, src_phrase, trg_phrase, log_prob): + """ + :type src_phrase: tuple(str) + :type trg_phrase: tuple(str) + + :param log_prob: Log probability that given ``src_phrase``, + ``trg_phrase`` is its translation + :type log_prob: float + """ + entry = PhraseTableEntry(trg_phrase=trg_phrase, log_prob=log_prob) + if src_phrase not in self.src_phrases: + self.src_phrases[src_phrase] = [] + self.src_phrases[src_phrase].append(entry) + self.src_phrases[src_phrase].sort(key=lambda e: e.log_prob, reverse=True) + + def __contains__(self, src_phrase): + return src_phrase in self.src_phrases diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm1.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm1.py new file mode 100644 index 0000000000000000000000000000000000000000..badb896968633d0db99f9b8fb2a7679b65d9a534 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm1.py @@ -0,0 +1,251 @@ +# Natural Language Toolkit: IBM Model 1 +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Chin Yee Lee +# Hengfeng Li +# Ruxin Hou +# Calvin Tanujaya Lim +# Based on earlier version by: +# Will Zhang +# Guan Gui +# URL: +# For license information, see LICENSE.TXT + +""" +Lexical translation model that ignores word order. + +In IBM Model 1, word order is ignored for simplicity. As long as the +word alignments are equivalent, it doesn't matter where the word occurs +in the source or target sentence. Thus, the following three alignments +are equally likely:: + + Source: je mange du jambon + Target: i eat some ham + Alignment: (0,0) (1,1) (2,2) (3,3) + + Source: je mange du jambon + Target: some ham eat i + Alignment: (0,2) (1,3) (2,1) (3,1) + + Source: du jambon je mange + Target: eat i some ham + Alignment: (0,3) (1,2) (2,0) (3,1) + +Note that an alignment is represented here as +(word_index_in_target, word_index_in_source). + +The EM algorithm used in Model 1 is: + +:E step: In the training data, count how many times a source language + word is translated into a target language word, weighted by + the prior probability of the translation. 
+ +:M step: Estimate the new probability of translation based on the + counts from the Expectation step. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:s: A word in the source language +:t: A word in the target language + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict + +from nltk.translate import AlignedSent, Alignment, IBMModel +from nltk.translate.ibm_model import Counts + + +class IBMModel1(IBMModel): + """ + Lexical translation model that ignores word order + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + + >>> ibm1 = IBMModel1(bitext, 5) + + >>> print(round(ibm1.translation_table['buch']['book'], 3)) + 0.889 + >>> print(round(ibm1.translation_table['das']['book'], 3)) + 0.062 + >>> print(round(ibm1.translation_table['buch'][None], 3)) + 0.113 + >>> print(round(ibm1.translation_table['ja'][None], 3)) + 0.073 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, the following entry must be present: + ``translation_table``. + See ``IBMModel`` for the type and purpose of this table. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + + if probability_tables is None: + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + self.align_all(sentence_aligned_corpus) + + def set_uniform_probabilities(self, sentence_aligned_corpus): + initial_prob = 1 / len(self.trg_vocab) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "Target language vocabulary is too large (" + + str(len(self.trg_vocab)) + + " words). 
" + "Results may be less accurate." + ) + + for t in self.trg_vocab: + self.translation_table[t] = defaultdict(lambda: initial_prob) + + def train(self, parallel_corpus): + counts = Counts() + for aligned_sentence in parallel_corpus: + trg_sentence = aligned_sentence.words + src_sentence = [None] + aligned_sentence.mots + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_all_alignments(src_sentence, trg_sentence) + + # E step (b): Collect counts + for t in trg_sentence: + for s in src_sentence: + count = self.prob_alignment_point(s, t) + normalized_count = count / total_count[t] + counts.t_given_s[t][s] += normalized_count + counts.any_t_given_s[s] += normalized_count + + # M step: Update probabilities with maximum likelihood estimate + self.maximize_lexical_translation_probabilities(counts) + + def prob_all_alignments(self, src_sentence, trg_sentence): + """ + Computes the probability of all possible word alignments, + expressed as a marginal distribution over target words t + + Each entry in the return value represents the contribution to + the total alignment probability by the target word t. + + To obtain probability(alignment | src_sentence, trg_sentence), + simply sum the entries in the return value. + + :return: Probability of t for all s in ``src_sentence`` + :rtype: dict(str): float + """ + alignment_prob_for_t = defaultdict(lambda: 0.0) + for t in trg_sentence: + for s in src_sentence: + alignment_prob_for_t[t] += self.prob_alignment_point(s, t) + return alignment_prob_for_t + + def prob_alignment_point(self, s, t): + """ + Probability that word ``t`` in the target sentence is aligned to + word ``s`` in the source sentence + """ + return self.translation_table[t][s] + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + prob = 1.0 + + for j, i in enumerate(alignment_info.alignment): + if j == 0: + continue # skip the dummy zeroeth element + trg_word = alignment_info.trg_sentence[j] + src_word = alignment_info.src_sentence[i] + prob *= self.translation_table[trg_word][src_word] + + return max(prob, IBMModel.MIN_PROB) + + def align_all(self, parallel_corpus): + for sentence_pair in parallel_corpus: + self.align(sentence_pair) + + def align(self, sentence_pair): + """ + Determines the best word alignment for one sentence pair from + the corpus that the model was trained on. + + The best alignment will be set in ``sentence_pair`` when the + method returns. In contrast with the internal implementation of + IBM models, the word indices in the ``Alignment`` are zero- + indexed, not one-indexed. 
+ + :param sentence_pair: A sentence in the source language and its + counterpart sentence in the target language + :type sentence_pair: AlignedSent + """ + best_alignment = [] + + for j, trg_word in enumerate(sentence_pair.words): + # Initialize trg_word to align with the NULL token + best_prob = max(self.translation_table[trg_word][None], IBMModel.MIN_PROB) + best_alignment_point = None + for i, src_word in enumerate(sentence_pair.mots): + align_prob = self.translation_table[trg_word][src_word] + if align_prob >= best_prob: # prefer newer word in case of tie + best_prob = align_prob + best_alignment_point = i + + best_alignment.append((j, best_alignment_point)) + + sentence_pair.alignment = Alignment(best_alignment) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm3.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm3.py new file mode 100644 index 0000000000000000000000000000000000000000..f295dee0b563bbcb9a5b9557c8d1602942a75bc3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm3.py @@ -0,0 +1,346 @@ +# Natural Language Toolkit: IBM Model 3 +# +# Copyright (C) 2001-2013 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that considers how a word can be aligned to +multiple words in another language. + +IBM Model 3 improves on Model 2 by directly modeling the phenomenon +where a word in one language may be translated into zero or more words +in another. This is expressed by the fertility probability, +n(phi | source word). + +If a source word translates into more than one word, it is possible to +generate sentences that have the same alignment in multiple ways. This +is modeled by a distortion step. The distortion probability, d(j|i,l,m), +predicts a target word position, given its aligned source word's +position. The distortion probability replaces the alignment probability +of Model 2. + +The fertility probability is not applicable for NULL. Target words that +align to NULL are assumed to be distributed uniformly in the target +sentence. The existence of these words is modeled by p1, the probability +that a target word produced by a real source word requires another +target word that is produced by NULL. + +The EM algorithm used in Model 3 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) count how many times a particular position in the target + sentence is aligned to a particular position in the source + sentence + - (c) count how many times a source word is aligned to phi number + of target words + - (d) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Because there are too many possible alignments, only the most probable +ones are considered. First, the best alignment is determined using prior +probabilities. Then, a hill climbing approach is used to find other good +candidates. 
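+
+Putting the steps together (see the Notations section below for the
+symbols), the generative story corresponds informally to::
+
+    P(t, a | s) ~ choose(m - phi_0, phi_0) * p1^phi_0 * p0^(m - 2*phi_0)
+                  * product over i of  phi_i! * n(phi_i | s_i)
+                  * product over j of  t(t_j | s_aj) * d(j | aj, l, m)
+
+Up to clamping at a minimum probability, this is essentially the quantity
+computed by ``IBMModel3.prob_t_a_given_s``.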
+ +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel2 +from nltk.translate.ibm_model import Counts + + +class IBMModel3(IBMModel): + """ + Translation model that considers how a word can be aligned to + multiple words in another language + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + + >>> ibm3 = IBMModel3(bitext, 5) + + >>> print(round(ibm3.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm3.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm3.translation_table['ja'][None], 3)) + 1.0 + + >>> print(round(ibm3.distortion_table[1][1][2][2], 3)) + 1.0 + >>> print(round(ibm3.distortion_table[1][2][2][2], 3)) + 0.0 + >>> print(round(ibm3.distortion_table[2][2][4][5], 3)) + 0.75 + + >>> print(round(ibm3.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm3.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm3.p1, 3)) + 0.054 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, a distortion model, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. 
Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``distortion_table``. + See ``IBMModel`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + + if probability_tables is None: + # Get translation and alignment probabilities from IBM Model 2 + ibm2 = IBMModel2(sentence_aligned_corpus, iterations) + self.translation_table = ibm2.translation_table + self.alignment_table = ibm2.alignment_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.distortion_table = probability_tables["distortion_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.distortion_table = defaultdict( + lambda: defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + ) + """ + dict[int][int][int][int]: float. Probability(j | i,l,m). + Values accessed as ``distortion_table[j][i][l][m]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + # d(j | i,l,m) = 1 / m for all i, j, l, m + l_m_combinations = set() + for aligned_sentence in sentence_aligned_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + if (l, m) not in l_m_combinations: + l_m_combinations.add((l, m)) + initial_prob = 1 / m + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(m) + + " words). Results may be less accurate." 
+ ) + for j in range(1, m + 1): + for i in range(0, l + 1): + self.distortion_table[j][i][l][m] = initial_prob + + # simple initialization, taken from GIZA++ + self.fertility_table[0] = defaultdict(lambda: 0.2) + self.fertility_table[1] = defaultdict(lambda: 0.65) + self.fertility_table[2] = defaultdict(lambda: 0.1) + self.fertility_table[3] = defaultdict(lambda: 0.04) + MAX_FERTILITY = 10 + initial_fert_prob = 0.01 / (MAX_FERTILITY - 4) + for phi in range(4, MAX_FERTILITY): + self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob) + + self.p1 = 0.5 + + def train(self, parallel_corpus): + counts = Model3Counts() + for aligned_sentence in parallel_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + counts.update_distortion(normalized_count, alignment_info, j, l, m) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_distortion_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def maximize_distortion_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + for j, i_s in counts.distortion.items(): + for i, src_sentence_lengths in i_s.items(): + for l, trg_sentence_lengths in src_sentence_lengths.items(): + for m in trg_sentence_lengths: + estimate = ( + counts.distortion[j][i][l][m] + / counts.distortion_for_any_j[i][l][m] + ) + self.distortion_table[j][i][l][m] = max(estimate, MIN_PROB) + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + src_sentence = alignment_info.src_sentence + trg_sentence = alignment_info.trg_sentence + l = len(src_sentence) - 1 # exclude NULL + m = len(trg_sentence) - 1 + p1 = self.p1 + p0 = 1 - p1 + + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + + # Combine NULL insertion probability + null_fertility = alignment_info.fertility_of_i(0) + probability *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if probability < MIN_PROB: + return MIN_PROB + + # Compute combination (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + probability *= (m - null_fertility - i + 1) / i + if probability < MIN_PROB: + return MIN_PROB + + # Combine fertility probabilities + for i in range(1, l + 1): + fertility = alignment_info.fertility_of_i(i) + probability *= ( + factorial(fertility) * self.fertility_table[fertility][src_sentence[i]] + ) + if probability < MIN_PROB: + return MIN_PROB + + # 
Combine lexical and distortion probabilities + for j in range(1, m + 1): + t = trg_sentence[j] + i = alignment_info.alignment[j] + s = src_sentence[i] + + probability *= ( + self.translation_table[t][s] * self.distortion_table[j][i][l][m] + ) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + +class Model3Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for distortion. + """ + + def __init__(self): + super().__init__() + self.distortion = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) + ) + self.distortion_for_any_j = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + + def update_distortion(self, count, alignment_info, j, l, m): + i = alignment_info.alignment[j] + self.distortion[j][i][l][m] += count + self.distortion_for_any_j[i][l][m] += count diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm4.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm4.py new file mode 100644 index 0000000000000000000000000000000000000000..c7686939ac5027d6e16147cc82611cd4519ea51e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm4.py @@ -0,0 +1,490 @@ +# Natural Language Toolkit: IBM Model 4 +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that reorders output words based on their type and +distance from other related words in the output sentence. + +IBM Model 4 improves the distortion model of Model 3, motivated by the +observation that certain words tend to be re-ordered in a predictable +way relative to one another. For example, in English +usually has its order flipped as in French. + +Model 4 requires words in the source and target vocabularies to be +categorized into classes. This can be linguistically driven, like parts +of speech (adjective, nouns, prepositions, etc). Word classes can also +be obtained by statistical methods. The original IBM Model 4 uses an +information theoretic approach to group words into 50 classes for each +vocabulary. + +Terminology +----------- + +:Cept: + A source word with non-zero fertility i.e. aligned to one or more + target words. +:Tablet: + The set of target word(s) aligned to a cept. +:Head of cept: + The first word of the tablet of that cept. +:Center of cept: + The average position of the words in that cept's tablet. If the + value is not an integer, the ceiling is taken. + For example, for a tablet with words in positions 2, 5, 6 in the + target sentence, the center of the corresponding cept is + ceil((2 + 5 + 6) / 3) = 5 +:Displacement: + For a head word, defined as (position of head word - position of + previous cept's center). Can be positive or negative. + For a non-head word, defined as (position of non-head word - + position of previous word in the same tablet). Always positive, + because successive words in a tablet are assumed to appear to the + right of the previous word. + +In contrast to Model 3 which reorders words in a tablet independently of +other words, Model 4 distinguishes between three cases. + +1. Words generated by NULL are distributed uniformly. +2. For a head word t, its position is modeled by the probability + d_head(displacement | word_class_s(s),word_class_t(t)), + where s is the previous cept, and word_class_s and word_class_t maps + s and t to a source and target language word class respectively. +3. 
For a non-head word t, its position is modeled by the probability + d_non_head(displacement | word_class_t(t)) + +The EM algorithm used in Model 4 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) for a particular word class, count how many times a head + word is located at a particular displacement from the + previous cept's center + - (c) for a particular word class, count how many times a + non-head word is located at a particular displacement from + the previous target word + - (d) count how many times a source word is aligned to phi number + of target words + - (e) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Like Model 3, there are too many possible alignments to consider. Thus, +a hill climbing approach is used to sample good candidates. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 +:dj: Displacement, Δj + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
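
To make the terminology above concrete, the center of a cept and the head/non-head displacements can be computed directly. This is an illustrative sketch only; the tablet positions repeat the example from the Terminology section, and the previous cept's center (4) is a hypothetical value:

>>> from math import ceil
>>> tablet = [2, 5, 6]               # target positions aligned to one cept
>>> ceil(sum(tablet) / len(tablet))  # center of the cept
5
>>> 2 - 4   # head word displacement from the previous cept's center (assumed to be 4)
-2
>>> 5 - 2   # non-head word displacement from the previous word in the same tablet
3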
+""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel3 +from nltk.translate.ibm_model import Counts, longest_target_sentence_length + + +class IBMModel4(IBMModel): + """ + Translation model that reorders output words based on their type and + their distance from other related words in the output sentence + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 } + >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 } + + >>> ibm4 = IBMModel4(bitext, 5, src_classes, trg_classes) + + >>> print(round(ibm4.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm4.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm4.translation_table['ja'][None], 3)) + 1.0 + + >>> print(round(ibm4.head_distortion_table[1][0][1], 3)) + 1.0 + >>> print(round(ibm4.head_distortion_table[2][0][1], 3)) + 0.0 + >>> print(round(ibm4.non_head_distortion_table[3][6], 3)) + 0.5 + + >>> print(round(ibm4.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm4.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm4.p1, 3)) + 0.033 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + def __init__( + self, + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + probability_tables=None, + ): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, distortion models, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param source_word_classes: Lookup table that maps a source word + to its word class, the latter represented by an integer id + :type source_word_classes: dict[str]: int + + :param target_word_classes: Lookup table that maps a target word + to its word class, the latter represented by an integer id + :type target_word_classes: dict[str]: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. 
+ If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``head_distortion_table``, + ``non_head_distortion_table``. See ``IBMModel`` and + ``IBMModel4`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + self.src_classes = source_word_classes + self.trg_classes = target_word_classes + + if probability_tables is None: + # Get probabilities from IBM model 3 + ibm3 = IBMModel3(sentence_aligned_corpus, iterations) + self.translation_table = ibm3.translation_table + self.alignment_table = ibm3.alignment_table + self.fertility_table = ibm3.fertility_table + self.p1 = ibm3.p1 + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.head_distortion_table = probability_tables["head_distortion_table"] + self.non_head_distortion_table = probability_tables[ + "non_head_distortion_table" + ] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.head_distortion_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(displacement of head + word | word class of previous cept,target word class). + Values accessed as ``distortion_table[dj][src_class][trg_class]``. + """ + + self.non_head_distortion_table = defaultdict( + lambda: defaultdict(lambda: self.MIN_PROB) + ) + """ + dict[int][int]: float. Probability(displacement of non-head + word | target word class). + Values accessed as ``distortion_table[dj][trg_class]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Set distortion probabilities uniformly to + 1 / cardinality of displacement values + """ + max_m = longest_target_sentence_length(sentence_aligned_corpus) + + # The maximum displacement is m-1, when a word is in the last + # position m of the target sentence and the previously placed + # word is in the first position. + # Conversely, the minimum displacement is -(m-1). + # Thus, the displacement range is (m-1) - (-(m-1)). Note that + # displacement cannot be zero and is not included in the range. + if max_m <= 1: + initial_prob = IBMModel.MIN_PROB + else: + initial_prob = 1 / (2 * (max_m - 1)) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(max_m) + + " words). Results may be less accurate." 
+ ) + + for dj in range(1, max_m): + self.head_distortion_table[dj] = defaultdict( + lambda: defaultdict(lambda: initial_prob) + ) + self.head_distortion_table[-dj] = defaultdict( + lambda: defaultdict(lambda: initial_prob) + ) + self.non_head_distortion_table[dj] = defaultdict(lambda: initial_prob) + self.non_head_distortion_table[-dj] = defaultdict(lambda: initial_prob) + + def train(self, parallel_corpus): + counts = Model4Counts() + for aligned_sentence in parallel_corpus: + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + counts.update_distortion( + normalized_count, + alignment_info, + j, + self.src_classes, + self.trg_classes, + ) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_distortion_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def maximize_distortion_probabilities(self, counts): + head_d_table = self.head_distortion_table + for dj, src_classes in counts.head_distortion.items(): + for s_cls, trg_classes in src_classes.items(): + for t_cls in trg_classes: + estimate = ( + counts.head_distortion[dj][s_cls][t_cls] + / counts.head_distortion_for_any_dj[s_cls][t_cls] + ) + head_d_table[dj][s_cls][t_cls] = max(estimate, IBMModel.MIN_PROB) + + non_head_d_table = self.non_head_distortion_table + for dj, trg_classes in counts.non_head_distortion.items(): + for t_cls in trg_classes: + estimate = ( + counts.non_head_distortion[dj][t_cls] + / counts.non_head_distortion_for_any_dj[t_cls] + ) + non_head_d_table[dj][t_cls] = max(estimate, IBMModel.MIN_PROB) + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + return IBMModel4.model4_prob_t_a_given_s(alignment_info, self) + + @staticmethod # exposed for Model 5 to use + def model4_prob_t_a_given_s(alignment_info, ibm_model): + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + + def null_generation_term(): + # Binomial distribution: B(m - null_fertility, p1) + value = 1.0 + p1 = ibm_model.p1 + p0 = 1 - p1 + null_fertility = alignment_info.fertility_of_i(0) + m = len(alignment_info.trg_sentence) - 1 + value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if value < MIN_PROB: + return MIN_PROB + + # Combination: (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + value *= (m - null_fertility - i + 1) / i + return value + + def fertility_term(): + value = 1.0 + src_sentence = 
alignment_info.src_sentence + for i in range(1, len(src_sentence)): + fertility = alignment_info.fertility_of_i(i) + value *= ( + factorial(fertility) + * ibm_model.fertility_table[fertility][src_sentence[i]] + ) + if value < MIN_PROB: + return MIN_PROB + return value + + def lexical_translation_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + s = alignment_info.src_sentence[i] + return ibm_model.translation_table[t][s] + + def distortion_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + if i == 0: + # case 1: t is aligned to NULL + return 1.0 + if alignment_info.is_head_word(j): + # case 2: t is the first word of a tablet + previous_cept = alignment_info.previous_cept(j) + src_class = None + if previous_cept is not None: + previous_s = alignment_info.src_sentence[previous_cept] + src_class = ibm_model.src_classes[previous_s] + trg_class = ibm_model.trg_classes[t] + dj = j - alignment_info.center_of_cept(previous_cept) + return ibm_model.head_distortion_table[dj][src_class][trg_class] + + # case 3: t is a subsequent word of a tablet + previous_position = alignment_info.previous_in_tablet(j) + trg_class = ibm_model.trg_classes[t] + dj = j - previous_position + return ibm_model.non_head_distortion_table[dj][trg_class] + + # end nested functions + + # Abort computation whenever probability falls below MIN_PROB at + # any point, since MIN_PROB can be considered as zero + probability *= null_generation_term() + if probability < MIN_PROB: + return MIN_PROB + + probability *= fertility_term() + if probability < MIN_PROB: + return MIN_PROB + + for j in range(1, len(alignment_info.trg_sentence)): + probability *= lexical_translation_term(j) + if probability < MIN_PROB: + return MIN_PROB + + probability *= distortion_term(j) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + +class Model4Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for distortion. 
+ """ + + def __init__(self): + super().__init__() + self.head_distortion = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.head_distortion_for_any_dj = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_distortion = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_distortion_for_any_dj = defaultdict(lambda: 0.0) + + def update_distortion(self, count, alignment_info, j, src_classes, trg_classes): + i = alignment_info.alignment[j] + t = alignment_info.trg_sentence[j] + if i == 0: + # case 1: t is aligned to NULL + pass + elif alignment_info.is_head_word(j): + # case 2: t is the first word of a tablet + previous_cept = alignment_info.previous_cept(j) + if previous_cept is not None: + previous_src_word = alignment_info.src_sentence[previous_cept] + src_class = src_classes[previous_src_word] + else: + src_class = None + trg_class = trg_classes[t] + dj = j - alignment_info.center_of_cept(previous_cept) + self.head_distortion[dj][src_class][trg_class] += count + self.head_distortion_for_any_dj[src_class][trg_class] += count + else: + # case 3: t is a subsequent word of a tablet + previous_j = alignment_info.previous_in_tablet(j) + trg_class = trg_classes[t] + dj = j - previous_j + self.non_head_distortion[dj][trg_class] += count + self.non_head_distortion_for_any_dj[trg_class] += count diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm_model.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ed9a49408638605f8d4d627883d51e04816877d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm_model.py @@ -0,0 +1,549 @@ +# Natural Language Toolkit: IBM Model Core +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Common methods and classes for all IBM models. See ``IBMModel1``, +``IBMModel2``, ``IBMModel3``, ``IBMModel4``, and ``IBMModel5`` +for specific implementations. + +The IBM models are a series of generative models that learn lexical +translation probabilities, p(target language word|source language word), +given a sentence-aligned parallel corpus. + +The models increase in sophistication from model 1 to 5. Typically, the +output of lower models is used to seed the higher models. All models +use the Expectation-Maximization (EM) algorithm to learn various +probability tables. + +Words in a sentence are one-indexed. The first word of a sentence has +position 1, not 0. Index 0 is reserved in the source sentence for the +NULL token. The concept of position does not apply to NULL, but it is +indexed at 0 by convention. + +Each target word is aligned to exactly one source word or the NULL +token. + +References: +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
+""" + +from bisect import insort_left +from collections import defaultdict +from copy import deepcopy +from math import ceil + + +def longest_target_sentence_length(sentence_aligned_corpus): + """ + :param sentence_aligned_corpus: Parallel corpus under consideration + :type sentence_aligned_corpus: list(AlignedSent) + :return: Number of words in the longest target language sentence + of ``sentence_aligned_corpus`` + """ + max_m = 0 + for aligned_sentence in sentence_aligned_corpus: + m = len(aligned_sentence.words) + max_m = max(m, max_m) + return max_m + + +class IBMModel: + """ + Abstract base class for all IBM models + """ + + # Avoid division by zero and precision errors by imposing a minimum + # value for probabilities. Note that this approach is theoretically + # incorrect, since it may create probabilities that sum to more + # than 1. In practice, the contribution of probabilities with MIN_PROB + # is tiny enough that the value of MIN_PROB can be treated as zero. + MIN_PROB = 1.0e-12 # GIZA++ is more liberal and uses 1.0e-7 + + def __init__(self, sentence_aligned_corpus): + self.init_vocab(sentence_aligned_corpus) + self.reset_probabilities() + + def reset_probabilities(self): + self.translation_table = defaultdict( + lambda: defaultdict(lambda: IBMModel.MIN_PROB) + ) + """ + dict[str][str]: float. Probability(target word | source word). + Values accessed as ``translation_table[target_word][source_word]``. + """ + + self.alignment_table = defaultdict( + lambda: defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: IBMModel.MIN_PROB)) + ) + ) + """ + dict[int][int][int][int]: float. Probability(i | j,l,m). + Values accessed as ``alignment_table[i][j][l][m]``. + Used in model 2 and hill climbing in models 3 and above + """ + + self.fertility_table = defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + """ + dict[int][str]: float. Probability(fertility | source word). + Values accessed as ``fertility_table[fertility][source_word]``. + Used in model 3 and higher. + """ + + self.p1 = 0.5 + """ + Probability that a generated word requires another target word + that is aligned to NULL. + Used in model 3 and higher. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Initialize probability tables to a uniform distribution + + Derived classes should implement this accordingly. + """ + pass + + def init_vocab(self, sentence_aligned_corpus): + src_vocab = set() + trg_vocab = set() + for aligned_sentence in sentence_aligned_corpus: + trg_vocab.update(aligned_sentence.words) + src_vocab.update(aligned_sentence.mots) + # Add the NULL token + src_vocab.add(None) + + self.src_vocab = src_vocab + """ + set(str): All source language words used in training + """ + + self.trg_vocab = trg_vocab + """ + set(str): All target language words used in training + """ + + def sample(self, sentence_pair): + """ + Sample the most probable alignments from the entire alignment + space + + First, determine the best alignment according to IBM Model 2. + With this initial alignment, use hill climbing to determine the + best alignment according to a higher IBM Model. Add this + alignment and its neighbors to the sample set. Repeat this + process with other initial alignments obtained by pegging an + alignment point. + + Hill climbing may be stuck in a local maxima, hence the pegging + and trying out of different alignments. 
+ + :param sentence_pair: Source and target language sentence pair + to generate a sample of alignments from + :type sentence_pair: AlignedSent + + :return: A set of best alignments represented by their ``AlignmentInfo`` + and the best alignment of the set for convenience + :rtype: set(AlignmentInfo), AlignmentInfo + """ + sampled_alignments = set() + l = len(sentence_pair.mots) + m = len(sentence_pair.words) + + # Start from the best model 2 alignment + initial_alignment = self.best_model2_alignment(sentence_pair) + potential_alignment = self.hillclimb(initial_alignment) + sampled_alignments.update(self.neighboring(potential_alignment)) + best_alignment = potential_alignment + + # Start from other model 2 alignments, + # with the constraint that j is aligned (pegged) to i + for j in range(1, m + 1): + for i in range(0, l + 1): + initial_alignment = self.best_model2_alignment(sentence_pair, j, i) + potential_alignment = self.hillclimb(initial_alignment, j) + neighbors = self.neighboring(potential_alignment, j) + sampled_alignments.update(neighbors) + if potential_alignment.score > best_alignment.score: + best_alignment = potential_alignment + + return sampled_alignments, best_alignment + + def best_model2_alignment(self, sentence_pair, j_pegged=None, i_pegged=0): + """ + Finds the best alignment according to IBM Model 2 + + Used as a starting point for hill climbing in Models 3 and + above, because it is easier to compute than the best alignments + in higher models + + :param sentence_pair: Source and target language sentence pair + to be word-aligned + :type sentence_pair: AlignedSent + + :param j_pegged: If specified, the alignment point of j_pegged + will be fixed to i_pegged + :type j_pegged: int + + :param i_pegged: Alignment point to j_pegged + :type i_pegged: int + """ + src_sentence = [None] + sentence_pair.mots + trg_sentence = ["UNUSED"] + sentence_pair.words # 1-indexed + + l = len(src_sentence) - 1 # exclude NULL + m = len(trg_sentence) - 1 + + alignment = [0] * (m + 1) # init all alignments to NULL + cepts = [[] for i in range(l + 1)] # init all cepts to empty list + + for j in range(1, m + 1): + if j == j_pegged: + # use the pegged alignment instead of searching for best one + best_i = i_pegged + else: + best_i = 0 + max_alignment_prob = IBMModel.MIN_PROB + t = trg_sentence[j] + + for i in range(0, l + 1): + s = src_sentence[i] + alignment_prob = ( + self.translation_table[t][s] * self.alignment_table[i][j][l][m] + ) + + if alignment_prob >= max_alignment_prob: + max_alignment_prob = alignment_prob + best_i = i + + alignment[j] = best_i + cepts[best_i].append(j) + + return AlignmentInfo( + tuple(alignment), tuple(src_sentence), tuple(trg_sentence), cepts + ) + + def hillclimb(self, alignment_info, j_pegged=None): + """ + Starting from the alignment in ``alignment_info``, look at + neighboring alignments iteratively for the best one + + There is no guarantee that the best alignment in the alignment + space will be found, because the algorithm might be stuck in a + local maximum. 
+ + :param j_pegged: If specified, the search will be constrained to + alignments where ``j_pegged`` remains unchanged + :type j_pegged: int + + :return: The best alignment found from hill climbing + :rtype: AlignmentInfo + """ + alignment = alignment_info # alias with shorter name + max_probability = self.prob_t_a_given_s(alignment) + + while True: + old_alignment = alignment + for neighbor_alignment in self.neighboring(alignment, j_pegged): + neighbor_probability = self.prob_t_a_given_s(neighbor_alignment) + + if neighbor_probability > max_probability: + alignment = neighbor_alignment + max_probability = neighbor_probability + + if alignment == old_alignment: + # Until there are no better alignments + break + + alignment.score = max_probability + return alignment + + def neighboring(self, alignment_info, j_pegged=None): + """ + Determine the neighbors of ``alignment_info``, obtained by + moving or swapping one alignment point + + :param j_pegged: If specified, neighbors that have a different + alignment point from j_pegged will not be considered + :type j_pegged: int + + :return: A set neighboring alignments represented by their + ``AlignmentInfo`` + :rtype: set(AlignmentInfo) + """ + neighbors = set() + + l = len(alignment_info.src_sentence) - 1 # exclude NULL + m = len(alignment_info.trg_sentence) - 1 + original_alignment = alignment_info.alignment + original_cepts = alignment_info.cepts + + for j in range(1, m + 1): + if j != j_pegged: + # Add alignments that differ by one alignment point + for i in range(0, l + 1): + new_alignment = list(original_alignment) + new_cepts = deepcopy(original_cepts) + old_i = original_alignment[j] + + # update alignment + new_alignment[j] = i + + # update cepts + insort_left(new_cepts[i], j) + new_cepts[old_i].remove(j) + + new_alignment_info = AlignmentInfo( + tuple(new_alignment), + alignment_info.src_sentence, + alignment_info.trg_sentence, + new_cepts, + ) + neighbors.add(new_alignment_info) + + for j in range(1, m + 1): + if j != j_pegged: + # Add alignments that have two alignment points swapped + for other_j in range(1, m + 1): + if other_j != j_pegged and other_j != j: + new_alignment = list(original_alignment) + new_cepts = deepcopy(original_cepts) + other_i = original_alignment[other_j] + i = original_alignment[j] + + # update alignments + new_alignment[j] = other_i + new_alignment[other_j] = i + + # update cepts + new_cepts[other_i].remove(other_j) + insort_left(new_cepts[other_i], j) + new_cepts[i].remove(j) + insort_left(new_cepts[i], other_j) + + new_alignment_info = AlignmentInfo( + tuple(new_alignment), + alignment_info.src_sentence, + alignment_info.trg_sentence, + new_cepts, + ) + neighbors.add(new_alignment_info) + + return neighbors + + def maximize_lexical_translation_probabilities(self, counts): + for t, src_words in counts.t_given_s.items(): + for s in src_words: + estimate = counts.t_given_s[t][s] / counts.any_t_given_s[s] + self.translation_table[t][s] = max(estimate, IBMModel.MIN_PROB) + + def maximize_fertility_probabilities(self, counts): + for phi, src_words in counts.fertility.items(): + for s in src_words: + estimate = counts.fertility[phi][s] / counts.fertility_for_any_phi[s] + self.fertility_table[phi][s] = max(estimate, IBMModel.MIN_PROB) + + def maximize_null_generation_probabilities(self, counts): + p1_estimate = counts.p1 / (counts.p1 + counts.p0) + p1_estimate = max(p1_estimate, IBMModel.MIN_PROB) + # Clip p1 if it is too large, because p0 = 1 - p1 should not be + # smaller than MIN_PROB + self.p1 = min(p1_estimate, 
1 - IBMModel.MIN_PROB) + + def prob_of_alignments(self, alignments): + probability = 0 + for alignment_info in alignments: + probability += self.prob_t_a_given_s(alignment_info) + return probability + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + + All required information is assumed to be in ``alignment_info`` + and self. + + Derived classes should override this method + """ + return 0.0 + + +class AlignmentInfo: + """ + Helper data object for training IBM Models 3 and up + + Read-only. For a source sentence and its counterpart in the target + language, this class holds information about the sentence pair's + alignment, cepts, and fertility. + + Warning: Alignments are one-indexed here, in contrast to + nltk.translate.Alignment and AlignedSent, which are zero-indexed + This class is not meant to be used outside of IBM models. + """ + + def __init__(self, alignment, src_sentence, trg_sentence, cepts): + if not isinstance(alignment, tuple): + raise TypeError( + "The alignment must be a tuple because it is used " + "to uniquely identify AlignmentInfo objects." + ) + + self.alignment = alignment + """ + tuple(int): Alignment function. ``alignment[j]`` is the position + in the source sentence that is aligned to the position j in the + target sentence. + """ + + self.src_sentence = src_sentence + """ + tuple(str): Source sentence referred to by this object. + Should include NULL token (None) in index 0. + """ + + self.trg_sentence = trg_sentence + """ + tuple(str): Target sentence referred to by this object. + Should have a dummy element in index 0 so that the first word + starts from index 1. + """ + + self.cepts = cepts + """ + list(list(int)): The positions of the target words, in + ascending order, aligned to a source word position. For example, + cepts[4] = (2, 3, 7) means that words in positions 2, 3 and 7 + of the target sentence are aligned to the word in position 4 of + the source sentence + """ + + self.score = None + """ + float: Optional. 
Probability of alignment, as defined by the + IBM model that assesses this alignment + """ + + def fertility_of_i(self, i): + """ + Fertility of word in position ``i`` of the source sentence + """ + return len(self.cepts[i]) + + def is_head_word(self, j): + """ + :return: Whether the word in position ``j`` of the target + sentence is a head word + """ + i = self.alignment[j] + return self.cepts[i][0] == j + + def center_of_cept(self, i): + """ + :return: The ceiling of the average positions of the words in + the tablet of cept ``i``, or 0 if ``i`` is None + """ + if i is None: + return 0 + + average_position = sum(self.cepts[i]) / len(self.cepts[i]) + return int(ceil(average_position)) + + def previous_cept(self, j): + """ + :return: The previous cept of ``j``, or None if ``j`` belongs to + the first cept + """ + i = self.alignment[j] + if i == 0: + raise ValueError( + "Words aligned to NULL cannot have a previous " + "cept because NULL has no position" + ) + previous_cept = i - 1 + while previous_cept > 0 and self.fertility_of_i(previous_cept) == 0: + previous_cept -= 1 + + if previous_cept <= 0: + previous_cept = None + return previous_cept + + def previous_in_tablet(self, j): + """ + :return: The position of the previous word that is in the same + tablet as ``j``, or None if ``j`` is the first word of the + tablet + """ + i = self.alignment[j] + tablet_position = self.cepts[i].index(j) + if tablet_position == 0: + return None + return self.cepts[i][tablet_position - 1] + + def zero_indexed_alignment(self): + """ + :return: Zero-indexed alignment, suitable for use in external + ``nltk.translate`` modules like ``nltk.translate.Alignment`` + :rtype: list(tuple) + """ + zero_indexed_alignment = [] + for j in range(1, len(self.trg_sentence)): + i = self.alignment[j] - 1 + if i < 0: + i = None # alignment to NULL token + zero_indexed_alignment.append((j - 1, i)) + return zero_indexed_alignment + + def __eq__(self, other): + return self.alignment == other.alignment + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.alignment) + + +class Counts: + """ + Data object to store counts of various parameters during training + """ + + def __init__(self): + self.t_given_s = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.any_t_given_s = defaultdict(lambda: 0.0) + self.p0 = 0.0 + self.p1 = 0.0 + self.fertility = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.fertility_for_any_phi = defaultdict(lambda: 0.0) + + def update_lexical_translation(self, count, alignment_info, j): + i = alignment_info.alignment[j] + t = alignment_info.trg_sentence[j] + s = alignment_info.src_sentence[i] + self.t_given_s[t][s] += count + self.any_t_given_s[s] += count + + def update_null_generation(self, count, alignment_info): + m = len(alignment_info.trg_sentence) - 1 + fertility_of_null = alignment_info.fertility_of_i(0) + self.p1 += fertility_of_null * count + self.p0 += (m - 2 * fertility_of_null) * count + + def update_fertility(self, count, alignment_info): + for i in range(0, len(alignment_info.src_sentence)): + s = alignment_info.src_sentence[i] + phi = alignment_info.fertility_of_i(i) + self.fertility[phi][s] += count + self.fertility_for_any_phi[s] += count diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/meteor_score.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/meteor_score.py new file mode 100644 index 0000000000000000000000000000000000000000..847f2ad19205816f71caff5623b1d992ef2dbfda --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/nltk/translate/meteor_score.py @@ -0,0 +1,409 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Uday Krishna +# Contributor: Tom Aarsen +# URL: +# For license information, see LICENSE.TXT + + +from itertools import chain, product +from typing import Callable, Iterable, List, Tuple + +from nltk.corpus import WordNetCorpusReader, wordnet +from nltk.stem.api import StemmerI +from nltk.stem.porter import PorterStemmer + + +def _generate_enums( + hypothesis: Iterable[str], + reference: Iterable[str], + preprocess: Callable[[str], str] = str.lower, +) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Takes in pre-tokenized inputs for hypothesis and reference and returns + enumerated word lists for each of them + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :preprocess: preprocessing method (default str.lower) + :return: enumerated words list + """ + if isinstance(hypothesis, str): + raise TypeError( + f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}' + ) + + if isinstance(reference, str): + raise TypeError( + f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}' + ) + + enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis))) + enum_reference_list = list(enumerate(map(preprocess, reference))) + return enum_hypothesis_list, enum_reference_list + + +def exact_match( + hypothesis: Iterable[str], reference: Iterable[str] +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + matches exact words in hypothesis and reference + and returns a word mapping based on the enumerated + word id between hypothesis and reference + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _match_enums(enum_hypothesis_list, enum_reference_list) + + +def _match_enums( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + matches exact words in hypothesis and reference and returns + a word mapping between enum_hypothesis_list and enum_reference_list + based on the enumerated word id. 
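
A small illustrative run on hypothetical enumerated lists (candidates are scanned from the end of each list, so matched pairs are collected in reverse order of the hypothesis positions):

>>> from nltk.translate.meteor_score import _match_enums
>>> hyp = [(0, 'the'), (1, 'cat'), (2, 'sat')]
>>> ref = [(0, 'the'), (1, 'cat'), (2, 'slept')]
>>> _match_enums(hyp, ref)
([(1, 1), (0, 0)], [(2, 'sat')], [(2, 'slept')])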
+ + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + word_match = [] + for i in range(len(enum_hypothesis_list))[::-1]: + for j in range(len(enum_reference_list))[::-1]: + if enum_hypothesis_list[i][1] == enum_reference_list[j][1]: + word_match.append( + (enum_hypothesis_list[i][0], enum_reference_list[j][0]) + ) + enum_hypothesis_list.pop(i) + enum_reference_list.pop(j) + break + return word_match, enum_hypothesis_list, enum_reference_list + + +def _enum_stem_match( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + stemmer: StemmerI = PorterStemmer(), +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Stems each word and matches them in hypothesis and reference + and returns a word mapping between enum_hypothesis_list and + enum_reference_list based on the enumerated word id. The function also + returns a enumerated list of unmatched words for hypothesis and reference. + + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + stemmed_enum_hypothesis_list = [ + (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_hypothesis_list + ] + + stemmed_enum_reference_list = [ + (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_reference_list + ] + + return _match_enums(stemmed_enum_hypothesis_list, stemmed_enum_reference_list) + + +def stem_match( + hypothesis: Iterable[str], + reference: Iterable[str], + stemmer: StemmerI = PorterStemmer(), +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Stems each word and matches them in hypothesis and reference + and returns a word mapping between hypothesis and reference + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=stemmer) + + +def _enum_wordnetsyn_match( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Matches each word in reference to a word in hypothesis + if any synonym of a hypothesis word is the exact match + to the reference word. 
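
For instance, assuming the WordNet corpus data is installed, a hypothesis word whose synset lemmas contain the reference word is matched (the word pair below is only an illustration):

>>> from nltk.translate.meteor_score import _enum_wordnetsyn_match
>>> _enum_wordnetsyn_match([(0, 'automobile')], [(0, 'car')])
([(0, 0)], [], [])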
+ + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + """ + word_match = [] + for i in range(len(enum_hypothesis_list))[::-1]: + hypothesis_syns = set( + chain.from_iterable( + ( + lemma.name() + for lemma in synset.lemmas() + if lemma.name().find("_") < 0 + ) + for synset in wordnet.synsets(enum_hypothesis_list[i][1]) + ) + ).union({enum_hypothesis_list[i][1]}) + for j in range(len(enum_reference_list))[::-1]: + if enum_reference_list[j][1] in hypothesis_syns: + word_match.append( + (enum_hypothesis_list[i][0], enum_reference_list[j][0]) + ) + enum_hypothesis_list.pop(i) + enum_reference_list.pop(j) + break + return word_match, enum_hypothesis_list, enum_reference_list + + +def wordnetsyn_match( + hypothesis: Iterable[str], + reference: Iterable[str], + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Matches each word in reference to a word in hypothesis if any synonym + of a hypothesis word is the exact match to the reference word. + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: list of mapped tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_wordnetsyn_match( + enum_hypothesis_list, enum_reference_list, wordnet=wordnet + ) + + +def _enum_align_words( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Aligns/matches words in the hypothesis to reference by sequentially + applying exact match, stemmed match and wordnet based synonym match. + in case there are multiple matches the match which has the least number + of crossing is chosen. Takes enumerated list as input instead of + string input + + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: sorted list of matched tuples, unmatched hypothesis list, + unmatched reference list + """ + exact_matches, enum_hypothesis_list, enum_reference_list = _match_enums( + enum_hypothesis_list, enum_reference_list + ) + + stem_matches, enum_hypothesis_list, enum_reference_list = _enum_stem_match( + enum_hypothesis_list, enum_reference_list, stemmer=stemmer + ) + + wns_matches, enum_hypothesis_list, enum_reference_list = _enum_wordnetsyn_match( + enum_hypothesis_list, enum_reference_list, wordnet=wordnet + ) + + return ( + sorted( + exact_matches + stem_matches + wns_matches, key=lambda wordpair: wordpair[0] + ), + enum_hypothesis_list, + enum_reference_list, + ) + + +def align_words( + hypothesis: Iterable[str], + reference: Iterable[str], + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Aligns/matches words in the hypothesis to reference by sequentially + applying exact match, stemmed match and wordnet based synonym match. 
+ In case there are multiple matches the match which has the least number + of crossing is chosen. + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_align_words( + enum_hypothesis_list, enum_reference_list, stemmer=stemmer, wordnet=wordnet + ) + + +def _count_chunks(matches: List[Tuple[int, int]]) -> int: + """ + Counts the fewest possible number of chunks such that matched unigrams + of each chunk are adjacent to each other. This is used to calculate the + fragmentation part of the metric. + + :param matches: list containing a mapping of matched words (output of align_words) + :return: Number of chunks a sentence is divided into post alignment + """ + i = 0 + chunks = 1 + while i < len(matches) - 1: + if (matches[i + 1][0] == matches[i][0] + 1) and ( + matches[i + 1][1] == matches[i][1] + 1 + ): + i += 1 + continue + i += 1 + chunks += 1 + return chunks + + +def single_meteor_score( + reference: Iterable[str], + hypothesis: Iterable[str], + preprocess: Callable[[str], str] = str.lower, + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, + alpha: float = 0.9, + beta: float = 3.0, + gamma: float = 0.5, +) -> float: + """ + Calculates METEOR score for single hypothesis and reference as per + "Meteor: An Automatic Metric for MT Evaluation with HighLevels of + Correlation with Human Judgments" by Alon Lavie and Abhaya Agarwal, + in Proceedings of ACL. + https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf + + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] + + + >>> round(single_meteor_score(reference1, hypothesis1),4) + 0.6944 + + If there is no words match during the alignment the method returns the + score as 0. We can safely return a zero instead of raising a + division by zero error as no match usually implies a bad translation. + + >>> round(single_meteor_score(['this', 'is', 'a', 'cat'], ['non', 'matching', 'hypothesis']),4) + 0.0 + + :param reference: pre-tokenized reference + :param hypothesis: pre-tokenized hypothesis + :param preprocess: preprocessing function (default str.lower) + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :param alpha: parameter for controlling relative weights of precision and recall. + :param beta: parameter for controlling shape of penalty as a + function of as a function of fragmentation. + :param gamma: relative weight assigned to fragmentation penalty. + :return: The sentence-level METEOR score. 
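
To make the combination of precision, recall and the fragmentation penalty concrete, the same arithmetic can be reproduced by hand on hypothetical counts (10 matched unigrams, hypothesis length 12, reference length 11, 4 chunks, with the default alpha=0.9, beta=3, gamma=0.5):

>>> matches, hyp_len, ref_len, chunks = 10, 12, 11, 4
>>> precision = matches / hyp_len
>>> recall = matches / ref_len
>>> fmean = (precision * recall) / (0.9 * precision + 0.1 * recall)
>>> penalty = 0.5 * (chunks / matches) ** 3
>>> round((1 - penalty) * fmean, 4)
0.8721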
+ """ + enum_hypothesis, enum_reference = _generate_enums( + hypothesis, reference, preprocess=preprocess + ) + translation_length = len(enum_hypothesis) + reference_length = len(enum_reference) + matches, _, _ = _enum_align_words( + enum_hypothesis, enum_reference, stemmer=stemmer, wordnet=wordnet + ) + matches_count = len(matches) + try: + precision = float(matches_count) / translation_length + recall = float(matches_count) / reference_length + fmean = (precision * recall) / (alpha * precision + (1 - alpha) * recall) + chunk_count = float(_count_chunks(matches)) + frag_frac = chunk_count / matches_count + except ZeroDivisionError: + return 0.0 + penalty = gamma * frag_frac**beta + return (1 - penalty) * fmean + + +def meteor_score( + references: Iterable[Iterable[str]], + hypothesis: Iterable[str], + preprocess: Callable[[str], str] = str.lower, + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, + alpha: float = 0.9, + beta: float = 3.0, + gamma: float = 0.5, +) -> float: + """ + Calculates METEOR score for hypothesis with multiple references as + described in "Meteor: An Automatic Metric for MT Evaluation with + HighLevels of Correlation with Human Judgments" by Alon Lavie and + Abhaya Agarwal, in Proceedings of ACL. + https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf + + + In case of multiple references the best score is chosen. This method + iterates over single_meteor_score and picks the best pair among all + the references for a given hypothesis + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 'forever', 'hearing', 'the', 'activity', 'guidebook', 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 'guarantees', 'the', 'military', 'forces', 'always', 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 'army', 'always', 'to', 'heed', 'the', 'directions', 'of', 'the', 'party'] + + >>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4) + 0.6944 + + If there is no words match during the alignment the method returns the + score as 0. We can safely return a zero instead of raising a + division by zero error as no match usually implies a bad translation. + + >>> round(meteor_score([['this', 'is', 'a', 'cat']], ['non', 'matching', 'hypothesis']),4) + 0.0 + + :param references: pre-tokenized reference sentences + :param hypothesis: a pre-tokenized hypothesis sentence + :param preprocess: preprocessing function (default str.lower) + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :param alpha: parameter for controlling relative weights of precision and recall. + :param beta: parameter for controlling shape of penalty as a function + of as a function of fragmentation. + :param gamma: relative weight assigned to fragmentation penalty. + :return: The sentence-level METEOR score. 
+ """ + return max( + single_meteor_score( + reference, + hypothesis, + preprocess=preprocess, + stemmer=stemmer, + wordnet=wordnet, + alpha=alpha, + beta=beta, + gamma=gamma, + ) + for reference in references + ) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ribes_score.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ribes_score.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d0bb5f14590082fb74e4a2c3613a40b6e168f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ribes_score.py @@ -0,0 +1,330 @@ +# Natural Language Toolkit: RIBES Score +# +# Copyright (C) 2001-2023 NLTK Project +# Contributors: Katsuhito Sudoh, Liling Tan, Kasramvd, J.F.Sebastian +# Mark Byers, ekhumoro, P. Ortiz +# URL: +# For license information, see LICENSE.TXT +""" RIBES score implementation """ + +import math +from itertools import islice + +from nltk.util import choose, ngrams + + +def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10): + """ + The RIBES (Rank-based Intuitive Bilingual Evaluation Score) from + Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh and + Hajime Tsukada. 2010. "Automatic Evaluation of Translation Quality for + Distant Language Pairs". In Proceedings of EMNLP. + https://www.aclweb.org/anthology/D/D10/D10-1092.pdf + + The generic RIBES scores used in shared task, e.g. Workshop for + Asian Translation (WAT) uses the following RIBES calculations: + + RIBES = kendall_tau * (alpha**p1) * (beta**bp) + + Please note that this re-implementation differs from the official + RIBES implementation and though it emulates the results as describe + in the original paper, there are further optimization implemented + in the official RIBES script. + + Users are encouraged to use the official RIBES script instead of this + implementation when evaluating your machine translation system. Refer + to https://www.kecl.ntt.co.jp/icl/lirg/ribes/ for the official script. + + :param references: a list of reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param alpha: hyperparameter used as a prior for the unigram precision. + :type alpha: float + :param beta: hyperparameter used as a prior for the brevity penalty. + :type beta: float + :return: The best ribes score from one of the references. + :rtype: float + """ + best_ribes = -1.0 + # Calculates RIBES for each reference and returns the best score. + for reference in references: + # Collects the *worder* from the ranked correlation alignments. + worder = word_rank_alignment(reference, hypothesis) + nkt = kendall_tau(worder) + + # Calculates the brevity penalty + bp = min(1.0, math.exp(1.0 - len(reference) / len(hypothesis))) + + # Calculates the unigram precision, *p1* + p1 = len(worder) / len(hypothesis) + + _ribes = nkt * (p1**alpha) * (bp**beta) + + if _ribes > best_ribes: # Keeps the best score. + best_ribes = _ribes + + return best_ribes + + +def corpus_ribes(list_of_references, hypotheses, alpha=0.25, beta=0.10): + """ + This function "calculates RIBES for a system output (hypothesis) with + multiple references, and returns "best" score among multi-references and + individual scores. The scores are corpus-wise, i.e., averaged by the number + of sentences." (c.f. RIBES version 1.03.1 code). 
+ + Different from BLEU's micro-average precision, RIBES calculates the + macro-average precision by averaging the best RIBES score for each pair of + hypothesis and its corresponding references + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> round(corpus_ribes(list_of_references, hypotheses),4) + 0.3597 + + :param references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param alpha: hyperparameter used as a prior for the unigram precision. + :type alpha: float + :param beta: hyperparameter used as a prior for the brevity penalty. + :type beta: float + :return: The best ribes score from one of the references. + :rtype: float + """ + corpus_best_ribes = 0.0 + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + corpus_best_ribes += sentence_ribes(references, hypothesis, alpha, beta) + return corpus_best_ribes / len(hypotheses) + + +def position_of_ngram(ngram, sentence): + """ + This function returns the position of the first instance of the ngram + appearing in a sentence. + + Note that one could also use string as follows but the code is a little + convoluted with type casting back and forth: + + char_pos = ' '.join(sent)[:' '.join(sent).index(' '.join(ngram))] + word_pos = char_pos.count(' ') + + Another way to conceive this is: + + return next(i for i, ng in enumerate(ngrams(sentence, len(ngram))) + if ng == ngram) + + :param ngram: The ngram that needs to be searched + :type ngram: tuple + :param sentence: The list of tokens to search from. + :type sentence: list(str) + """ + # Iterates through the ngrams in sentence. + for i, sublist in enumerate(ngrams(sentence, len(ngram))): + # Returns the index of the word when ngram matches. + if ngram == sublist: + return i + + +def word_rank_alignment(reference, hypothesis, character_based=False): + """ + This is the word rank alignment algorithm described in the paper to produce + the *worder* list, i.e. a list of word indices of the hypothesis word orders + w.r.t. the list of reference words. + + Below is (H0, R0) example from the Isozaki et al. 2010 paper, + note the examples are indexed from 1 but the results here are indexed from 0: + + >>> ref = str('he was interested in world history because he ' + ... 'read the book').split() + >>> hyp = str('he read the book because he was interested in world ' + ... 
'history').split() + >>> word_rank_alignment(ref, hyp) + [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + + The (H1, R1) example from the paper, note the 0th index: + + >>> ref = 'John hit Bob yesterday'.split() + >>> hyp = 'Bob hit John yesterday'.split() + >>> word_rank_alignment(ref, hyp) + [2, 1, 0, 3] + + Here is the (H2, R2) example from the paper, note the 0th index here too: + + >>> ref = 'the boy read the book'.split() + >>> hyp = 'the book was read by the boy'.split() + >>> word_rank_alignment(ref, hyp) + [3, 4, 2, 0, 1] + + :param reference: a reference sentence + :type reference: list(str) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + """ + worder = [] + hyp_len = len(hypothesis) + # Stores a list of possible ngrams from the reference sentence. + # This is used for matching context window later in the algorithm. + ref_ngrams = [] + hyp_ngrams = [] + for n in range(1, len(reference) + 1): + for ng in ngrams(reference, n): + ref_ngrams.append(ng) + for ng in ngrams(hypothesis, n): + hyp_ngrams.append(ng) + for i, h_word in enumerate(hypothesis): + # If word is not in the reference, continue. + if h_word not in reference: + continue + # If we can determine one-to-one word correspondence for unigrams that + # only appear once in both the reference and hypothesis. + elif hypothesis.count(h_word) == reference.count(h_word) == 1: + worder.append(reference.index(h_word)) + else: + max_window_size = max(i, hyp_len - i + 1) + for window in range(1, max_window_size): + if i + window < hyp_len: # If searching the right context is possible. + # Retrieve the right context window. + right_context_ngram = tuple(islice(hypothesis, i, i + window + 1)) + num_times_in_ref = ref_ngrams.count(right_context_ngram) + num_times_in_hyp = hyp_ngrams.count(right_context_ngram) + # If ngram appears only once in both ref and hyp. + if num_times_in_ref == num_times_in_hyp == 1: + # Find the position of ngram that matched the reference. + pos = position_of_ngram(right_context_ngram, reference) + worder.append(pos) # Add the positions of the ngram. + break + if window <= i: # If searching the left context is possible. + # Retrieve the left context window. + left_context_ngram = tuple(islice(hypothesis, i - window, i + 1)) + num_times_in_ref = ref_ngrams.count(left_context_ngram) + num_times_in_hyp = hyp_ngrams.count(left_context_ngram) + if num_times_in_ref == num_times_in_hyp == 1: + # Find the position of ngram that matched the reference. + pos = position_of_ngram(left_context_ngram, reference) + # Add the positions of the ngram. + worder.append(pos + len(left_context_ngram) - 1) + break + return worder + + +def find_increasing_sequences(worder): + """ + Given the *worder* list, this function groups monotonic +1 sequences. + + >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + >>> list(find_increasing_sequences(worder)) + [(7, 8, 9, 10), (0, 1, 2, 3, 4, 5)] + + :param worder: The worder list output from word_rank_alignment + :param type: list(int) + """ + items = iter(worder) + a, b = None, next(items, None) + result = [b] + while b is not None: + a, b = b, next(items, None) + if b is not None and a + 1 == b: + result.append(b) + else: + if len(result) > 1: + yield tuple(result) + result = [b] + + +def kendall_tau(worder, normalize=True): + """ + Calculates the Kendall's Tau correlation coefficient given the *worder* + list of word alignments from word_rank_alignment(), using the formula: + + tau = 2 * num_increasing_pairs / num_possible_pairs -1 + + Note that the no. 
of increasing pairs can be discontinuous in the *worder* + list and each each increasing sequence can be tabulated as choose(len(seq), 2) + no. of increasing pairs, e.g. + + >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + >>> number_possible_pairs = choose(len(worder), 2) + >>> round(kendall_tau(worder, normalize=False),3) + -0.236 + >>> round(kendall_tau(worder),3) + 0.382 + + :param worder: The worder list output from word_rank_alignment + :type worder: list(int) + :param normalize: Flag to indicate normalization to between 0.0 and 1.0. + :type normalize: boolean + :return: The Kendall's Tau correlation coefficient. + :rtype: float + """ + worder_len = len(worder) + # With worder_len < 2, `choose(worder_len, 2)` will be 0. + # As we divide by this, it will give a ZeroDivisionError. + # To avoid this, we can just return the lowest possible score. + if worder_len < 2: + tau = -1 + else: + # Extract the groups of increasing/monotonic sequences. + increasing_sequences = find_increasing_sequences(worder) + # Calculate no. of increasing_pairs in *worder* list. + num_increasing_pairs = sum(choose(len(seq), 2) for seq in increasing_sequences) + # Calculate no. of possible pairs. + num_possible_pairs = choose(worder_len, 2) + # Kendall's Tau computation. + tau = 2 * num_increasing_pairs / num_possible_pairs - 1 + if normalize: # If normalized, the tau output falls between 0.0 to 1.0 + return (tau + 1) / 2 + else: # Otherwise, the tau outputs falls between -1.0 to +1.0 + return tau + + +def spearman_rho(worder, normalize=True): + """ + Calculates the Spearman's Rho correlation coefficient given the *worder* + list of word alignment from word_rank_alignment(), using the formula: + + rho = 1 - sum(d**2) / choose(len(worder)+1, 3) + + Given that d is the sum of difference between the *worder* list of indices + and the original word indices from the reference sentence. + + Using the (H0,R0) and (H5, R5) example from the paper + + >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + >>> round(spearman_rho(worder, normalize=False), 3) + -0.591 + >>> round(spearman_rho(worder), 3) + 0.205 + + :param worder: The worder list output from word_rank_alignment + :param type: list(int) + """ + worder_len = len(worder) + sum_d_square = sum((wi - i) ** 2 for wi, i in zip(worder, range(worder_len))) + rho = 1 - sum_d_square / choose(worder_len + 1, 3) + + if normalize: # If normalized, the rho output falls between 0.0 to 1.0 + return (rho + 1) / 2 + else: # Otherwise, the rho outputs falls between -1.0 to +1.0 + return rho diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/stack_decoder.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/stack_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..29c6c99ff8d39848e3e17d413e9b40296bd5dc71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/stack_decoder.py @@ -0,0 +1,515 @@ +# Natural Language Toolkit: Stack decoder +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +A decoder that uses stacks to implement phrase-based translation. + +In phrase-based translation, the source sentence is segmented into +phrases of one or more words, and translations for those phrases are +used to build the target sentence. + +Hypothesis data structures are used to keep track of the source words +translated so far and the partial output. 
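As a cross-check of the kendall_tau doctest shown earlier in this patch, its numbers can be reproduced by hand from the two monotonic runs in the worder list; this sketch merely restates what the function computes, with math.comb standing in for nltk.util.choose:

from math import comb

worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
# Increasing +1 runs: (7, 8, 9, 10) and (0, 1, 2, 3, 4, 5)
num_increasing_pairs = comb(4, 2) + comb(6, 2)            # 6 + 15 = 21
num_possible_pairs = comb(len(worder), 2)                 # 55
tau = 2 * num_increasing_pairs / num_possible_pairs - 1   # ~ -0.236
print(round((tau + 1) / 2, 3))                            # 0.382, the normalized value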
A hypothesis can be expanded +by selecting an untranslated phrase, looking up its translation in a +phrase table, and appending that translation to the partial output. +Translation is complete when a hypothesis covers all source words. + +The search space is huge because the source sentence can be segmented +in different ways, the source phrases can be selected in any order, +and there could be multiple translations for the same source phrase in +the phrase table. To make decoding tractable, stacks are used to limit +the number of candidate hypotheses by doing histogram and/or threshold +pruning. + +Hypotheses with the same number of words translated are placed in the +same stack. In histogram pruning, each stack has a size limit, and +the hypothesis with the lowest score is removed when the stack is full. +In threshold pruning, hypotheses that score below a certain threshold +of the best hypothesis in that stack are removed. + +Hypothesis scoring can include various factors such as phrase +translation probability, language model probability, length of +translation, cost of remaining words to be translated, and so on. + + +References: +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. +""" + +import warnings +from collections import defaultdict +from math import log + + +class StackDecoder: + """ + Phrase-based stack decoder for machine translation + + >>> from nltk.translate import PhraseTable + >>> phrase_table = PhraseTable() + >>> phrase_table.add(('niemand',), ('nobody',), log(0.8)) + >>> phrase_table.add(('niemand',), ('no', 'one'), log(0.2)) + >>> phrase_table.add(('erwartet',), ('expects',), log(0.8)) + >>> phrase_table.add(('erwartet',), ('expecting',), log(0.2)) + >>> phrase_table.add(('niemand', 'erwartet'), ('one', 'does', 'not', 'expect'), log(0.1)) + >>> phrase_table.add(('die', 'spanische', 'inquisition'), ('the', 'spanish', 'inquisition'), log(0.8)) + >>> phrase_table.add(('!',), ('!',), log(0.8)) + + >>> # nltk.model should be used here once it is implemented + >>> from collections import defaultdict + >>> language_prob = defaultdict(lambda: -999.0) + >>> language_prob[('nobody',)] = log(0.5) + >>> language_prob[('expects',)] = log(0.4) + >>> language_prob[('the', 'spanish', 'inquisition')] = log(0.2) + >>> language_prob[('!',)] = log(0.1) + >>> language_model = type('',(object,),{'probability_change': lambda self, context, phrase: language_prob[phrase], 'probability': lambda self, phrase: language_prob[phrase]})() + + >>> stack_decoder = StackDecoder(phrase_table, language_model) + + >>> stack_decoder.translate(['niemand', 'erwartet', 'die', 'spanische', 'inquisition', '!']) + ['nobody', 'expects', 'the', 'spanish', 'inquisition', '!'] + + """ + + def __init__(self, phrase_table, language_model): + """ + :param phrase_table: Table of translations for source language + phrases and the log probabilities for those translations. + :type phrase_table: PhraseTable + + :param language_model: Target language model. Must define a + ``probability_change`` method that calculates the change in + log probability of a sentence, if a given string is appended + to it. + This interface is experimental and will likely be replaced + with nltk.model once it is implemented. + :type language_model: object + """ + self.phrase_table = phrase_table + self.language_model = language_model + + self.word_penalty = 0.0 + """ + float: Influences the translation length exponentially. + If positive, shorter translations are preferred. 
+ If negative, longer translations are preferred. + If zero, no penalty is applied. + """ + + self.beam_threshold = 0.0 + """ + float: Hypotheses that score below this factor of the best + hypothesis in a stack are dropped from consideration. + Value between 0.0 and 1.0. + """ + + self.stack_size = 100 + """ + int: Maximum number of hypotheses to consider in a stack. + Higher values increase the likelihood of a good translation, + but increases processing time. + """ + + self.__distortion_factor = 0.5 + self.__compute_log_distortion() + + @property + def distortion_factor(self): + """ + float: Amount of reordering of source phrases. + Lower values favour monotone translation, suitable when + word order is similar for both source and target languages. + Value between 0.0 and 1.0. Default 0.5. + """ + return self.__distortion_factor + + @distortion_factor.setter + def distortion_factor(self, d): + self.__distortion_factor = d + self.__compute_log_distortion() + + def __compute_log_distortion(self): + # cache log(distortion_factor) so we don't have to recompute it + # when scoring hypotheses + if self.__distortion_factor == 0.0: + self.__log_distortion_factor = log(1e-9) # 1e-9 is almost zero + else: + self.__log_distortion_factor = log(self.__distortion_factor) + + def translate(self, src_sentence): + """ + :param src_sentence: Sentence to be translated + :type src_sentence: list(str) + + :return: Translated sentence + :rtype: list(str) + """ + sentence = tuple(src_sentence) # prevent accidental modification + sentence_length = len(sentence) + stacks = [ + _Stack(self.stack_size, self.beam_threshold) + for _ in range(0, sentence_length + 1) + ] + empty_hypothesis = _Hypothesis() + stacks[0].push(empty_hypothesis) + + all_phrases = self.find_all_src_phrases(sentence) + future_score_table = self.compute_future_scores(sentence) + for stack in stacks: + for hypothesis in stack: + possible_expansions = StackDecoder.valid_phrases( + all_phrases, hypothesis + ) + for src_phrase_span in possible_expansions: + src_phrase = sentence[src_phrase_span[0] : src_phrase_span[1]] + for translation_option in self.phrase_table.translations_for( + src_phrase + ): + raw_score = self.expansion_score( + hypothesis, translation_option, src_phrase_span + ) + new_hypothesis = _Hypothesis( + raw_score=raw_score, + src_phrase_span=src_phrase_span, + trg_phrase=translation_option.trg_phrase, + previous=hypothesis, + ) + new_hypothesis.future_score = self.future_score( + new_hypothesis, future_score_table, sentence_length + ) + total_words = new_hypothesis.total_translated_words() + stacks[total_words].push(new_hypothesis) + + if not stacks[sentence_length]: + warnings.warn( + "Unable to translate all words. " + "The source sentence contains words not in " + "the phrase table" + ) + # Instead of returning empty output, perhaps a partial + # translation could be returned + return [] + + best_hypothesis = stacks[sentence_length].best() + return best_hypothesis.translation_so_far() + + def find_all_src_phrases(self, src_sentence): + """ + Finds all subsequences in src_sentence that have a phrase + translation in the translation table + + :type src_sentence: tuple(str) + + :return: Subsequences that have a phrase translation, + represented as a table of lists of end positions. + For example, if result[2] is [5, 6, 9], then there are + three phrases starting from position 2 in ``src_sentence``, + ending at positions 5, 6, and 9 exclusive. The list of + ending positions are in ascending order. 
+ :rtype: list(list(int)) + """ + sentence_length = len(src_sentence) + phrase_indices = [[] for _ in src_sentence] + for start in range(0, sentence_length): + for end in range(start + 1, sentence_length + 1): + potential_phrase = src_sentence[start:end] + if potential_phrase in self.phrase_table: + phrase_indices[start].append(end) + return phrase_indices + + def compute_future_scores(self, src_sentence): + """ + Determines the approximate scores for translating every + subsequence in ``src_sentence`` + + Future scores can be used a look-ahead to determine the + difficulty of translating the remaining parts of a src_sentence. + + :type src_sentence: tuple(str) + + :return: Scores of subsequences referenced by their start and + end positions. For example, result[2][5] is the score of the + subsequence covering positions 2, 3, and 4. + :rtype: dict(int: (dict(int): float)) + """ + scores = defaultdict(lambda: defaultdict(lambda: float("-inf"))) + for seq_length in range(1, len(src_sentence) + 1): + for start in range(0, len(src_sentence) - seq_length + 1): + end = start + seq_length + phrase = src_sentence[start:end] + if phrase in self.phrase_table: + score = self.phrase_table.translations_for(phrase)[ + 0 + ].log_prob # pick best (first) translation + # Warning: API of language_model is subject to change + score += self.language_model.probability(phrase) + scores[start][end] = score + + # check if a better score can be obtained by combining + # two child subsequences + for mid in range(start + 1, end): + combined_score = scores[start][mid] + scores[mid][end] + if combined_score > scores[start][end]: + scores[start][end] = combined_score + return scores + + def future_score(self, hypothesis, future_score_table, sentence_length): + """ + Determines the approximate score for translating the + untranslated words in ``hypothesis`` + """ + score = 0.0 + for span in hypothesis.untranslated_spans(sentence_length): + score += future_score_table[span[0]][span[1]] + return score + + def expansion_score(self, hypothesis, translation_option, src_phrase_span): + """ + Calculate the score of expanding ``hypothesis`` with + ``translation_option`` + + :param hypothesis: Hypothesis being expanded + :type hypothesis: _Hypothesis + + :param translation_option: Information about the proposed expansion + :type translation_option: PhraseTableEntry + + :param src_phrase_span: Word position span of the source phrase + :type src_phrase_span: tuple(int, int) + """ + score = hypothesis.raw_score + score += translation_option.log_prob + # The API of language_model is subject to change; it could accept + # a string, a list of words, and/or some other type + score += self.language_model.probability_change( + hypothesis, translation_option.trg_phrase + ) + score += self.distortion_score(hypothesis, src_phrase_span) + score -= self.word_penalty * len(translation_option.trg_phrase) + return score + + def distortion_score(self, hypothesis, next_src_phrase_span): + if not hypothesis.src_phrase_span: + return 0.0 + next_src_phrase_start = next_src_phrase_span[0] + prev_src_phrase_end = hypothesis.src_phrase_span[1] + distortion_distance = next_src_phrase_start - prev_src_phrase_end + return abs(distortion_distance) * self.__log_distortion_factor + + @staticmethod + def valid_phrases(all_phrases_from, hypothesis): + """ + Extract phrases from ``all_phrases_from`` that contains words + that have not been translated by ``hypothesis`` + + :param all_phrases_from: Phrases represented by their spans, in + the same format as the 
return value of + ``find_all_src_phrases`` + :type all_phrases_from: list(list(int)) + + :type hypothesis: _Hypothesis + + :return: A list of phrases, represented by their spans, that + cover untranslated positions. + :rtype: list(tuple(int, int)) + """ + untranslated_spans = hypothesis.untranslated_spans(len(all_phrases_from)) + valid_phrases = [] + for available_span in untranslated_spans: + start = available_span[0] + available_end = available_span[1] + while start < available_end: + for phrase_end in all_phrases_from[start]: + if phrase_end > available_end: + # Subsequent elements in all_phrases_from[start] + # will also be > available_end, since the + # elements are in ascending order + break + valid_phrases.append((start, phrase_end)) + start += 1 + return valid_phrases + + +class _Hypothesis: + """ + Partial solution to a translation. + + Records the word positions of the phrase being translated, its + translation, raw score, and the cost of the untranslated parts of + the sentence. When the next phrase is selected to build upon the + partial solution, a new _Hypothesis object is created, with a back + pointer to the previous hypothesis. + + To find out which words have been translated so far, look at the + ``src_phrase_span`` in the hypothesis chain. Similarly, the + translation output can be found by traversing up the chain. + """ + + def __init__( + self, + raw_score=0.0, + src_phrase_span=(), + trg_phrase=(), + previous=None, + future_score=0.0, + ): + """ + :param raw_score: Likelihood of hypothesis so far. + Higher is better. Does not account for untranslated words. + :type raw_score: float + + :param src_phrase_span: Span of word positions covered by the + source phrase in this hypothesis expansion. For example, + (2, 5) means that the phrase is from the second word up to, + but not including the fifth word in the source sentence. + :type src_phrase_span: tuple(int) + + :param trg_phrase: Translation of the source phrase in this + hypothesis expansion + :type trg_phrase: tuple(str) + + :param previous: Previous hypothesis before expansion to this one + :type previous: _Hypothesis + + :param future_score: Approximate score for translating the + remaining words not covered by this hypothesis. Higher means + that the remaining words are easier to translate. + :type future_score: float + """ + self.raw_score = raw_score + self.src_phrase_span = src_phrase_span + self.trg_phrase = trg_phrase + self.previous = previous + self.future_score = future_score + + def score(self): + """ + Overall score of hypothesis after accounting for local and + global features + """ + return self.raw_score + self.future_score + + def untranslated_spans(self, sentence_length): + """ + Starting from each untranslated word, find the longest + continuous span of untranslated positions + + :param sentence_length: Length of source sentence being + translated by the hypothesis + :type sentence_length: int + + :rtype: list(tuple(int, int)) + """ + translated_positions = self.translated_positions() + translated_positions.sort() + translated_positions.append(sentence_length) # add sentinel position + + untranslated_spans = [] + start = 0 + # each untranslated span must end in one of the translated_positions + for end in translated_positions: + if start < end: + untranslated_spans.append((start, end)) + start = end + 1 + + return untranslated_spans + + def translated_positions(self): + """ + List of positions in the source sentence of words already + translated. The list is not sorted. 
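The span bookkeeping in _Hypothesis.untranslated_spans is easiest to see on a toy case. The sketch below re-runs the same sentinel-based loop for a hypothetical 8-word sentence in which positions 2, 3 and 6 have been translated; it does not call the class itself.

translated_positions = sorted([2, 3, 6]) + [8]   # 8 is the sentinel (sentence length)
spans, start = [], 0
for end in translated_positions:
    if start < end:
        spans.append((start, end))
    start = end + 1
print(spans)   # [(0, 2), (4, 6), (7, 8)]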
+ + :rtype: list(int) + """ + translated_positions = [] + current_hypothesis = self + while current_hypothesis.previous is not None: + translated_span = current_hypothesis.src_phrase_span + translated_positions.extend(range(translated_span[0], translated_span[1])) + current_hypothesis = current_hypothesis.previous + return translated_positions + + def total_translated_words(self): + return len(self.translated_positions()) + + def translation_so_far(self): + translation = [] + self.__build_translation(self, translation) + return translation + + def __build_translation(self, hypothesis, output): + if hypothesis.previous is None: + return + self.__build_translation(hypothesis.previous, output) + output.extend(hypothesis.trg_phrase) + + +class _Stack: + """ + Collection of _Hypothesis objects + """ + + def __init__(self, max_size=100, beam_threshold=0.0): + """ + :param beam_threshold: Hypotheses that score less than this + factor of the best hypothesis are discarded from the stack. + Value must be between 0.0 and 1.0. + :type beam_threshold: float + """ + self.max_size = max_size + self.items = [] + + if beam_threshold == 0.0: + self.__log_beam_threshold = float("-inf") + else: + self.__log_beam_threshold = log(beam_threshold) + + def push(self, hypothesis): + """ + Add ``hypothesis`` to the stack. + Removes lowest scoring hypothesis if the stack is full. + After insertion, hypotheses that score less than + ``beam_threshold`` times the score of the best hypothesis + are removed. + """ + self.items.append(hypothesis) + self.items.sort(key=lambda h: h.score(), reverse=True) + while len(self.items) > self.max_size: + self.items.pop() + self.threshold_prune() + + def threshold_prune(self): + if not self.items: + return + # log(score * beam_threshold) = log(score) + log(beam_threshold) + threshold = self.items[0].score() + self.__log_beam_threshold + for hypothesis in reversed(self.items): + if hypothesis.score() < threshold: + self.items.pop() + else: + break + + def best(self): + """ + :return: Hypothesis with the highest score in the stack + :rtype: _Hypothesis + """ + if self.items: + return self.items[0] + return None + + def __iter__(self): + return iter(self.items) + + def __contains__(self, hypothesis): + return hypothesis in self.items + + def __bool__(self): + return len(self.items) != 0 + + __nonzero__ = __bool__
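To see the two pruning mechanisms of _Stack in isolation, the following sketch pushes a few stand-in hypotheses into a small stack. _FakeHypothesis is a hypothetical stub; only its score() method matters to the stack.

from nltk.translate.stack_decoder import _Stack

class _FakeHypothesis:
    def __init__(self, score):
        self._score = score

    def score(self):
        return self._score

stack = _Stack(max_size=3, beam_threshold=0.5)
for s in (-1.0, -2.0, -3.0, -10.0):
    stack.push(_FakeHypothesis(s))

# Threshold pruning drops anything scoring below best + log(0.5), i.e. a
# multiplicative threshold applied in log space, so only -1.0 survives here.
print([h.score() for h in stack])   # [-1.0]
print(stack.best().score())         # -1.0

Raising beam_threshold toward 1.0 prunes more aggressively, since the allowed band below the best log score shrinks; setting it to 0.0 disables threshold pruning and leaves only the histogram limit of max_size.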