diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..754b3d2d78286799b229700bb19bd21cb683b855 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__init__.py @@ -0,0 +1,24 @@ +# Natural Language Toolkit: Inference +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# Ewan Klein +# +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for theorem proving and model building. +""" + +from nltk.inference.api import ParallelProverBuilder, ParallelProverBuilderCommand +from nltk.inference.discourse import ( + CfgReadingCommand, + DiscourseTester, + DrtGlueReadingCommand, + ReadingCommand, +) +from nltk.inference.mace import Mace, MaceCommand +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.inference.resolution import ResolutionProver, ResolutionProverCommand +from nltk.inference.tableau import TableauProver, TableauProverCommand diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37bfa8517b11d365e189b621fa785d4d09ce5ecd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0849b94e8934db5973792625755359c1bd274176 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..984bafb22100c2b058f7b543dc78477d45c5d9ef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35147d6c6c6b0b6dde424bb8ee0daacbfbd61026 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b35d78075547e4788caea916f9af7921604b2085 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9d1c86cdeed15e017b3b2e97212248e718413ef1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00bf6dddb40ff566d55f84c0bdbb47bac965daef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/api.py new file mode 100644 index 0000000000000000000000000000000000000000..12f1c099941280c1a72f40f957330dc5497a1b27 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/api.py @@ -0,0 +1,614 @@ +# Natural Language Toolkit: Theorem Prover and Model Builder Interfaces +# +# Author: Ewan Klein +# Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Interfaces and base classes for theorem provers and model builders. + +``Prover`` is a standard interface for a theorem prover which tries to prove a goal from a +list of assumptions. + +``ModelBuilder`` is a standard interface for a model builder. Given just a set of assumptions, +the model builder tries to build a model for the assumptions. Given a set of assumptions and a +goal *G*, the model builder tries to find a counter-model, in the sense of a model that will satisfy +the assumptions plus the negation of *G*. +""" + +import threading +import time +from abc import ABCMeta, abstractmethod + + +class Prover(metaclass=ABCMeta): + """ + Interface for trying to prove a goal from assumptions. Both the goal and + the assumptions are constrained to be formulas of ``logic.Expression``. + """ + + def prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not. + :rtype: bool + """ + return self._prove(goal, assumptions, verbose)[0] + + @abstractmethod + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not, along with the proof + :rtype: tuple: (bool, str) + """ + + +class ModelBuilder(metaclass=ABCMeta): + """ + Interface for trying to build a model of a set of formulas. + Open formulas are assumed to be universally quantified. + Both the goal and the assumptions are constrained to be formulas + of ``logic.Expression``. + """ + + def build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated + :rtype: bool + """ + return self._build_model(goal, assumptions, verbose)[0] + + @abstractmethod + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated, and the model itself + :rtype: tuple(bool, sem.Valuation) + """ + + +class TheoremToolCommand(metaclass=ABCMeta): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + @abstractmethod + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list.
+ + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + + @abstractmethod + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + + @abstractmethod + def assumptions(self): + """ + List the current assumptions. + + :return: list of ``Expression`` + """ + + @abstractmethod + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + + @abstractmethod + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + + +class ProverCommand(TheoremToolCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + @abstractmethod + def prove(self, verbose=False): + """ + Perform the actual proof. + """ + + @abstractmethod + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_prover(self): + """ + Return the prover object + :return: ``Prover`` + """ + + +class ModelBuilderCommand(TheoremToolCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. + When build_model() is called, the ``ModelBuilder`` is executed with the goal + and assumptions. + """ + + @abstractmethod + def build_model(self, verbose=False): + """ + Perform the actual model building. + :return: A model if one is generated; None otherwise. + :rtype: sem.Valuation + """ + + @abstractmethod + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_model_builder(self): + """ + Return the model builder object + :return: ``ModelBuilder`` + """ + + +class BaseTheoremToolCommand(TheoremToolCommand): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + def __init__(self, goal=None, assumptions=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + self._goal = goal + + if not assumptions: + self._assumptions = [] + else: + self._assumptions = list(assumptions) + + self._result = None + """A holder for the result, to prevent unnecessary re-proving""" + + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. + + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + self._assumptions.extend(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. 
+ :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + retracted = set(retracted) + result_list = list(filter(lambda a: a not in retracted, self._assumptions)) + if debug and result_list == self._assumptions: + print(Warning("Assumptions list has not been changed:")) + self.print_assumptions() + + self._assumptions = result_list + + self._result = None + + def assumptions(self): + """ + List the current assumptions. + + :return: list of ``Expression`` + """ + return self._assumptions + + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + return self._goal + + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + for a in self.assumptions(): + print(a) + + +class BaseProverCommand(BaseTheoremToolCommand, ProverCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + def __init__(self, prover, goal=None, assumptions=None): + """ + :param prover: The theorem tool to execute with the assumptions + :type prover: Prover + :see: ``BaseTheoremToolCommand`` + """ + self._prover = prover + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._proof = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. + """ + if self._result is None: + self._result, self._proof = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return proof_string + + def get_prover(self): + return self._prover + + +class BaseModelBuilderCommand(BaseTheoremToolCommand, ModelBuilderCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. When + build_model() is called, the ``ModelBuilder`` is executed with the goal and + assumptions. + """ + + def __init__(self, modelbuilder, goal=None, assumptions=None): + """ + :param modelbuilder: The theorem tool to execute with the assumptions + :type modelbuilder: ModelBuilder + :see: ``BaseTheoremToolCommand`` + """ + self._modelbuilder = modelbuilder + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. + """ + if self._result is None: + self._result, self._model = self._modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? 
+ :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return valuation_str + + def get_model_builder(self): + return self._modelbuilder + + +class TheoremToolCommandDecorator(TheoremToolCommand): + """ + A base decorator for the ``ProverCommandDecorator`` and + ``ModelBuilderCommandDecorator`` classes from which decorators can extend. + """ + + def __init__(self, command): + """ + :param command: ``TheoremToolCommand`` to decorate + """ + self._command = command + + # The decorator has its own versions of 'result' different from the + # underlying command + self._result = None + + def assumptions(self): + return self._command.assumptions() + + def goal(self): + return self._command.goal() + + def add_assumptions(self, new_assumptions): + self._command.add_assumptions(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + self._command.retract_assumptions(retracted, debug) + self._result = None + + def print_assumptions(self): + self._command.print_assumptions() + + +class ProverCommandDecorator(TheoremToolCommandDecorator, ProverCommand): + """ + A base decorator for the ``ProverCommand`` class from which other + prover command decorators can extend. + """ + + def __init__(self, proverCommand): + """ + :param proverCommand: ``ProverCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, proverCommand) + + # The decorator has its own versions of 'result' and 'proof' + # because they may be different from the underlying command + self._proof = None + + def prove(self, verbose=False): + if self._result is None: + prover = self.get_prover() + self._result, self._proof = prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return self._command.decorate_proof(proof_string, simplify) + + def get_prover(self): + return self._command.get_prover() + + +class ModelBuilderCommandDecorator(TheoremToolCommandDecorator, ModelBuilderCommand): + """ + A base decorator for the ``ModelBuilderCommand`` class from which other + prover command decorators can extend. + """ + + def __init__(self, modelBuilderCommand): + """ + :param modelBuilderCommand: ``ModelBuilderCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, modelBuilderCommand) + + # The decorator has its own versions of 'result' and 'valuation' + # because they may be different from the underlying command + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. 
+ """ + if self._result is None: + modelbuilder = self.get_model_builder() + self._result, self._model = modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + Modify and return the proof string + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return self._command._decorate_model(valuation_str, format) + + def get_model_builder(self): + return self._command.get_prover() + + +class ParallelProverBuilder(Prover, ModelBuilder): + """ + This class stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + """ + + def __init__(self, prover, modelbuilder): + self._prover = prover + self._modelbuilder = modelbuilder + + def _prove(self, goal=None, assumptions=None, verbose=False): + return self._run(goal, assumptions, verbose), "" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + return not self._run(goal, assumptions, verbose), "" + + def _run(self, goal, assumptions, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: self._prover.prove(goal, assumptions, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: self._modelbuilder.build_model(goal, assumptions, verbose), + verbose, + "MB", + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + return tp_thread.result + elif mb_thread.result is not None: + return not mb_thread.result + else: + return None + + +class ParallelProverBuilderCommand(BaseProverCommand, BaseModelBuilderCommand): + """ + This command stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + + Because the theorem prover result is the opposite of the model builder + result, we will treat self._result as meaning "proof found/no model found". 
+ """ + + def __init__(self, prover, modelbuilder, goal=None, assumptions=None): + BaseProverCommand.__init__(self, prover, goal, assumptions) + BaseModelBuilderCommand.__init__(self, modelbuilder, goal, assumptions) + + def prove(self, verbose=False): + return self._run(verbose) + + def build_model(self, verbose=False): + return not self._run(verbose) + + def _run(self, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: BaseProverCommand.prove(self, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: BaseModelBuilderCommand.build_model(self, verbose), verbose, "MB" + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + self._result = tp_thread.result + elif mb_thread.result is not None: + self._result = not mb_thread.result + return self._result + + +class TheoremToolThread(threading.Thread): + def __init__(self, command, verbose, name=None): + threading.Thread.__init__(self) + self._command = command + self._result = None + self._verbose = verbose + self._name = name + + def run(self): + try: + self._result = self._command() + if self._verbose: + print( + "Thread %s finished with result %s at %s" + % (self._name, self._result, time.localtime(time.time())) + ) + except Exception as e: + print(e) + print("Thread %s completed abnormally" % (self._name)) + + @property + def result(self): + return self._result diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/discourse.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/discourse.py new file mode 100644 index 0000000000000000000000000000000000000000..9630234dcf3837d9da2b4213fe26d22491899932 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/discourse.py @@ -0,0 +1,651 @@ +# Natural Language Toolkit: Discourse Processing +# +# Author: Ewan Klein +# Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +r""" +Module for incrementally developing simple discourses, and checking for semantic ambiguity, +consistency and informativeness. + +Many of the ideas are based on the CURT family of programs of Blackburn and Bos +(see http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html). + +Consistency checking is carried out by using the ``mace`` module to call the Mace4 model builder. +Informativeness checking is carried out with a call to ``Prover.prove()`` from +the ``inference`` module. + +``DiscourseTester`` is a constructor for discourses. +The basic data structure is a list of sentences, stored as ``self._sentences``. Each sentence in the list +is assigned a "sentence ID" (``sid``) of the form ``s``\ *i*. For example:: + + s0: A boxer walks + s1: Every boxer chases a girl + +Each sentence can be ambiguous between a number of readings, each of which receives a +"reading ID" (``rid``) of the form ``s``\ *i* -``r``\ *j*. For example:: + + s0 readings: + + s0-r1: some x.(boxer(x) & walk(x)) + s0-r0: some x.(boxerdog(x) & walk(x)) + +A "thread" is a list of readings, represented as a list of ``rid``\ s. +Each thread receives a "thread ID" (``tid``) of the form ``d``\ *i*. +For example:: + + d0: ['s0-r0', 's1-r0'] + +The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences. +(This is not intended to scale beyond very short discourses!) 
The method ``readings(filter=True)`` will only show +those threads which are consistent (taking into account any background assumptions). +""" + +import os +from abc import ABCMeta, abstractmethod +from functools import reduce +from operator import add, and_ + +from nltk.data import show_cfg +from nltk.inference.mace import MaceCommand +from nltk.inference.prover9 import Prover9Command +from nltk.parse import load_parser +from nltk.parse.malt import MaltParser +from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora +from nltk.sem.glue import DrtGlue +from nltk.sem.logic import Expression +from nltk.tag import RegexpTagger + + +class ReadingCommand(metaclass=ABCMeta): + @abstractmethod + def parse_to_readings(self, sentence): + """ + :param sentence: the sentence to read + :type sentence: str + """ + + def process_thread(self, sentence_readings): + """ + This method should be used to handle dependencies between readings such + as resolving anaphora. + + :param sentence_readings: readings to process + :type sentence_readings: list(Expression) + :return: the list of readings after processing + :rtype: list(Expression) + """ + return sentence_readings + + @abstractmethod + def combine_readings(self, readings): + """ + :param readings: readings to combine + :type readings: list(Expression) + :return: one combined reading + :rtype: Expression + """ + + @abstractmethod + def to_fol(self, expression): + """ + Convert this expression into a First-Order Logic expression. + + :param expression: an expression + :type expression: Expression + :return: a FOL version of the input expression + :rtype: Expression + """ + + +class CfgReadingCommand(ReadingCommand): + def __init__(self, gramfile=None): + """ + :param gramfile: name of file where grammar can be loaded + :type gramfile: str + """ + self._gramfile = ( + gramfile if gramfile else "grammars/book_grammars/discourse.fcfg" + ) + self._parser = load_parser(self._gramfile) + + def parse_to_readings(self, sentence): + """:see: ReadingCommand.parse_to_readings()""" + from nltk.sem import root_semrep + + tokens = sentence.split() + trees = self._parser.parse(tokens) + return [root_semrep(tree) for tree in trees] + + def combine_readings(self, readings): + """:see: ReadingCommand.combine_readings()""" + return reduce(and_, readings) + + def to_fol(self, expression): + """:see: ReadingCommand.to_fol()""" + return expression + + +class DrtGlueReadingCommand(ReadingCommand): + def __init__(self, semtype_file=None, remove_duplicates=False, depparser=None): + """ + :param semtype_file: name of file where grammar can be loaded + :param remove_duplicates: should duplicates be removed? 
+ :param depparser: the dependency parser + """ + if semtype_file is None: + semtype_file = os.path.join( + "grammars", "sample_grammars", "drt_glue.semtype" + ) + self._glue = DrtGlue( + semtype_file=semtype_file, + remove_duplicates=remove_duplicates, + depparser=depparser, + ) + + def parse_to_readings(self, sentence): + """:see: ReadingCommand.parse_to_readings()""" + return self._glue.parse_to_meaning(sentence) + + def process_thread(self, sentence_readings): + """:see: ReadingCommand.process_thread()""" + try: + return [self.combine_readings(sentence_readings)] + except AnaphoraResolutionException: + return [] + + def combine_readings(self, readings): + """:see: ReadingCommand.combine_readings()""" + thread_reading = reduce(add, readings) + return resolve_anaphora(thread_reading.simplify()) + + def to_fol(self, expression): + """:see: ReadingCommand.to_fol()""" + return expression.fol() + + +class DiscourseTester: + """ + Check properties of an ongoing discourse. + """ + + def __init__(self, input, reading_command=None, background=None): + """ + Initialize a ``DiscourseTester``. + + :param input: the discourse sentences + :type input: list of str + :param background: Formulas which express background assumptions + :type background: list(Expression) + """ + self._input = input + self._sentences = {"s%s" % i: sent for i, sent in enumerate(input)} + self._models = None + self._readings = {} + self._reading_command = ( + reading_command if reading_command else CfgReadingCommand() + ) + self._threads = {} + self._filtered_threads = {} + if background is not None: + from nltk.sem.logic import Expression + + for e in background: + assert isinstance(e, Expression) + self._background = background + else: + self._background = [] + + ############################### + # Sentences + ############################### + + def sentences(self): + """ + Display the list of sentences in the current discourse. + """ + for id in sorted(self._sentences): + print(f"{id}: {self._sentences[id]}") + + def add_sentence(self, sentence, informchk=False, consistchk=False): + """ + Add a sentence to the current discourse. + + Updates ``self._input`` and ``self._sentences``. + :param sentence: An input sentence + :type sentence: str + :param informchk: if ``True``, check that the result of adding the sentence is thread-informative. Updates ``self._readings``. + :param consistchk: if ``True``, check that the result of adding the sentence is thread-consistent. Updates ``self._readings``. + + """ + # check whether the new sentence is informative (i.e. not entailed by the previous discourse) + if informchk: + self.readings(verbose=False) + for tid in sorted(self._threads): + assumptions = [reading for (rid, reading) in self.expand_threads(tid)] + assumptions += self._background + for sent_reading in self._get_readings(sentence): + tp = Prover9Command(goal=sent_reading, assumptions=assumptions) + if tp.prove(): + print( + "Sentence '%s' under reading '%s':" + % (sentence, str(sent_reading)) + ) + print("Not informative relative to thread '%s'" % tid) + + self._input.append(sentence) + self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)} + # check whether adding the new sentence to the discourse preserves consistency (i.e. a model can be found for the combined set of + # of assumptions + if consistchk: + self.readings(verbose=False) + self.models(show=False) + + def retract_sentence(self, sentence, verbose=True): + """ + Remove a sentence from the current discourse. 
+ + Updates ``self._input``, ``self._sentences`` and ``self._readings``. + :param sentence: An input sentence + :type sentence: str + :param verbose: If ``True``, report on the updated list of sentences. + """ + try: + self._input.remove(sentence) + except ValueError: + print( + "Retraction failed. The sentence '%s' is not part of the current discourse:" + % sentence + ) + self.sentences() + return None + self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)} + self.readings(verbose=False) + if verbose: + print("Current sentences are ") + self.sentences() + + def grammar(self): + """ + Print out the grammar in use for parsing input sentences + """ + show_cfg(self._reading_command._gramfile) + + ############################### + # Readings and Threads + ############################### + + def _get_readings(self, sentence): + """ + Build a list of semantic readings for a sentence. + + :rtype: list(Expression) + """ + return self._reading_command.parse_to_readings(sentence) + + def _construct_readings(self): + """ + Use ``self._sentences`` to construct a value for ``self._readings``. + """ + # re-initialize self._readings in case we have retracted a sentence + self._readings = {} + for sid in sorted(self._sentences): + sentence = self._sentences[sid] + readings = self._get_readings(sentence) + self._readings[sid] = { + f"{sid}-r{rid}": reading.simplify() + for rid, reading in enumerate(sorted(readings, key=str)) + } + + def _construct_threads(self): + """ + Use ``self._readings`` to construct a value for ``self._threads`` + and use the model builder to construct a value for ``self._filtered_threads`` + """ + thread_list = [[]] + for sid in sorted(self._readings): + thread_list = self.multiply(thread_list, sorted(self._readings[sid])) + self._threads = {"d%s" % tid: thread for tid, thread in enumerate(thread_list)} + # re-initialize the filtered threads + self._filtered_threads = {} + # keep the same ids, but only include threads which get models + consistency_checked = self._check_consistency(self._threads) + for (tid, thread) in self._threads.items(): + if (tid, True) in consistency_checked: + self._filtered_threads[tid] = thread + + def _show_readings(self, sentence=None): + """ + Print out the readings for the discourse (or a single sentence). 
+ """ + if sentence is not None: + print("The sentence '%s' has these readings:" % sentence) + for r in [str(reading) for reading in (self._get_readings(sentence))]: + print(" %s" % r) + else: + for sid in sorted(self._readings): + print() + print("%s readings:" % sid) + print() #'-' * 30 + for rid in sorted(self._readings[sid]): + lf = self._readings[sid][rid] + print(f"{rid}: {lf.normalize()}") + + def _show_threads(self, filter=False, show_thread_readings=False): + """ + Print out the value of ``self._threads`` or ``self._filtered_hreads`` + """ + threads = self._filtered_threads if filter else self._threads + for tid in sorted(threads): + if show_thread_readings: + readings = [ + self._readings[rid.split("-")[0]][rid] for rid in self._threads[tid] + ] + try: + thread_reading = ( + ": %s" + % self._reading_command.combine_readings(readings).normalize() + ) + except Exception as e: + thread_reading = ": INVALID: %s" % e.__class__.__name__ + else: + thread_reading = "" + + print("%s:" % tid, self._threads[tid], thread_reading) + + def readings( + self, + sentence=None, + threaded=False, + verbose=True, + filter=False, + show_thread_readings=False, + ): + """ + Construct and show the readings of the discourse (or of a single sentence). + + :param sentence: test just this sentence + :type sentence: str + :param threaded: if ``True``, print out each thread ID and the corresponding thread. + :param filter: if ``True``, only print out consistent thread IDs and threads. + """ + self._construct_readings() + self._construct_threads() + + # if we are filtering or showing thread readings, show threads + if filter or show_thread_readings: + threaded = True + + if verbose: + if not threaded: + self._show_readings(sentence=sentence) + else: + self._show_threads( + filter=filter, show_thread_readings=show_thread_readings + ) + + def expand_threads(self, thread_id, threads=None): + """ + Given a thread ID, find the list of ``logic.Expression`` objects corresponding to the reading IDs in that thread. + + :param thread_id: thread ID + :type thread_id: str + :param threads: a mapping from thread IDs to lists of reading IDs + :type threads: dict + :return: A list of pairs ``(rid, reading)`` where reading is the ``logic.Expression`` associated with a reading ID + :rtype: list of tuple + """ + if threads is None: + threads = self._threads + return [ + (rid, self._readings[sid][rid]) + for rid in threads[thread_id] + for sid in rid.split("-")[:1] + ] + + ############################### + # Models and Background + ############################### + + def _check_consistency(self, threads, show=False, verbose=False): + results = [] + for tid in sorted(threads): + assumptions = [ + reading for (rid, reading) in self.expand_threads(tid, threads=threads) + ] + assumptions = list( + map( + self._reading_command.to_fol, + self._reading_command.process_thread(assumptions), + ) + ) + if assumptions: + assumptions += self._background + # if Mace4 finds a model, it always seems to find it quickly + mb = MaceCommand(None, assumptions, max_models=20) + modelfound = mb.build_model() + else: + modelfound = False + results.append((tid, modelfound)) + if show: + spacer(80) + print("Model for Discourse Thread %s" % tid) + spacer(80) + if verbose: + for a in assumptions: + print(a) + spacer(80) + if modelfound: + print(mb.model(format="cooked")) + else: + print("No model found!\n") + return results + + def models(self, thread_id=None, show=True, verbose=False): + """ + Call Mace4 to build a model for each current discourse thread. 
+ + :param thread_id: thread ID + :type thread_id: str + :param show: If ``True``, display the model that has been found. + """ + self._construct_readings() + self._construct_threads() + threads = {thread_id: self._threads[thread_id]} if thread_id else self._threads + + for (tid, modelfound) in self._check_consistency( + threads, show=show, verbose=verbose + ): + idlist = [rid for rid in threads[tid]] + + if not modelfound: + print(f"Inconsistent discourse: {tid} {idlist}:") + for rid, reading in self.expand_threads(tid): + print(f" {rid}: {reading.normalize()}") + print() + else: + print(f"Consistent discourse: {tid} {idlist}:") + for rid, reading in self.expand_threads(tid): + print(f" {rid}: {reading.normalize()}") + print() + + def add_background(self, background, verbose=False): + """ + Add a list of background assumptions for reasoning about the discourse. + + When called, this method also updates the discourse model's set of readings and threads. + :param background: Formulas which contain background information + :type background: list(Expression) + """ + from nltk.sem.logic import Expression + + for (count, e) in enumerate(background): + assert isinstance(e, Expression) + if verbose: + print("Adding assumption %s to background" % count) + self._background.append(e) + + # update the state + self._construct_readings() + self._construct_threads() + + def background(self): + """ + Show the current background assumptions. + """ + for e in self._background: + print(str(e)) + + ############################### + # Misc + ############################### + + @staticmethod + def multiply(discourse, readings): + """ + Multiply every thread in ``discourse`` by every reading in ``readings``. + + Given discourse = [['A'], ['B']], readings = ['a', 'b', 'c'] , returns + [['A', 'a'], ['A', 'b'], ['A', 'c'], ['B', 'a'], ['B', 'b'], ['B', 'c']] + + :param discourse: the current list of readings + :type discourse: list of lists + :param readings: an additional list of readings + :type readings: list(Expression) + :rtype: A list of lists + """ + result = [] + for sublist in discourse: + for r in readings: + new = [] + new += sublist + new.append(r) + result.append(new) + return result + + +def load_fol(s): + """ + Temporarily duplicated from ``nltk.sem.util``. + Convert a file of first order formulas into a list of ``Expression`` objects. + + :param s: the contents of the file + :type s: str + :return: a list of parsed formulas. 
+ :rtype: list(Expression) + """ + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(Expression.fromstring(line)) + except Exception as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return statements + + +############################### +# Demo +############################### +def discourse_demo(reading_command=None): + """ + Illustrate the various methods of ``DiscourseTester`` + """ + dt = DiscourseTester( + ["A boxer walks", "Every boxer chases a girl"], reading_command + ) + dt.models() + print() + # dt.grammar() + print() + dt.sentences() + print() + dt.readings() + print() + dt.readings(threaded=True) + print() + dt.models("d1") + dt.add_sentence("John is a boxer") + print() + dt.sentences() + print() + dt.readings(threaded=True) + print() + dt = DiscourseTester( + ["A student dances", "Every student is a person"], reading_command + ) + print() + dt.add_sentence("No person dances", consistchk=True) + print() + dt.readings() + print() + dt.retract_sentence("No person dances", verbose=True) + print() + dt.models() + print() + dt.readings("A person dances") + print() + dt.add_sentence("A person dances", informchk=True) + dt = DiscourseTester( + ["Vincent is a boxer", "Fido is a boxer", "Vincent is married", "Fido barks"], + reading_command, + ) + dt.readings(filter=True) + import nltk.data + + background_file = os.path.join("grammars", "book_grammars", "background.fol") + background = nltk.data.load(background_file) + + print() + dt.add_background(background, verbose=False) + dt.background() + print() + dt.readings(filter=True) + print() + dt.models() + + +def drt_discourse_demo(reading_command=None): + """ + Illustrate the various methods of ``DiscourseTester`` + """ + dt = DiscourseTester(["every dog chases a boy", "he runs"], reading_command) + dt.models() + print() + dt.sentences() + print() + dt.readings() + print() + dt.readings(show_thread_readings=True) + print() + dt.readings(filter=True, show_thread_readings=True) + + +def spacer(num=30): + print("-" * num) + + +def demo(): + discourse_demo() + + tagger = RegexpTagger( + [ + ("^(chases|runs)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(dog|boy)$", "NN"), + ("^(he)$", "PRP"), + ] + ) + depparser = MaltParser(tagger=tagger) + drt_discourse_demo( + DrtGlueReadingCommand(remove_duplicates=False, depparser=depparser) + ) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/mace.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/mace.py new file mode 100644 index 0000000000000000000000000000000000000000..ee4d9e8e38d7db34c4b58f9c37dee330d397e123 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/mace.py @@ -0,0 +1,383 @@ +# Natural Language Toolkit: Interface to the Mace4 Model Builder +# +# Author: Dan Garrette +# Ewan Klein + +# URL: +# For license information, see LICENSE.TXT + +""" +A model builder that makes use of the external 'Mace4' package. +""" + +import os +import tempfile + +from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder +from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent +from nltk.sem import Expression, Valuation +from nltk.sem.logic import is_indvar + + +class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand): + """ + A ``MaceCommand`` specific to the ``Mace`` model builder. 
It contains + a print_assumptions() method that is used to print the list + of assumptions in multiple formats. + """ + + _interpformat_bin = None + + def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + :param max_models: The maximum number of models that Mace will try before + simply returning false. (Use 0 for no maximum.) + :type max_models: int + """ + if model_builder is not None: + assert isinstance(model_builder, Mace) + else: + model_builder = Mace(max_models) + + BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions) + + @property + def valuation(mbc): + return mbc.model("valuation") + + def _convert2val(self, valuation_str): + """ + Transform the output file into an NLTK-style Valuation. + + :return: A model if one is generated; None otherwise. + :rtype: sem.Valuation + """ + valuation_standard_format = self._transform_output(valuation_str, "standard") + + val = [] + for line in valuation_standard_format.splitlines(False): + l = line.strip() + + if l.startswith("interpretation"): + # find the number of entities in the model + num_entities = int(l[l.index("(") + 1 : l.index(",")].strip()) + + elif l.startswith("function") and l.find("_") == -1: + # replace the integer identifier with a corresponding alphabetic character + name = l[l.index("(") + 1 : l.index(",")].strip() + if is_indvar(name): + name = name.upper() + value = int(l[l.index("[") + 1 : l.index("]")].strip()) + val.append((name, MaceCommand._make_model_var(value))) + + elif l.startswith("relation"): + l = l[l.index("(") + 1 :] + if "(" in l: + # relation is not nullary + name = l[: l.index("(")].strip() + values = [ + int(v.strip()) + for v in l[l.index("[") + 1 : l.index("]")].split(",") + ] + val.append( + (name, MaceCommand._make_relation_set(num_entities, values)) + ) + else: + # relation is nullary + name = l[: l.index(",")].strip() + value = int(l[l.index("[") + 1 : l.index("]")].strip()) + val.append((name, value == 1)) + + return Valuation(val) + + @staticmethod + def _make_relation_set(num_entities, values): + """ + Convert a Mace4-style relation table into a dictionary. + + :param num_entities: the number of entities in the model; determines the row length in the table. + :type num_entities: int + :param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model. + :type values: list of int + """ + r = set() + for position in [pos for (pos, v) in enumerate(values) if v == 1]: + r.add( + tuple(MaceCommand._make_relation_tuple(position, values, num_entities)) + ) + return r + + @staticmethod + def _make_relation_tuple(position, values, num_entities): + if len(values) == 1: + return [] + else: + sublist_size = len(values) // num_entities + sublist_start = position // sublist_size + sublist_position = int(position % sublist_size) + + sublist = values[ + sublist_start * sublist_size : (sublist_start + 1) * sublist_size + ] + return [ + MaceCommand._make_model_var(sublist_start) + ] + MaceCommand._make_relation_tuple( + sublist_position, sublist, num_entities + ) + + @staticmethod + def _make_model_var(value): + """ + Pick an alphabetic character as identifier for an entity in the model. 
+ + :param value: where to index into the list of characters + :type value: int + """ + letter = [ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + ][value] + num = value // 26 + return letter + str(num) if num > 0 else letter + + def _decorate_model(self, valuation_str, format): + """ + Print out a Mace4 model using any Mace4 ``interpformat`` format. + See https://www.cs.unm.edu/~mccune/mace4/manual/ for details. + + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + models. Defaults to 'standard' format. + :return: str + """ + if not format: + return valuation_str + elif format == "valuation": + return self._convert2val(valuation_str) + else: + return self._transform_output(valuation_str, format) + + def _transform_output(self, valuation_str, format): + """ + Transform the output file into any Mace4 ``interpformat`` format. + + :param format: Output format for displaying models. + :type format: str + """ + if format in [ + "standard", + "standard2", + "portable", + "tabular", + "raw", + "cooked", + "xml", + "tex", + ]: + return self._call_interpformat(valuation_str, [format])[0] + else: + raise LookupError("The specified format does not exist") + + def _call_interpformat(self, input_str, args=[], verbose=False): + """ + Call the ``interpformat`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._interpformat_bin is None: + self._interpformat_bin = self._modelbuilder._find_binary( + "interpformat", verbose + ) + + return self._modelbuilder._call( + input_str, self._interpformat_bin, args, verbose + ) + + +class Mace(Prover9Parent, ModelBuilder): + _mace4_bin = None + + def __init__(self, end_size=500): + self._end_size = end_size + """The maximum model size that Mace will try before + simply returning false. (Use -1 for no maximum.)""" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Use Mace4 to build a first order model. + + :return: ``True`` if a model was found (i.e. Mace returns value of 0), + else ``False`` + """ + if not assumptions: + assumptions = [] + + stdout, returncode = self._call_mace4( + self.prover9_input(goal, assumptions), verbose=verbose + ) + return (returncode == 0, stdout) + + def _call_mace4(self, input_str, args=[], verbose=False): + """ + Call the ``mace4`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._mace4_bin is None: + self._mace4_bin = self._find_binary("mace4", verbose) + + updated_input_str = "" + if self._end_size > 0: + updated_input_str += "assign(end_size, %d).\n\n" % self._end_size + updated_input_str += input_str + + return self._call(updated_input_str, self._mace4_bin, args, verbose) + + +def spacer(num=30): + print("-" * num) + + +def decode_result(found): + """ + Decode the result of model_found() + + :param found: The output of model_found() + :type found: bool + """ + return {True: "Countermodel found", False: "No countermodel found", None: "None"}[ + found + ] + + +def test_model_found(arguments): + """ + Try some proofs and exhibit the results. 
+ """ + for (goal, assumptions) in arguments: + g = Expression.fromstring(goal) + alist = [lp.parse(a) for a in assumptions] + m = MaceCommand(g, assumptions=alist, max_models=50) + found = m.build_model() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {decode_result(found)}\n") + + +def test_build_model(arguments): + """ + Try to build a ``nltk.sem.Valuation``. + """ + g = Expression.fromstring("all x.man(x)") + alist = [ + Expression.fromstring(a) + for a in [ + "man(John)", + "man(Socrates)", + "man(Bill)", + "some x.(-(x = John) & man(x) & sees(John,x))", + "some x.(-(x = Bill) & man(x))", + "all x.some y.(man(x) -> gives(Socrates,x,y))", + ] + ] + + m = MaceCommand(g, assumptions=alist) + m.build_model() + spacer() + print("Assumptions and Goal") + spacer() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {decode_result(m.build_model())}\n") + spacer() + # print(m.model('standard')) + # print(m.model('cooked')) + print("Valuation") + spacer() + print(m.valuation, "\n") + + +def test_transform_output(argument_pair): + """ + Transform the model into various Mace4 ``interpformat`` formats. + """ + g = Expression.fromstring(argument_pair[0]) + alist = [lp.parse(a) for a in argument_pair[1]] + m = MaceCommand(g, assumptions=alist) + m.build_model() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {m.build_model()}\n") + for format in ["standard", "portable", "xml", "cooked"]: + spacer() + print("Using '%s' format" % format) + spacer() + print(m.model(format=format)) + + +def test_make_relation_set(): + print( + MaceCommand._make_relation_set(num_entities=3, values=[1, 0, 1]) + == {("c",), ("a",)} + ) + print( + MaceCommand._make_relation_set( + num_entities=3, values=[0, 0, 0, 0, 0, 0, 1, 0, 0] + ) + == {("c", "a")} + ) + print( + MaceCommand._make_relation_set(num_entities=2, values=[0, 0, 1, 0, 0, 0, 1, 0]) + == {("a", "b", "a"), ("b", "b", "a")} + ) + + +arguments = [ + ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), + ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), +] + + +def demo(): + test_model_found(arguments) + test_build_model(arguments) + test_transform_output(arguments[1]) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7075ed11e7833201ad98c6fc80406d1ef646db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py @@ -0,0 +1,561 @@ +# Natural Language Toolkit: Nonmonotonic Reasoning +# +# Author: Daniel H. Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A module to perform nonmonotonic reasoning. The ideas and demonstrations in +this module are based on "Logical Foundations of Artificial Intelligence" by +Michael R. Genesereth and Nils J. Nilsson. 
+""" + +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import Prover, ProverCommandDecorator +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + BooleanExpression, + EqualityExpression, + ExistsExpression, + Expression, + ImpExpression, + NegatedExpression, + Variable, + VariableExpression, + operator, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +def get_domain(goal, assumptions): + if goal is None: + all_expressions = assumptions + else: + all_expressions = assumptions + [-goal] + return reduce(operator.or_, (a.constants() for a in all_expressions), set()) + + +class ClosedDomainProver(ProverCommandDecorator): + """ + This is a prover decorator that adds domain closure assumptions before + proving. + """ + + def assumptions(self): + assumptions = [a for a in self._command.assumptions()] + goal = self._command.goal() + domain = get_domain(goal, assumptions) + return [self.replace_quants(ex, domain) for ex in assumptions] + + def goal(self): + goal = self._command.goal() + domain = get_domain(goal, self._command.assumptions()) + return self.replace_quants(goal, domain) + + def replace_quants(self, ex, domain): + """ + Apply the closed domain assumption to the expression + + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - translate "exists x.P" to "(z=d1 | z=d2 | ... ) & P.replace(x,z)" OR + "P.replace(x, d1) | P.replace(x, d2) | ..." + - translate "all x.P" to "P.replace(x, d1) & P.replace(x, d2) & ..." + + :param ex: ``Expression`` + :param domain: set of {Variable}s + :return: ``Expression`` + """ + if isinstance(ex, AllExpression): + conjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + conjuncts = [self.replace_quants(c, domain) for c in conjuncts] + return reduce(lambda x, y: x & y, conjuncts) + elif isinstance(ex, BooleanExpression): + return ex.__class__( + self.replace_quants(ex.first, domain), + self.replace_quants(ex.second, domain), + ) + elif isinstance(ex, NegatedExpression): + return -self.replace_quants(ex.term, domain) + elif isinstance(ex, ExistsExpression): + disjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + disjuncts = [self.replace_quants(d, domain) for d in disjuncts] + return reduce(lambda x, y: x | y, disjuncts) + else: + return ex + + +class UniqueNamesProver(ProverCommandDecorator): + """ + This is a prover decorator that adds unique names assumptions before + proving. + """ + + def assumptions(self): + """ + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - if "d1 = d2" cannot be proven from the premises, then add "d1 != d2" + """ + assumptions = self._command.assumptions() + + domain = list(get_domain(self._command.goal(), assumptions)) + + # build a dictionary of obvious equalities + eq_sets = SetHolder() + for a in assumptions: + if isinstance(a, EqualityExpression): + av = a.first.variable + bv = a.second.variable + # put 'a' and 'b' in the same set + eq_sets[av].add(bv) + + new_assumptions = [] + for i, a in enumerate(domain): + for b in domain[i + 1 :]: + # if a and b are not already in the same equality set + if b not in eq_sets[a]: + newEqEx = EqualityExpression( + VariableExpression(a), VariableExpression(b) + ) + if Prover9().prove(newEqEx, assumptions): + # we can prove that the names are the same entity. 
+ # remember that they are equal so we don't re-check. + eq_sets[a].add(b) + else: + # we can't prove it, so assume unique names + new_assumptions.append(-newEqEx) + + return assumptions + new_assumptions + + +class SetHolder(list): + """ + A list of sets of Variables. + """ + + def __getitem__(self, item): + """ + :param item: ``Variable`` + :return: the set containing 'item' + """ + assert isinstance(item, Variable) + for s in self: + if item in s: + return s + # item is not found in any existing set. so create a new set + new = {item} + self.append(new) + return new + + +class ClosedWorldProver(ProverCommandDecorator): + """ + This is a prover decorator that completes predicates before proving. + + If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion of "P". + If the assumptions contain "all x.(ostrich(x) -> bird(x))", then "all x.(bird(x) -> ostrich(x))" is the completion of "bird". + If the assumptions don't contain anything that are "P", then "all x.-P(x)" is the completion of "P". + + walk(Socrates) + Socrates != Bill + + all x.(walk(x) -> (x=Socrates)) + ---------------- + -walk(Bill) + + see(Socrates, John) + see(John, Mary) + Socrates != John + John != Mary + + all x.all y.(see(x,y) -> ((x=Socrates & y=John) | (x=John & y=Mary))) + ---------------- + -see(Socrates, Mary) + + all x.(ostrich(x) -> bird(x)) + bird(Tweety) + -ostrich(Sam) + Sam != Tweety + + all x.(bird(x) -> (ostrich(x) | x=Tweety)) + + all x.-ostrich(x) + ------------------- + -bird(Sam) + """ + + def assumptions(self): + assumptions = self._command.assumptions() + + predicates = self._make_predicate_dict(assumptions) + + new_assumptions = [] + for p in predicates: + predHolder = predicates[p] + new_sig = self._make_unique_signature(predHolder) + new_sig_exs = [VariableExpression(v) for v in new_sig] + + disjuncts = [] + + # Turn the signatures into disjuncts + for sig in predHolder.signatures: + equality_exs = [] + for v1, v2 in zip(new_sig_exs, sig): + equality_exs.append(EqualityExpression(v1, v2)) + disjuncts.append(reduce(lambda x, y: x & y, equality_exs)) + + # Turn the properties into disjuncts + for prop in predHolder.properties: + # replace variables from the signature with new sig variables + bindings = {} + for v1, v2 in zip(new_sig_exs, prop[0]): + bindings[v2] = v1 + disjuncts.append(prop[1].substitute_bindings(bindings)) + + # make the assumption + if disjuncts: + # disjuncts exist, so make an implication + antecedent = self._make_antecedent(p, new_sig) + consequent = reduce(lambda x, y: x | y, disjuncts) + accum = ImpExpression(antecedent, consequent) + else: + # nothing has property 'p' + accum = NegatedExpression(self._make_antecedent(p, new_sig)) + + # quantify the implication + for new_sig_var in new_sig[::-1]: + accum = AllExpression(new_sig_var, accum) + new_assumptions.append(accum) + + return assumptions + new_assumptions + + def _make_unique_signature(self, predHolder): + """ + This method figures out how many arguments the predicate takes and + returns a tuple containing that number of unique variables. + """ + return tuple(unique_variable() for i in range(predHolder.signature_len)) + + def _make_antecedent(self, predicate, signature): + """ + Return an application expression with 'predicate' as the predicate + and 'signature' as the list of arguments. 
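+        For example, a predicate ``P`` with signature ``(x, y)`` yields the
+        application expression ``P(x,y)``.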
+ """ + antecedent = predicate + for v in signature: + antecedent = antecedent(VariableExpression(v)) + return antecedent + + def _make_predicate_dict(self, assumptions): + """ + Create a dictionary of predicates from the assumptions. + + :param assumptions: a list of ``Expression``s + :return: dict mapping ``AbstractVariableExpression`` to ``PredHolder`` + """ + predicates = defaultdict(PredHolder) + for a in assumptions: + self._map_predicates(a, predicates) + return predicates + + def _map_predicates(self, expression, predDict): + if isinstance(expression, ApplicationExpression): + func, args = expression.uncurry() + if isinstance(func, AbstractVariableExpression): + predDict[func].append_sig(tuple(args)) + elif isinstance(expression, AndExpression): + self._map_predicates(expression.first, predDict) + self._map_predicates(expression.second, predDict) + elif isinstance(expression, AllExpression): + # collect all the universally quantified variables + sig = [expression.variable] + term = expression.term + while isinstance(term, AllExpression): + sig.append(term.variable) + term = term.term + if isinstance(term, ImpExpression): + if isinstance(term.first, ApplicationExpression) and isinstance( + term.second, ApplicationExpression + ): + func1, args1 = term.first.uncurry() + func2, args2 = term.second.uncurry() + if ( + isinstance(func1, AbstractVariableExpression) + and isinstance(func2, AbstractVariableExpression) + and sig == [v.variable for v in args1] + and sig == [v.variable for v in args2] + ): + predDict[func2].append_prop((tuple(sig), term.first)) + predDict[func1].validate_sig_len(sig) + + +class PredHolder: + """ + This class will be used by a dictionary that will store information + about predicates to be used by the ``ClosedWorldProver``. + + The 'signatures' property is a list of tuples defining signatures for + which the predicate is true. For instance, 'see(john, mary)' would be + result in the signature '(john,mary)' for 'see'. + + The second element of the pair is a list of pairs such that the first + element of the pair is a tuple of variables and the second element is an + expression of those variables that makes the predicate true. For instance, + 'all x.all y.(see(x,y) -> know(x,y))' would result in "((x,y),('see(x,y)'))" + for 'know'. 
+ """ + + def __init__(self): + self.signatures = [] + self.properties = [] + self.signature_len = None + + def append_sig(self, new_sig): + self.validate_sig_len(new_sig) + self.signatures.append(new_sig) + + def append_prop(self, new_prop): + self.validate_sig_len(new_prop[0]) + self.properties.append(new_prop) + + def validate_sig_len(self, new_sig): + if self.signature_len is None: + self.signature_len = len(new_sig) + elif self.signature_len != len(new_sig): + raise Exception("Signature lengths do not match") + + def __str__(self): + return f"({self.signatures},{self.properties},{self.signature_len})" + + def __repr__(self): + return "%s" % self + + +def closed_domain_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"walk(Bill)") + c = lexpr(r"all x.walk(x)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"girl(mary)") + p2 = lexpr(r"dog(rover)") + p3 = lexpr(r"all x.(girl(x) -> -dog(x))") + p4 = lexpr(r"all x.(dog(x) -> -girl(x))") + p5 = lexpr(r"chase(mary, rover)") + c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))") + prover = Prover9Command(c, [p1, p2, p3, p4, p5]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + +def unique_names_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"man(Socrates)") + p2 = lexpr(r"man(Bill)") + c = lexpr(r"exists x.exists y.(x != y)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))") + p2 = lexpr(r"Bill = William") + p3 = lexpr(r"Bill = Billy") + c = lexpr(r"-walk(William)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + +def closed_world_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"(Socrates != Bill)") + c = lexpr(r"-walk(Bill)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") 
+ for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + p3 = lexpr(r"(Socrates != John)") + p4 = lexpr(r"(John != Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"all x.(ostrich(x) -> bird(x))") + p2 = lexpr(r"bird(Tweety)") + p3 = lexpr(r"-ostrich(Sam)") + p4 = lexpr(r"Sam != Tweety") + c = lexpr(r"-bird(Sam)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + +def combination_prover_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) + for a in command.assumptions(): + print(a) + print(command.prove()) + + +def default_reasoning_demo(): + lexpr = Expression.fromstring + + premises = [] + + # define taxonomy + premises.append(lexpr(r"all x.(elephant(x) -> animal(x))")) + premises.append(lexpr(r"all x.(bird(x) -> animal(x))")) + premises.append(lexpr(r"all x.(dove(x) -> bird(x))")) + premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))")) + premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))")) + + # default properties + premises.append( + lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))") + ) # normal animals don't fly + premises.append( + lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))") + ) # normal birds fly + premises.append( + lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))") + ) # normal ostriches don't fly + + # specify abnormal entities + premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) # flight + premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) # non-flying bird + premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) # flying ostrich + + # define entities + premises.append(lexpr(r"elephant(E)")) + premises.append(lexpr(r"dove(D)")) + premises.append(lexpr(r"ostrich(O)")) + + # print the assumptions + prover = Prover9Command(None, premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + for a in command.assumptions(): + print(a) + + print_proof("-fly(E)", premises) + print_proof("fly(D)", premises) + print_proof("-fly(O)", premises) + + +def print_proof(goal, premises): + lexpr = Expression.fromstring + prover = Prover9Command(lexpr(goal), premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + print(goal, prover.prove(), command.prove()) + + +def demo(): + closed_domain_demo() + unique_names_demo() + closed_world_demo() + combination_prover_demo() + default_reasoning_demo() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/prover9.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/prover9.py new file mode 100644 index 0000000000000000000000000000000000000000..73345f27473f011a7628c91834606f6e1f532044 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/prover9.py @@ -0,0 +1,508 @@ +# Natural Language Toolkit: Interface to the Prover9 Theorem Prover +# +# Copyright (C) 2001-2023 
NLTK Project +# Author: Dan Garrette +# Ewan Klein +# +# URL: +# For license information, see LICENSE.TXT +""" +A theorem prover that makes use of the external 'Prover9' package. +""" + +import os +import subprocess + +import nltk +from nltk.inference.api import BaseProverCommand, Prover +from nltk.sem.logic import ( + AllExpression, + AndExpression, + EqualityExpression, + ExistsExpression, + Expression, + IffExpression, + ImpExpression, + NegatedExpression, + OrExpression, +) + +# +# Following is not yet used. Return code for 2 actually realized as 512. +# +p9_return_codes = { + 0: True, + 1: "(FATAL)", # A fatal error occurred (user's syntax error). + 2: False, # (SOS_EMPTY) Prover9 ran out of things to do + # (sos list exhausted). + 3: "(MAX_MEGS)", # The max_megs (memory limit) parameter was exceeded. + 4: "(MAX_SECONDS)", # The max_seconds parameter was exceeded. + 5: "(MAX_GIVEN)", # The max_given parameter was exceeded. + 6: "(MAX_KEPT)", # The max_kept parameter was exceeded. + 7: "(ACTION)", # A Prover9 action terminated the search. + 101: "(SIGSEGV)", # Prover9 crashed, most probably due to a bug. +} + + +class Prover9CommandParent: + """ + A common base class used by both ``Prover9Command`` and ``MaceCommand``, + which is responsible for maintaining a goal and a set of assumptions, + and generating prover9-style input files from them. + """ + + def print_assumptions(self, output_format="nltk"): + """ + Print the list of the current assumptions. + """ + if output_format.lower() == "nltk": + for a in self.assumptions(): + print(a) + elif output_format.lower() == "prover9": + for a in convert_to_prover9(self.assumptions()): + print(a) + else: + raise NameError( + "Unrecognized value for 'output_format': %s" % output_format + ) + + +class Prover9Command(Prover9CommandParent, BaseProverCommand): + """ + A ``ProverCommand`` specific to the ``Prover9`` prover. It contains + the a print_assumptions() method that is used to print the list + of assumptions in multiple formats. + """ + + def __init__(self, goal=None, assumptions=None, timeout=60, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + :param timeout: number of seconds before timeout; set to 0 for + no timeout. + :type timeout: int + :param prover: a prover. If not set, one will be created. + :type prover: Prover9 + """ + if not assumptions: + assumptions = [] + + if prover is not None: + assert isinstance(prover, Prover9) + else: + prover = Prover9(timeout) + + BaseProverCommand.__init__(self, prover, goal, assumptions) + + def decorate_proof(self, proof_string, simplify=True): + """ + :see BaseProverCommand.decorate_proof() + """ + if simplify: + return self._prover._call_prooftrans(proof_string, ["striplabels"])[ + 0 + ].rstrip() + else: + return proof_string.rstrip() + + +class Prover9Parent: + """ + A common class extended by both ``Prover9`` and ``Mace ``. + It contains the functionality required to convert NLTK-style + expressions into Prover9-style expressions. 
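+
+    The external binaries are located either from an explicitly configured
+    path (see ``config_prover9``) or by searching the directories returned
+    by ``binary_locations`` and the PROVER9 environment variable.
+
+    ``prover9_input`` renders a goal and assumptions into the input-file
+    format the binaries expect; for the single assumption ``walk(john)``
+    and the goal ``exists x.walk(x)``, the generated text looks roughly
+    like::
+
+        formulas(assumptions).
+            walk(john).
+        end_of_list.
+
+        formulas(goals).
+            exists x walk(x).
+        end_of_list.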
+ """ + + _binary_location = None + + def config_prover9(self, binary_location, verbose=False): + if binary_location is None: + self._binary_location = None + self._prover9_bin = None + else: + name = "prover9" + self._prover9_bin = nltk.internals.find_binary( + name, + path_to_bin=binary_location, + env_vars=["PROVER9"], + url="https://www.cs.unm.edu/~mccune/prover9/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1) + + def prover9_input(self, goal, assumptions): + """ + :return: The input string that should be provided to the + prover9 binary. This string is formed based on the goal, + assumptions, and timeout value of this object. + """ + s = "" + + if assumptions: + s += "formulas(assumptions).\n" + for p9_assumption in convert_to_prover9(assumptions): + s += " %s.\n" % p9_assumption + s += "end_of_list.\n\n" + + if goal: + s += "formulas(goals).\n" + s += " %s.\n" % convert_to_prover9(goal) + s += "end_of_list.\n\n" + + return s + + def binary_locations(self): + """ + A list of directories that should be searched for the prover9 + executables. This list is used by ``config_prover9`` when searching + for the prover9 executables. + """ + return [ + "/usr/local/bin/prover9", + "/usr/local/bin/prover9/bin", + "/usr/local/bin", + "/usr/bin", + "/usr/local/prover9", + "/usr/local/share/prover9", + ] + + def _find_binary(self, name, verbose=False): + binary_locations = self.binary_locations() + if self._binary_location is not None: + binary_locations += [self._binary_location] + return nltk.internals.find_binary( + name, + searchpath=binary_locations, + env_vars=["PROVER9"], + url="https://www.cs.unm.edu/~mccune/prover9/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + + def _call(self, input_str, binary, args=[], verbose=False): + """ + Call the binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param binary: The location of the binary to call + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if verbose: + print("Calling:", binary) + print("Args:", args) + print("Input:\n", input_str, "\n") + + # Call prover9 via a subprocess + cmd = [binary] + args + try: + input_str = input_str.encode("utf8") + except AttributeError: + pass + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE + ) + (stdout, stderr) = p.communicate(input=input_str) + + if verbose: + print("Return code:", p.returncode) + if stdout: + print("stdout:\n", stdout, "\n") + if stderr: + print("stderr:\n", stderr, "\n") + + return (stdout.decode("utf-8"), p.returncode) + + +def convert_to_prover9(input): + """ + Convert a ``logic.Expression`` to Prover9 format. + """ + if isinstance(input, list): + result = [] + for s in input: + try: + result.append(_convert_to_prover9(s.simplify())) + except: + print("input %s cannot be converted to Prover9 input syntax" % input) + raise + return result + else: + try: + return _convert_to_prover9(input.simplify()) + except: + print("input %s cannot be converted to Prover9 input syntax" % input) + raise + + +def _convert_to_prover9(expression): + """ + Convert ``logic.Expression`` to Prover9 formatted string. 
+ """ + if isinstance(expression, ExistsExpression): + return ( + "exists " + + str(expression.variable) + + " " + + _convert_to_prover9(expression.term) + ) + elif isinstance(expression, AllExpression): + return ( + "all " + + str(expression.variable) + + " " + + _convert_to_prover9(expression.term) + ) + elif isinstance(expression, NegatedExpression): + return "-(" + _convert_to_prover9(expression.term) + ")" + elif isinstance(expression, AndExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " & " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, OrExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " | " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, ImpExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " -> " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, IffExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " <-> " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, EqualityExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " = " + + _convert_to_prover9(expression.second) + + ")" + ) + else: + return str(expression) + + +class Prover9(Prover9Parent, Prover): + _prover9_bin = None + _prooftrans_bin = None + + def __init__(self, timeout=60): + self._timeout = timeout + """The timeout value for prover9. If a proof can not be found + in this amount of time, then prover9 will return false. + (Use 0 for no timeout.)""" + + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + Use Prover9 to prove a theorem. + :return: A pair whose first element is a boolean indicating if the + proof was successful (i.e. returns value of 0) and whose second element + is the output of the prover. + """ + if not assumptions: + assumptions = [] + + stdout, returncode = self._call_prover9( + self.prover9_input(goal, assumptions), verbose=verbose + ) + return (returncode == 0, stdout) + + def prover9_input(self, goal, assumptions): + """ + :see: Prover9Parent.prover9_input + """ + s = "clear(auto_denials).\n" # only one proof required + return s + Prover9Parent.prover9_input(self, goal, assumptions) + + def _call_prover9(self, input_str, args=[], verbose=False): + """ + Call the ``prover9`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._prover9_bin is None: + self._prover9_bin = self._find_binary("prover9", verbose) + + updated_input_str = "" + if self._timeout > 0: + updated_input_str += "assign(max_seconds, %d).\n\n" % self._timeout + updated_input_str += input_str + + stdout, returncode = self._call( + updated_input_str, self._prover9_bin, args, verbose + ) + + if returncode not in [0, 2]: + errormsgprefix = "%%ERROR:" + if errormsgprefix in stdout: + msgstart = stdout.index(errormsgprefix) + errormsg = stdout[msgstart:].strip() + else: + errormsg = None + if returncode in [3, 4, 5, 6]: + raise Prover9LimitExceededException(returncode, errormsg) + else: + raise Prover9FatalException(returncode, errormsg) + + return stdout, returncode + + def _call_prooftrans(self, input_str, args=[], verbose=False): + """ + Call the ``prooftrans`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. 
+ :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._prooftrans_bin is None: + self._prooftrans_bin = self._find_binary("prooftrans", verbose) + + return self._call(input_str, self._prooftrans_bin, args, verbose) + + +class Prover9Exception(Exception): + def __init__(self, returncode, message): + msg = p9_return_codes[returncode] + if message: + msg += "\n%s" % message + Exception.__init__(self, msg) + + +class Prover9FatalException(Prover9Exception): + pass + + +class Prover9LimitExceededException(Prover9Exception): + pass + + +###################################################################### +# { Tests and Demos +###################################################################### + + +def test_config(): + + a = Expression.fromstring("(walk(j) & sing(j))") + g = Expression.fromstring("walk(j)") + p = Prover9Command(g, assumptions=[a]) + p._executable_path = None + p.prover9_search = [] + p.prove() + # config_prover9('/usr/local/bin') + print(p.prove()) + print(p.proof()) + + +def test_convert_to_prover9(expr): + """ + Test that parsing works OK. + """ + for t in expr: + e = Expression.fromstring(t) + print(convert_to_prover9(e)) + + +def test_prove(arguments): + """ + Try some proofs and exhibit the results. + """ + for (goal, assumptions) in arguments: + g = Expression.fromstring(goal) + alist = [Expression.fromstring(a) for a in assumptions] + p = Prover9Command(g, assumptions=alist).prove() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {p}\n") + + +arguments = [ + ("(man(x) <-> (not (not man(x))))", []), + ("(not (man(x) & (not man(x))))", []), + ("(man(x) | (not man(x)))", []), + ("(man(x) & (not man(x)))", []), + ("(man(x) -> man(x))", []), + ("(not (man(x) & (not man(x))))", []), + ("(man(x) | (not man(x)))", []), + ("(man(x) -> man(x))", []), + ("(man(x) <-> man(x))", []), + ("(not (man(x) <-> (not man(x))))", []), + ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), + ("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []), + ("(all x.man(x) -> all x.man(x))", []), + ("some x.all y.sees(x,y)", []), + ( + "some e3.(walk(e3) & subj(e3, mary))", + [ + "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" + ], + ), + ( + "some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))", + [ + "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" + ], + ), +] + +expressions = [ + r"some x y.sees(x,y)", + r"some x.(man(x) & walks(x))", + r"\x.(man(x) & walks(x))", + r"\x y.sees(x,y)", + r"walks(john)", + r"\x.big(x, \y.mouse(y))", + r"(walks(x) & (runs(x) & (threes(x) & fours(x))))", + r"(walks(x) -> runs(x))", + r"some x.(PRO(x) & sees(John, x))", + r"some x.(man(x) & (not walks(x)))", + r"all x.(man(x) -> walks(x))", +] + + +def spacer(num=45): + print("-" * num) + + +def demo(): + print("Testing configuration") + spacer() + test_config() + print() + print("Testing conversion to Prover9 format") + spacer() + test_convert_to_prover9(expressions) + print() + print("Testing proofs") + spacer() + test_prove(arguments) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/resolution.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/resolution.py new file mode 100644 index 0000000000000000000000000000000000000000..52428eb2c5d2bee410716b058165cbccbbb238a4 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/nltk/inference/resolution.py @@ -0,0 +1,759 @@ +# Natural Language Toolkit: First-order Resolution-based Theorem Prover +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +Module for a resolution-based First Order theorem prover. +""" + +import operator +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import BaseProverCommand, Prover +from nltk.sem import skolemize +from nltk.sem.logic import ( + AndExpression, + ApplicationExpression, + EqualityExpression, + Expression, + IndividualVariableExpression, + NegatedExpression, + OrExpression, + Variable, + VariableExpression, + is_indvar, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +class ResolutionProver(Prover): + ANSWER_KEY = "ANSWER" + _assume_false = True + + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in the proof + :type assumptions: list(sem.Expression) + """ + if not assumptions: + assumptions = [] + + result = None + try: + clauses = [] + if goal: + clauses.extend(clausify(-goal)) + for a in assumptions: + clauses.extend(clausify(a)) + result, clauses = self._attempt_proof(clauses) + if verbose: + print(ResolutionProverCommand._decorate_clauses(clauses)) + except RuntimeError as e: + if self._assume_false and str(e).startswith( + "maximum recursion depth exceeded" + ): + result = False + clauses = [] + else: + if verbose: + print(e) + else: + raise e + return (result, clauses) + + def _attempt_proof(self, clauses): + # map indices to lists of indices, to store attempted unifications + tried = defaultdict(list) + + i = 0 + while i < len(clauses): + if not clauses[i].is_tautology(): + # since we try clauses in order, we should start after the last + # index tried + if tried[i]: + j = tried[i][-1] + 1 + else: + j = i + 1 # nothing tried yet for 'i', so start with the next + + while j < len(clauses): + # don't: 1) unify a clause with itself, + # 2) use tautologies + if i != j and j and not clauses[j].is_tautology(): + tried[i].append(j) + newclauses = clauses[i].unify(clauses[j]) + if newclauses: + for newclause in newclauses: + newclause._parents = (i + 1, j + 1) + clauses.append(newclause) + if not len(newclause): # if there's an empty clause + return (True, clauses) + i = -1 # since we added a new clause, restart from the top + break + j += 1 + i += 1 + return (False, clauses) + + +class ResolutionProverCommand(BaseProverCommand): + def __init__(self, goal=None, assumptions=None, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + if prover is not None: + assert isinstance(prover, ResolutionProver) + else: + prover = ResolutionProver() + + BaseProverCommand.__init__(self, prover, goal, assumptions) + self._clauses = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. 
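+        The derived clauses are also cached in self._clauses so that
+        find_answers() and the decorated proof output can be produced
+        without re-running the resolution search.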
+ """ + if self._result is None: + self._result, clauses = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + self._clauses = clauses + self._proof = ResolutionProverCommand._decorate_clauses(clauses) + return self._result + + def find_answers(self, verbose=False): + self.prove(verbose) + + answers = set() + answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY)) + for clause in self._clauses: + for term in clause: + if ( + isinstance(term, ApplicationExpression) + and term.function == answer_ex + and not isinstance(term.argument, IndividualVariableExpression) + ): + answers.add(term.argument) + return answers + + @staticmethod + def _decorate_clauses(clauses): + """ + Decorate the proof output. + """ + out = "" + max_clause_len = max(len(str(clause)) for clause in clauses) + max_seq_len = len(str(len(clauses))) + for i in range(len(clauses)): + parents = "A" + taut = "" + if clauses[i].is_tautology(): + taut = "Tautology" + if clauses[i]._parents: + parents = str(clauses[i]._parents) + parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents + seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1) + out += f"[{seq}] {clauses[i]} {parents} {taut}\n" + return out + + +class Clause(list): + def __init__(self, data): + list.__init__(self, data) + self._is_tautology = None + self._parents = None + + def unify(self, other, bindings=None, used=None, skipped=None, debug=False): + """ + Attempt to unify this Clause with the other, returning a list of + resulting, unified, Clauses. + + :param other: ``Clause`` with which to unify + :param bindings: ``BindingDict`` containing bindings that should be used + during the unification + :param used: tuple of two lists of atoms. The first lists the + atoms from 'self' that were successfully unified with atoms from + 'other'. The second lists the atoms from 'other' that were successfully + unified with atoms from 'self'. + :param skipped: tuple of two ``Clause`` objects. The first is a list of all + the atoms from the 'self' Clause that have not been unified with + anything on the path. The second is same thing for the 'other' Clause. + :param debug: bool indicating whether debug statements should print + :return: list containing all the resulting ``Clause`` objects that could be + obtained by unification + """ + if bindings is None: + bindings = BindingDict() + if used is None: + used = ([], []) + if skipped is None: + skipped = ([], []) + if isinstance(debug, bool): + debug = DebugObject(debug) + + newclauses = _iterate_first( + self, other, bindings, used, skipped, _complete_unify_path, debug + ) + + # remove subsumed clauses. make a list of all indices of subsumed + # clauses, and then remove them from the list + subsumed = [] + for i, c1 in enumerate(newclauses): + if i not in subsumed: + for j, c2 in enumerate(newclauses): + if i != j and j not in subsumed and c1.subsumes(c2): + subsumed.append(j) + result = [] + for i in range(len(newclauses)): + if i not in subsumed: + result.append(newclauses[i]) + + return result + + def isSubsetOf(self, other): + """ + Return True iff every term in 'self' is a term in 'other'. + + :param other: ``Clause`` + :return: bool + """ + for a in self: + if a not in other: + return False + return True + + def subsumes(self, other): + """ + Return True iff 'self' subsumes 'other', this is, if there is a + substitution such that every term in 'self' can be unified with a term + in 'other'. 
+ + :param other: ``Clause`` + :return: bool + """ + negatedother = [] + for atom in other: + if isinstance(atom, NegatedExpression): + negatedother.append(atom.term) + else: + negatedother.append(-atom) + + negatedotherClause = Clause(negatedother) + + bindings = BindingDict() + used = ([], []) + skipped = ([], []) + debug = DebugObject(False) + + return ( + len( + _iterate_first( + self, + negatedotherClause, + bindings, + used, + skipped, + _subsumes_finalize, + debug, + ) + ) + > 0 + ) + + def __getslice__(self, start, end): + return Clause(list.__getslice__(self, start, end)) + + def __sub__(self, other): + return Clause([a for a in self if a not in other]) + + def __add__(self, other): + return Clause(list.__add__(self, other)) + + def is_tautology(self): + """ + Self is a tautology if it contains ground terms P and -P. The ground + term, P, must be an exact match, ie, not using unification. + """ + if self._is_tautology is not None: + return self._is_tautology + for i, a in enumerate(self): + if not isinstance(a, EqualityExpression): + j = len(self) - 1 + while j > i: + b = self[j] + if isinstance(a, NegatedExpression): + if a.term == b: + self._is_tautology = True + return True + elif isinstance(b, NegatedExpression): + if a == b.term: + self._is_tautology = True + return True + j -= 1 + self._is_tautology = False + return False + + def free(self): + return reduce(operator.or_, ((atom.free() | atom.constants()) for atom in self)) + + def replace(self, variable, expression): + """ + Replace every instance of variable with expression across every atom + in the clause + + :param variable: ``Variable`` + :param expression: ``Expression`` + """ + return Clause([atom.replace(variable, expression) for atom in self]) + + def substitute_bindings(self, bindings): + """ + Replace every binding + + :param bindings: A list of tuples mapping Variable Expressions to the + Expressions to which they are bound. + :return: ``Clause`` + """ + return Clause([atom.substitute_bindings(bindings) for atom in self]) + + def __str__(self): + return "{" + ", ".join("%s" % item for item in self) + "}" + + def __repr__(self): + return "%s" % self + + +def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug): + """ + This method facilitates movement through the terms of 'self' + """ + debug.line(f"unify({first},{second}) {bindings}") + + if not len(first) or not len(second): # if no more recursions can be performed + return finalize_method(first, second, bindings, used, skipped, debug) + else: + # explore this 'self' atom + result = _iterate_second( + first, second, bindings, used, skipped, finalize_method, debug + 1 + ) + + # skip this possible 'self' atom + newskipped = (skipped[0] + [first[0]], skipped[1]) + result += _iterate_first( + first[1:], second, bindings, used, newskipped, finalize_method, debug + 1 + ) + + try: + newbindings, newused, unused = _unify_terms( + first[0], second[0], bindings, used + ) + # Unification found, so progress with this line of unification + # put skipped and unused terms back into play for later unification. 
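+            # (continuing down this path eventually yields the resolvent
+            # clause assembled in _complete_unify_path from the remaining,
+            # skipped, and unused literals)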
+ newfirst = first[1:] + skipped[0] + unused[0] + newsecond = second[1:] + skipped[1] + unused[1] + result += _iterate_first( + newfirst, + newsecond, + newbindings, + newused, + ([], []), + finalize_method, + debug + 1, + ) + except BindingException: + # the atoms could not be unified, + pass + + return result + + +def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug): + """ + This method facilitates movement through the terms of 'other' + """ + debug.line(f"unify({first},{second}) {bindings}") + + if not len(first) or not len(second): # if no more recursions can be performed + return finalize_method(first, second, bindings, used, skipped, debug) + else: + # skip this possible pairing and move to the next + newskipped = (skipped[0], skipped[1] + [second[0]]) + result = _iterate_second( + first, second[1:], bindings, used, newskipped, finalize_method, debug + 1 + ) + + try: + newbindings, newused, unused = _unify_terms( + first[0], second[0], bindings, used + ) + # Unification found, so progress with this line of unification + # put skipped and unused terms back into play for later unification. + newfirst = first[1:] + skipped[0] + unused[0] + newsecond = second[1:] + skipped[1] + unused[1] + result += _iterate_second( + newfirst, + newsecond, + newbindings, + newused, + ([], []), + finalize_method, + debug + 1, + ) + except BindingException: + # the atoms could not be unified, + pass + + return result + + +def _unify_terms(a, b, bindings=None, used=None): + """ + This method attempts to unify two terms. Two expressions are unifiable + if there exists a substitution function S such that S(a) == S(-b). + + :param a: ``Expression`` + :param b: ``Expression`` + :param bindings: ``BindingDict`` a starting set of bindings with which + the unification must be consistent + :return: ``BindingDict`` A dictionary of the bindings required to unify + :raise ``BindingException``: If the terms cannot be unified + """ + assert isinstance(a, Expression) + assert isinstance(b, Expression) + + if bindings is None: + bindings = BindingDict() + if used is None: + used = ([], []) + + # Use resolution + if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression): + newbindings = most_general_unification(a.term, b, bindings) + newused = (used[0] + [a], used[1] + [b]) + unused = ([], []) + elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression): + newbindings = most_general_unification(a, b.term, bindings) + newused = (used[0] + [a], used[1] + [b]) + unused = ([], []) + + # Use demodulation + elif isinstance(a, EqualityExpression): + newbindings = BindingDict([(a.first.variable, a.second)]) + newused = (used[0] + [a], used[1]) + unused = ([], [b]) + elif isinstance(b, EqualityExpression): + newbindings = BindingDict([(b.first.variable, b.second)]) + newused = (used[0], used[1] + [b]) + unused = ([a], []) + + else: + raise BindingException((a, b)) + + return newbindings, newused, unused + + +def _complete_unify_path(first, second, bindings, used, skipped, debug): + if used[0] or used[1]: # if bindings were made along the path + newclause = Clause(skipped[0] + skipped[1] + first + second) + debug.line(" -> New Clause: %s" % newclause) + return [newclause.substitute_bindings(bindings)] + else: # no bindings made means no unification occurred. 
so no result + debug.line(" -> End") + return [] + + +def _subsumes_finalize(first, second, bindings, used, skipped, debug): + if not len(skipped[0]) and not len(first): + # If there are no skipped terms and no terms left in 'first', then + # all of the terms in the original 'self' were unified with terms + # in 'other'. Therefore, there exists a binding (this one) such that + # every term in self can be unified with a term in other, which + # is the definition of subsumption. + return [True] + else: + return [] + + +def clausify(expression): + """ + Skolemize, clausify, and standardize the variables apart. + """ + clause_list = [] + for clause in _clausify(skolemize(expression)): + for free in clause.free(): + if is_indvar(free.name): + newvar = VariableExpression(unique_variable()) + clause = clause.replace(free, newvar) + clause_list.append(clause) + return clause_list + + +def _clausify(expression): + """ + :param expression: a skolemized expression in CNF + """ + if isinstance(expression, AndExpression): + return _clausify(expression.first) + _clausify(expression.second) + elif isinstance(expression, OrExpression): + first = _clausify(expression.first) + second = _clausify(expression.second) + assert len(first) == 1 + assert len(second) == 1 + return [first[0] + second[0]] + elif isinstance(expression, EqualityExpression): + return [Clause([expression])] + elif isinstance(expression, ApplicationExpression): + return [Clause([expression])] + elif isinstance(expression, NegatedExpression): + if isinstance(expression.term, ApplicationExpression): + return [Clause([expression])] + elif isinstance(expression.term, EqualityExpression): + return [Clause([expression])] + raise ProverParseError() + + +class BindingDict: + def __init__(self, binding_list=None): + """ + :param binding_list: list of (``AbstractVariableExpression``, ``AtomicExpression``) to initialize the dictionary + """ + self.d = {} + + if binding_list: + for (v, b) in binding_list: + self[v] = b + + def __setitem__(self, variable, binding): + """ + A binding is consistent with the dict if its variable is not already bound, OR if its + variable is already bound to its argument. 
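+        If 'variable' is already bound to a different value but 'binding'
+        is itself an individual variable, the reverse binding (from
+        'binding' to 'variable') is attempted before an exception is raised.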
+ + :param variable: ``Variable`` The variable to bind + :param binding: ``Expression`` The atomic to which 'variable' should be bound + :raise BindingException: If the variable cannot be bound in this dictionary + """ + assert isinstance(variable, Variable) + assert isinstance(binding, Expression) + + try: + existing = self[variable] + except KeyError: + existing = None + + if not existing or binding == existing: + self.d[variable] = binding + elif isinstance(binding, IndividualVariableExpression): + # Since variable is already bound, try to bind binding to variable + try: + existing = self[binding.variable] + except KeyError: + existing = None + + binding2 = VariableExpression(variable) + + if not existing or binding2 == existing: + self.d[binding.variable] = binding2 + else: + raise BindingException( + "Variable %s already bound to another " "value" % (variable) + ) + else: + raise BindingException( + "Variable %s already bound to another " "value" % (variable) + ) + + def __getitem__(self, variable): + """ + Return the expression to which 'variable' is bound + """ + assert isinstance(variable, Variable) + + intermediate = self.d[variable] + while intermediate: + try: + intermediate = self.d[intermediate] + except KeyError: + return intermediate + + def __contains__(self, item): + return item in self.d + + def __add__(self, other): + """ + :param other: ``BindingDict`` The dict with which to combine self + :return: ``BindingDict`` A new dict containing all the elements of both parameters + :raise BindingException: If the parameter dictionaries are not consistent with each other + """ + try: + combined = BindingDict() + for v in self.d: + combined[v] = self.d[v] + for v in other.d: + combined[v] = other.d[v] + return combined + except BindingException as e: + raise BindingException( + "Attempting to add two contradicting " + "BindingDicts: '%s' and '%s'" % (self, other) + ) from e + + def __len__(self): + return len(self.d) + + def __str__(self): + data_str = ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + return "{" + data_str + "}" + + def __repr__(self): + return "%s" % self + + +def most_general_unification(a, b, bindings=None): + """ + Find the most general unification of the two given expressions + + :param a: ``Expression`` + :param b: ``Expression`` + :param bindings: ``BindingDict`` a starting set of bindings with which the + unification must be consistent + :return: a list of bindings + :raise BindingException: if the Expressions cannot be unified + """ + if bindings is None: + bindings = BindingDict() + + if a == b: + return bindings + elif isinstance(a, IndividualVariableExpression): + return _mgu_var(a, b, bindings) + elif isinstance(b, IndividualVariableExpression): + return _mgu_var(b, a, bindings) + elif isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression): + return most_general_unification( + a.function, b.function, bindings + ) + most_general_unification(a.argument, b.argument, bindings) + raise BindingException((a, b)) + + +def _mgu_var(var, expression, bindings): + if var.variable in expression.free() | expression.constants(): + raise BindingException((var, expression)) + else: + return BindingDict([(var.variable, expression)]) + bindings + + +class BindingException(Exception): + def __init__(self, arg): + if isinstance(arg, tuple): + Exception.__init__(self, "'%s' cannot be bound to '%s'" % arg) + else: + Exception.__init__(self, arg) + + +class UnificationException(Exception): + def __init__(self, a, b): + 
Exception.__init__(self, f"'{a}' cannot unify with '{b}'") + + +class DebugObject: + def __init__(self, enabled=True, indent=0): + self.enabled = enabled + self.indent = indent + + def __add__(self, i): + return DebugObject(self.enabled, self.indent + i) + + def line(self, line): + if self.enabled: + print(" " * self.indent + line) + + +def testResolutionProver(): + resolution_test(r"man(x)") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"(man(x) -> --man(x))") + resolution_test(r"-(man(x) and -man(x))") + resolution_test(r"(man(x) or -man(x))") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"-(man(x) and -man(x))") + resolution_test(r"(man(x) or -man(x))") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"(man(x) iff man(x))") + resolution_test(r"-(man(x) iff -man(x))") + resolution_test("all x.man(x)") + resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))") + resolution_test("some x.all y.sees(x,y)") + + p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))") + p2 = Expression.fromstring(r"man(Socrates)") + c = Expression.fromstring(r"mortal(Socrates)") + print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") + + p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))") + p2 = Expression.fromstring(r"man(John)") + c = Expression.fromstring(r"some y.walks(y)") + print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") + + p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))") + c = Expression.fromstring(r"some e0.walk(e0,mary)") + print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}") + + +def resolution_test(e): + f = Expression.fromstring(e) + t = ResolutionProver().prove(f) + print(f"|- {f}: {t}") + + +def test_clausify(): + lexpr = Expression.fromstring + + print(clausify(lexpr("P(x) | Q(x)"))) + print(clausify(lexpr("(P(x) & Q(x)) | R(x)"))) + print(clausify(lexpr("P(x) | (Q(x) & R(x))"))) + print(clausify(lexpr("(P(x) & Q(x)) | (R(x) & S(x))"))) + + print(clausify(lexpr("P(x) | Q(x) | R(x)"))) + print(clausify(lexpr("P(x) | (Q(x) & R(x)) | S(x)"))) + + print(clausify(lexpr("exists x.P(x) | Q(x)"))) + + print(clausify(lexpr("-(-P(x) & Q(x))"))) + print(clausify(lexpr("P(x) <-> Q(x)"))) + print(clausify(lexpr("-(P(x) <-> Q(x))"))) + print(clausify(lexpr("-(all x.P(x))"))) + print(clausify(lexpr("-(some x.P(x))"))) + + print(clausify(lexpr("some x.P(x)"))) + print(clausify(lexpr("some x.all y.P(x,y)"))) + print(clausify(lexpr("all y.some x.P(x,y)"))) + print(clausify(lexpr("all z.all y.some x.P(x,y,z)"))) + print(clausify(lexpr("all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))"))) + + +def demo(): + test_clausify() + print() + testResolutionProver() + print() + + p = Expression.fromstring("man(x)") + print(ResolutionProverCommand(p, [p]).prove()) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/tableau.py b/llmeval-env/lib/python3.10/site-packages/nltk/inference/tableau.py new file mode 100644 index 0000000000000000000000000000000000000000..620f21b465225f3d8dc91a05414bfd9bbbe3e5c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/inference/tableau.py @@ -0,0 +1,712 @@ +# Natural Language Toolkit: First-Order Tableau Theorem Prover +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Module for a tableau-based First Order theorem prover. 
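+
+The proof search works by refutation: the negation of the goal and all of
+the assumptions are placed on an agenda, and the proof succeeds if every
+branch of the resulting tableau can be closed.
+
+A minimal illustrative use (a tautologous goal needs no assumptions)::
+
+    >>> from nltk.sem.logic import Expression
+    >>> from nltk.inference.tableau import TableauProver
+    >>> TableauProver().prove(Expression.fromstring('(man(x) -> man(x))'))
+    True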
+""" + +from nltk.inference.api import BaseProverCommand, Prover +from nltk.internals import Counter +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + Expression, + FunctionVariableExpression, + IffExpression, + ImpExpression, + LambdaExpression, + NegatedExpression, + OrExpression, + Variable, + VariableExpression, + unique_variable, +) + +_counter = Counter() + + +class ProverParseError(Exception): + pass + + +class TableauProver(Prover): + _assume_false = False + + def _prove(self, goal=None, assumptions=None, verbose=False): + if not assumptions: + assumptions = [] + + result = None + try: + agenda = Agenda() + if goal: + agenda.put(-goal) + agenda.put_all(assumptions) + debugger = Debug(verbose) + result = self._attempt_proof(agenda, set(), set(), debugger) + except RuntimeError as e: + if self._assume_false and str(e).startswith( + "maximum recursion depth exceeded" + ): + result = False + else: + if verbose: + print(e) + else: + raise e + return (result, "\n".join(debugger.lines)) + + def _attempt_proof(self, agenda, accessible_vars, atoms, debug): + (current, context), category = agenda.pop_first() + + # if there's nothing left in the agenda, and we haven't closed the path + if not current: + debug.line("AGENDA EMPTY") + return False + + proof_method = { + Categories.ATOM: self._attempt_proof_atom, + Categories.PROP: self._attempt_proof_prop, + Categories.N_ATOM: self._attempt_proof_n_atom, + Categories.N_PROP: self._attempt_proof_n_prop, + Categories.APP: self._attempt_proof_app, + Categories.N_APP: self._attempt_proof_n_app, + Categories.N_EQ: self._attempt_proof_n_eq, + Categories.D_NEG: self._attempt_proof_d_neg, + Categories.N_ALL: self._attempt_proof_n_all, + Categories.N_EXISTS: self._attempt_proof_n_some, + Categories.AND: self._attempt_proof_and, + Categories.N_OR: self._attempt_proof_n_or, + Categories.N_IMP: self._attempt_proof_n_imp, + Categories.OR: self._attempt_proof_or, + Categories.IMP: self._attempt_proof_imp, + Categories.N_AND: self._attempt_proof_n_and, + Categories.IFF: self._attempt_proof_iff, + Categories.N_IFF: self._attempt_proof_n_iff, + Categories.EQ: self._attempt_proof_eq, + Categories.EXISTS: self._attempt_proof_some, + Categories.ALL: self._attempt_proof_all, + }[category] + + debug.line((current, context)) + return proof_method(current, context, agenda, accessible_vars, atoms, debug) + + def _attempt_proof_atom( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current, True) in atoms: + debug.line("CLOSED", 1) + return True + + if context: + if isinstance(context.term, NegatedExpression): + current = current.negate() + agenda.put(context(current).simplify()) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + else: + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, + accessible_vars | set(current.args), + atoms | {(current, False)}, + debug + 1, + ) + + def _attempt_proof_n_atom( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. 
Return 'True' if it is + if (current.term, False) in atoms: + debug.line("CLOSED", 1) + return True + + if context: + if isinstance(context.term, NegatedExpression): + current = current.negate() + agenda.put(context(current).simplify()) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + else: + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, + accessible_vars | set(current.term.args), + atoms | {(current.term, True)}, + debug + 1, + ) + + def _attempt_proof_prop( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current, True) in atoms: + debug.line("CLOSED", 1) + return True + + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars, atoms | {(current, False)}, debug + 1 + ) + + def _attempt_proof_n_prop( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current.term, False) in atoms: + debug.line("CLOSED", 1) + return True + + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars, atoms | {(current.term, True)}, debug + 1 + ) + + def _attempt_proof_app( + self, current, context, agenda, accessible_vars, atoms, debug + ): + f, args = current.uncurry() + for i, arg in enumerate(args): + if not TableauProver.is_atom(arg): + ctx = f + nv = Variable("X%s" % _counter.get()) + for j, a in enumerate(args): + ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) + if context: + ctx = context(ctx).simplify() + ctx = LambdaExpression(nv, ctx) + agenda.put(arg, ctx) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + raise Exception("If this method is called, there must be a non-atomic argument") + + def _attempt_proof_n_app( + self, current, context, agenda, accessible_vars, atoms, debug + ): + f, args = current.term.uncurry() + for i, arg in enumerate(args): + if not TableauProver.is_atom(arg): + ctx = f + nv = Variable("X%s" % _counter.get()) + for j, a in enumerate(args): + ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) + if context: + # combine new context with existing + ctx = context(ctx).simplify() + ctx = LambdaExpression(nv, -ctx) + agenda.put(-arg, ctx) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + raise Exception("If this method is called, there must be a non-atomic argument") + + def _attempt_proof_n_eq( + self, current, context, agenda, accessible_vars, atoms, debug + ): + ########################################################################### + # Since 'current' is of type '~(a=b)', the path is closed if 'a' == 'b' + ########################################################################### + if current.term.first == current.term.second: + debug.line("CLOSED", 1) + return True + + agenda[Categories.N_EQ].add((current, context)) + current._exhausted = True + return self._attempt_proof( + agenda, + accessible_vars | {current.term.first, current.term.second}, + atoms, + debug + 1, + ) + + def _attempt_proof_d_neg( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.term.term, context) + 
return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_all( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda[Categories.EXISTS].add( + (ExistsExpression(current.term.variable, -current.term.term), context) + ) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_some( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda[Categories.ALL].add( + (AllExpression(current.term.variable, -current.term.term), context) + ) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_and( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.first, context) + agenda.put(current.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_or( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(-current.term.first, context) + agenda.put(-current.term.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_imp( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.term.first, context) + agenda.put(-current.term.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_or( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.first, context) + new_agenda.put(current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_imp( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(-current.first, context) + new_agenda.put(current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_and( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(-current.term.first, context) + new_agenda.put(-current.term.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_iff( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.first, context) + agenda.put(current.second, context) + new_agenda.put(-current.first, context) + new_agenda.put(-current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_iff( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.term.first, context) + agenda.put(-current.term.second, context) + new_agenda.put(-current.term.first, context) + new_agenda.put(current.term.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_eq( + self, current, context, agenda, accessible_vars, atoms, debug + ): + ######################################################################### + # Since 'current' is 
of the form '(a = b)', replace ALL free instances + # of 'a' with 'b' + ######################################################################### + agenda.put_atoms(atoms) + agenda.replace_all(current.first, current.second) + accessible_vars.discard(current.first) + agenda.mark_neqs_fresh() + return self._attempt_proof(agenda, accessible_vars, set(), debug + 1) + + def _attempt_proof_some( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_unique_variable = VariableExpression(unique_variable()) + agenda.put(current.term.replace(current.variable, new_unique_variable), context) + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 + ) + + def _attempt_proof_all( + self, current, context, agenda, accessible_vars, atoms, debug + ): + try: + current._used_vars + except AttributeError: + current._used_vars = set() + + # if there are accessible_vars on the path + if accessible_vars: + # get the set of bound variables that have not be used by this AllExpression + bv_available = accessible_vars - current._used_vars + + if bv_available: + variable_to_use = list(bv_available)[0] + debug.line("--> Using '%s'" % variable_to_use, 2) + current._used_vars |= {variable_to_use} + agenda.put( + current.term.replace(current.variable, variable_to_use), context + ) + agenda[Categories.ALL].add((current, context)) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + else: + # no more available variables to substitute + debug.line("--> Variables Exhausted", 2) + current._exhausted = True + agenda[Categories.ALL].add((current, context)) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + else: + new_unique_variable = VariableExpression(unique_variable()) + debug.line("--> Using '%s'" % new_unique_variable, 2) + current._used_vars |= {new_unique_variable} + agenda.put( + current.term.replace(current.variable, new_unique_variable), context + ) + agenda[Categories.ALL].add((current, context)) + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 + ) + + @staticmethod + def is_atom(e): + if isinstance(e, NegatedExpression): + e = e.term + + if isinstance(e, ApplicationExpression): + for arg in e.args: + if not TableauProver.is_atom(arg): + return False + return True + elif isinstance(e, AbstractVariableExpression) or isinstance( + e, LambdaExpression + ): + return True + else: + return False + + +class TableauProverCommand(BaseProverCommand): + def __init__(self, goal=None, assumptions=None, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. 
+ :type assumptions: list(sem.Expression) + """ + if prover is not None: + assert isinstance(prover, TableauProver) + else: + prover = TableauProver() + + BaseProverCommand.__init__(self, prover, goal, assumptions) + + +class Agenda: + def __init__(self): + self.sets = tuple(set() for i in range(21)) + + def clone(self): + new_agenda = Agenda() + set_list = [s.copy() for s in self.sets] + + new_allExs = set() + for allEx, _ in set_list[Categories.ALL]: + new_allEx = AllExpression(allEx.variable, allEx.term) + try: + new_allEx._used_vars = {used for used in allEx._used_vars} + except AttributeError: + new_allEx._used_vars = set() + new_allExs.add((new_allEx, None)) + set_list[Categories.ALL] = new_allExs + + set_list[Categories.N_EQ] = { + (NegatedExpression(n_eq.term), ctx) + for (n_eq, ctx) in set_list[Categories.N_EQ] + } + + new_agenda.sets = tuple(set_list) + return new_agenda + + def __getitem__(self, index): + return self.sets[index] + + def put(self, expression, context=None): + if isinstance(expression, AllExpression): + ex_to_add = AllExpression(expression.variable, expression.term) + try: + ex_to_add._used_vars = {used for used in expression._used_vars} + except AttributeError: + ex_to_add._used_vars = set() + else: + ex_to_add = expression + self.sets[self._categorize_expression(ex_to_add)].add((ex_to_add, context)) + + def put_all(self, expressions): + for expression in expressions: + self.put(expression) + + def put_atoms(self, atoms): + for atom, neg in atoms: + if neg: + self[Categories.N_ATOM].add((-atom, None)) + else: + self[Categories.ATOM].add((atom, None)) + + def pop_first(self): + """Pop the first expression that appears in the agenda""" + for i, s in enumerate(self.sets): + if s: + if i in [Categories.N_EQ, Categories.ALL]: + for ex in s: + try: + if not ex[0]._exhausted: + s.remove(ex) + return (ex, i) + except AttributeError: + s.remove(ex) + return (ex, i) + else: + return (s.pop(), i) + return ((None, None), None) + + def replace_all(self, old, new): + for s in self.sets: + for ex, ctx in s: + ex.replace(old.variable, new) + if ctx is not None: + ctx.replace(old.variable, new) + + def mark_alls_fresh(self): + for u, _ in self.sets[Categories.ALL]: + u._exhausted = False + + def mark_neqs_fresh(self): + for neq, _ in self.sets[Categories.N_EQ]: + neq._exhausted = False + + def _categorize_expression(self, current): + if isinstance(current, NegatedExpression): + return self._categorize_NegatedExpression(current) + elif isinstance(current, FunctionVariableExpression): + return Categories.PROP + elif TableauProver.is_atom(current): + return Categories.ATOM + elif isinstance(current, AllExpression): + return Categories.ALL + elif isinstance(current, AndExpression): + return Categories.AND + elif isinstance(current, OrExpression): + return Categories.OR + elif isinstance(current, ImpExpression): + return Categories.IMP + elif isinstance(current, IffExpression): + return Categories.IFF + elif isinstance(current, EqualityExpression): + return Categories.EQ + elif isinstance(current, ExistsExpression): + return Categories.EXISTS + elif isinstance(current, ApplicationExpression): + return Categories.APP + else: + raise ProverParseError("cannot categorize %s" % current.__class__.__name__) + + def _categorize_NegatedExpression(self, current): + negated = current.term + + if isinstance(negated, NegatedExpression): + return Categories.D_NEG + elif isinstance(negated, FunctionVariableExpression): + return Categories.N_PROP + elif TableauProver.is_atom(negated): + return 
Categories.N_ATOM + elif isinstance(negated, AllExpression): + return Categories.N_ALL + elif isinstance(negated, AndExpression): + return Categories.N_AND + elif isinstance(negated, OrExpression): + return Categories.N_OR + elif isinstance(negated, ImpExpression): + return Categories.N_IMP + elif isinstance(negated, IffExpression): + return Categories.N_IFF + elif isinstance(negated, EqualityExpression): + return Categories.N_EQ + elif isinstance(negated, ExistsExpression): + return Categories.N_EXISTS + elif isinstance(negated, ApplicationExpression): + return Categories.N_APP + else: + raise ProverParseError("cannot categorize %s" % negated.__class__.__name__) + + +class Debug: + def __init__(self, verbose, indent=0, lines=None): + self.verbose = verbose + self.indent = indent + + if not lines: + lines = [] + self.lines = lines + + def __add__(self, increment): + return Debug(self.verbose, self.indent + 1, self.lines) + + def line(self, data, indent=0): + if isinstance(data, tuple): + ex, ctx = data + if ctx: + data = f"{ex}, {ctx}" + else: + data = "%s" % ex + + if isinstance(ex, AllExpression): + try: + used_vars = "[%s]" % ( + ",".join("%s" % ve.variable.name for ve in ex._used_vars) + ) + data += ": %s" % used_vars + except AttributeError: + data += ": []" + + newline = "{}{}".format(" " * (self.indent + indent), data) + self.lines.append(newline) + + if self.verbose: + print(newline) + + +class Categories: + ATOM = 0 + PROP = 1 + N_ATOM = 2 + N_PROP = 3 + APP = 4 + N_APP = 5 + N_EQ = 6 + D_NEG = 7 + N_ALL = 8 + N_EXISTS = 9 + AND = 10 + N_OR = 11 + N_IMP = 12 + OR = 13 + IMP = 14 + N_AND = 15 + IFF = 16 + N_IFF = 17 + EQ = 18 + EXISTS = 19 + ALL = 20 + + +def testTableauProver(): + tableau_test("P | -P") + tableau_test("P & -P") + tableau_test("Q", ["P", "(P -> Q)"]) + tableau_test("man(x)") + tableau_test("(man(x) -> man(x))") + tableau_test("(man(x) -> --man(x))") + tableau_test("-(man(x) and -man(x))") + tableau_test("(man(x) or -man(x))") + tableau_test("(man(x) -> man(x))") + tableau_test("-(man(x) and -man(x))") + tableau_test("(man(x) or -man(x))") + tableau_test("(man(x) -> man(x))") + tableau_test("(man(x) iff man(x))") + tableau_test("-(man(x) iff -man(x))") + tableau_test("all x.man(x)") + tableau_test("all x.all y.((x = y) -> (y = x))") + tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))") + # tableau_test('-all x.some y.F(x,y) & some x.all y.(-F(x,y))') + # tableau_test('some x.all y.sees(x,y)') + + p1 = "all x.(man(x) -> mortal(x))" + p2 = "man(Socrates)" + c = "mortal(Socrates)" + tableau_test(c, [p1, p2]) + + p1 = "all x.(man(x) -> walks(x))" + p2 = "man(John)" + c = "some y.walks(y)" + tableau_test(c, [p1, p2]) + + p = "((x = y) & walks(y))" + c = "walks(x)" + tableau_test(c, [p]) + + p = "((x = y) & ((y = z) & (z = w)))" + c = "(x = w)" + tableau_test(c, [p]) + + p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))" + c = "some e0.walk(e0,mary)" + tableau_test(c, [p]) + + c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))" + tableau_test(c) + + +# p = 'some e1.some e2.((believe e1 john e2) and (walk e2 mary))' +# c = 'some x.some e3.some e4.((believe e3 x e4) and (walk e4 mary))' +# tableau_test(c, [p]) + + +def testHigherOrderTableauProver(): + tableau_test("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) + tableau_test("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"]) + tableau_test( + "believe(j, lie(b))", ["lie(b)"] + ) # how do we capture 
that John believes all things that are true + tableau_test( + "believe(j, know(b, cheat(b)))", + ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"], + ) + tableau_test("P(Q(y), R(y) & R(z))", ["P(Q(x) & Q(y), R(y) & R(z))"]) + + tableau_test("believe(j, cheat(b) & lie(b))", ["believe(j, lie(b) & cheat(b))"]) + tableau_test("believe(j, -cheat(b) & -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) + + +def tableau_test(c, ps=None, verbose=False): + pc = Expression.fromstring(c) + pps = [Expression.fromstring(p) for p in ps] if ps else [] + if not ps: + ps = [] + print( + "%s |- %s: %s" + % (", ".join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose)) + ) + + +def demo(): + testTableauProver() + testHigherOrderTableauProver() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ada17ef29e19763f8bc42d103436e7fa72d3cfd0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__init__.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +""" +NLTK Metrics + +Classes and methods for scoring processing modules. +""" + +from nltk.metrics.agreement import AnnotationTask +from nltk.metrics.aline import align +from nltk.metrics.association import ( + BigramAssocMeasures, + ContingencyMeasures, + NgramAssocMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) +from nltk.metrics.confusionmatrix import ConfusionMatrix +from nltk.metrics.distance import ( + binary_distance, + custom_distance, + edit_distance, + edit_distance_align, + fractional_presence, + interval_distance, + jaccard_distance, + masi_distance, + presence, +) +from nltk.metrics.paice import Paice +from nltk.metrics.scores import ( + accuracy, + approxrand, + f_measure, + log_likelihood, + precision, + recall, +) +from nltk.metrics.segmentation import ghd, pk, windowdiff +from nltk.metrics.spearman import ( + ranks_from_scores, + ranks_from_sequence, + spearman_correlation, +) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..821b883f17953c4a6900e63746cb5aa34c6f3b8d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcf18b7d93e8442dbb4abd3f304074b49561d30a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1510b2d5ced41499a8d3a30cd05e5504305aebcf Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/agreement.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/agreement.py new file mode 100644 index 0000000000000000000000000000000000000000..69b1a39fe2017df3beef39fcdd57b9a73c6ac0f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/agreement.py @@ -0,0 +1,465 @@ +# Natural Language Toolkit: Agreement Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tom Lippincott +# URL: +# For license information, see LICENSE.TXT +# + +""" +Implementations of inter-annotator agreement coefficients surveyed by Artstein +and Poesio (2007), Inter-Coder Agreement for Computational Linguistics. + +An agreement coefficient calculates the amount that annotators agreed on label +assignments beyond what is expected by chance. + +In defining the AnnotationTask class, we use naming conventions similar to the +paper's terminology. There are three types of objects in an annotation task: + + the coders (variables "c" and "C") + the items to be annotated (variables "i" and "I") + the potential categories to be assigned (variables "k" and "K") + +Additionally, it is often the case that we don't want to treat two different +labels as complete disagreement, and so the AnnotationTask constructor can also +take a distance metric as a final argument. Distance metrics are simply +functions that take two arguments, and return a value between 0.0 and 1.0 +indicating the distance between them. If not supplied, the default is binary +comparison between the arguments. + +The simplest way to initialize an AnnotationTask is with a list of triples, +each containing a coder's assignment for one object in the task: + + task = AnnotationTask(data=[('c1', '1', 'v1'),('c2', '1', 'v1'),...]) + +Note that the data list needs to contain the same number of triples for each +individual coder, containing category values for the same set of items. + +Alpha (Krippendorff 1980) +Kappa (Cohen 1960) +S (Bennet, Albert and Goldstein 1954) +Pi (Scott 1955) + + +TODO: Describe handling of multiple coders and missing data + +Expected results from the Artstein and Poesio survey paper: + + >>> from nltk.metrics.agreement import AnnotationTask + >>> import os.path + >>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))]) + >>> t.avg_Ao() + 0.88 + >>> round(t.pi(), 5) + 0.79953 + >>> round(t.S(), 2) + 0.82 + + This would have returned a wrong value (0.0) in @785fb79 as coders are in + the wrong order. Subsequently, all values for pi(), S(), and kappa() would + have been wrong as they are computed with avg_Ao(). + >>> t2 = AnnotationTask(data=[('b','1','stat'),('a','1','stat')]) + >>> t2.avg_Ao() + 1.0 + + The following, of course, also works. + >>> t3 = AnnotationTask(data=[('a','1','othr'),('b','1','othr')]) + >>> t3.avg_Ao() + 1.0 + +""" + +import logging +from itertools import groupby +from operator import itemgetter + +from nltk.internals import deprecated +from nltk.metrics.distance import binary_distance +from nltk.probability import ConditionalFreqDist, FreqDist + +log = logging.getLogger(__name__) + + +class AnnotationTask: + """Represents an annotation task, i.e. people assign labels to items. + + Notation tries to match notation in Artstein and Poesio (2007). + + In general, coders and items can be represented as any hashable object. 
+ Integers, for example, are fine, though strings are more readable. + Labels must support the distance functions applied to them, so e.g. + a string-edit-distance makes no sense if your labels are integers, + whereas interval distance needs numeric values. A notable case of this + is the MASI metric, which requires Python sets. + """ + + def __init__(self, data=None, distance=binary_distance): + """Initialize an annotation task. + + The data argument can be None (to create an empty annotation task) or a sequence of 3-tuples, + each representing a coder's labeling of an item: + ``(coder,item,label)`` + + The distance argument is a function taking two arguments (labels) and producing a numerical distance. + The distance from a label to itself should be zero: + ``distance(l,l) = 0`` + """ + self.distance = distance + self.I = set() + self.K = set() + self.C = set() + self.data = [] + if data is not None: + self.load_array(data) + + def __str__(self): + return "\r\n".join( + map( + lambda x: "%s\t%s\t%s" + % (x["coder"], x["item"].replace("_", "\t"), ",".join(x["labels"])), + self.data, + ) + ) + + def load_array(self, array): + """Load an sequence of annotation results, appending to any data already loaded. + + The argument is a sequence of 3-tuples, each representing a coder's labeling of an item: + (coder,item,label) + """ + for coder, item, labels in array: + self.C.add(coder) + self.K.add(labels) + self.I.add(item) + self.data.append({"coder": coder, "labels": labels, "item": item}) + + def agr(self, cA, cB, i, data=None): + """Agreement between two coders on a given item""" + data = data or self.data + # cfedermann: we don't know what combination of coder/item will come + # first in x; to avoid StopIteration problems due to assuming an order + # cA,cB, we allow either for k1 and then look up the missing as k2. + k1 = next(x for x in data if x["coder"] in (cA, cB) and x["item"] == i) + if k1["coder"] == cA: + k2 = next(x for x in data if x["coder"] == cB and x["item"] == i) + else: + k2 = next(x for x in data if x["coder"] == cA and x["item"] == i) + + ret = 1.0 - float(self.distance(k1["labels"], k2["labels"])) + log.debug("Observed agreement between %s and %s on %s: %f", cA, cB, i, ret) + log.debug( + 'Distance between "%r" and "%r": %f', k1["labels"], k2["labels"], 1.0 - ret + ) + return ret + + def Nk(self, k): + return float(sum(1 for x in self.data if x["labels"] == k)) + + def Nik(self, i, k): + return float(sum(1 for x in self.data if x["item"] == i and x["labels"] == k)) + + def Nck(self, c, k): + return float(sum(1 for x in self.data if x["coder"] == c and x["labels"] == k)) + + @deprecated("Use Nk, Nik or Nck instead") + def N(self, k=None, i=None, c=None): + """Implements the "n-notation" used in Artstein and Poesio (2007)""" + if k is not None and i is None and c is None: + ret = self.Nk(k) + elif k is not None and i is not None and c is None: + ret = self.Nik(i, k) + elif k is not None and c is not None and i is None: + ret = self.Nck(c, k) + else: + raise ValueError( + f"You must pass either i or c, not both! 
(k={k!r},i={i!r},c={c!r})" + ) + log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret) + return ret + + def _grouped_data(self, field, data=None): + data = data or self.data + return groupby(sorted(data, key=itemgetter(field)), itemgetter(field)) + + def Ao(self, cA, cB): + """Observed agreement between two coders on all items.""" + data = self._grouped_data( + "item", (x for x in self.data if x["coder"] in (cA, cB)) + ) + ret = sum(self.agr(cA, cB, item, item_data) for item, item_data in data) / len( + self.I + ) + log.debug("Observed agreement between %s and %s: %f", cA, cB, ret) + return ret + + def _pairwise_average(self, function): + """ + Calculates the average of function results for each coder pair + """ + total = 0 + n = 0 + s = self.C.copy() + for cA in self.C: + s.remove(cA) + for cB in s: + total += function(cA, cB) + n += 1 + ret = total / n + return ret + + def avg_Ao(self): + """Average observed agreement across all coders and items.""" + ret = self._pairwise_average(self.Ao) + log.debug("Average observed agreement: %f", ret) + return ret + + def Do_Kw_pairwise(self, cA, cB, max_distance=1.0): + """The observed disagreement for the weighted kappa coefficient.""" + total = 0.0 + data = (x for x in self.data if x["coder"] in (cA, cB)) + for i, itemdata in self._grouped_data("item", data): + # we should have two items; distance doesn't care which comes first + total += self.distance(next(itemdata)["labels"], next(itemdata)["labels"]) + + ret = total / (len(self.I) * max_distance) + log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret) + return ret + + def Do_Kw(self, max_distance=1.0): + """Averaged over all labelers""" + ret = self._pairwise_average( + lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance) + ) + log.debug("Observed disagreement: %f", ret) + return ret + + # Agreement Coefficients + def S(self): + """Bennett, Albert and Goldstein 1954""" + Ae = 1.0 / len(self.K) + ret = (self.avg_Ao() - Ae) / (1.0 - Ae) + return ret + + def pi(self): + """Scott 1955; here, multi-pi. + Equivalent to K from Siegel and Castellan (1988). + + """ + total = 0.0 + label_freqs = FreqDist(x["labels"] for x in self.data) + for k, f in label_freqs.items(): + total += f**2 + Ae = total / ((len(self.I) * len(self.C)) ** 2) + return (self.avg_Ao() - Ae) / (1 - Ae) + + def Ae_kappa(self, cA, cB): + Ae = 0.0 + nitems = float(len(self.I)) + label_freqs = ConditionalFreqDist((x["labels"], x["coder"]) for x in self.data) + for k in label_freqs.conditions(): + Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems) + return Ae + + def kappa_pairwise(self, cA, cB): + """ """ + Ae = self.Ae_kappa(cA, cB) + ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae) + log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae) + return ret + + def kappa(self): + """Cohen 1960 + Averages naively over kappas for each coder pair. + + """ + return self._pairwise_average(self.kappa_pairwise) + + def multi_kappa(self): + """Davies and Fleiss 1982 + Averages over observed and expected agreements for each coder pair. 
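+
+        A toy sketch (coders, items and labels are invented purely for
+        illustration; with this data the coders agree exactly at chance level,
+        so the coefficient comes out as 0):
+
+        >>> from nltk.metrics.agreement import AnnotationTask
+        >>> t = AnnotationTask(data=[('c1', 'i1', 'v1'), ('c2', 'i1', 'v1'), ('c1', 'i2', 'v1'), ('c2', 'i2', 'v2')])
+        >>> t.multi_kappa()
+        0.0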
+ + """ + Ae = self._pairwise_average(self.Ae_kappa) + return (self.avg_Ao() - Ae) / (1.0 - Ae) + + def Disagreement(self, label_freqs): + total_labels = sum(label_freqs.values()) + pairs = 0.0 + for j, nj in label_freqs.items(): + for l, nl in label_freqs.items(): + pairs += float(nj * nl) * self.distance(l, j) + return 1.0 * pairs / (total_labels * (total_labels - 1)) + + def alpha(self): + """Krippendorff 1980""" + # check for degenerate cases + if len(self.K) == 0: + raise ValueError("Cannot calculate alpha, no data present!") + if len(self.K) == 1: + log.debug("Only one annotation value, alpha returning 1.") + return 1 + if len(self.C) == 1 and len(self.I) == 1: + raise ValueError("Cannot calculate alpha, only one coder and item present!") + + total_disagreement = 0.0 + total_ratings = 0 + all_valid_labels_freq = FreqDist([]) + + total_do = 0.0 # Total observed disagreement for all items. + for i, itemdata in self._grouped_data("item"): + label_freqs = FreqDist(x["labels"] for x in itemdata) + labels_count = sum(label_freqs.values()) + if labels_count < 2: + # Ignore the item. + continue + all_valid_labels_freq += label_freqs + total_do += self.Disagreement(label_freqs) * labels_count + + do = total_do / sum(all_valid_labels_freq.values()) + + de = self.Disagreement(all_valid_labels_freq) # Expected disagreement. + k_alpha = 1.0 - do / de + + return k_alpha + + def weighted_kappa_pairwise(self, cA, cB, max_distance=1.0): + """Cohen 1968""" + total = 0.0 + label_freqs = ConditionalFreqDist( + (x["coder"], x["labels"]) for x in self.data if x["coder"] in (cA, cB) + ) + for j in self.K: + for l in self.K: + total += label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l) + De = total / (max_distance * pow(len(self.I), 2)) + log.debug("Expected disagreement between %s and %s: %f", cA, cB, De) + Do = self.Do_Kw_pairwise(cA, cB) + ret = 1.0 - (Do / De) + return ret + + def weighted_kappa(self, max_distance=1.0): + """Cohen 1968""" + return self._pairwise_average( + lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance) + ) + + +if __name__ == "__main__": + + import optparse + import re + + from nltk.metrics import distance + + # process command-line arguments + parser = optparse.OptionParser() + parser.add_option( + "-d", + "--distance", + dest="distance", + default="binary_distance", + help="distance metric to use", + ) + parser.add_option( + "-a", + "--agreement", + dest="agreement", + default="kappa", + help="agreement coefficient to calculate", + ) + parser.add_option( + "-e", + "--exclude", + dest="exclude", + action="append", + default=[], + help="coder names to exclude (may be specified multiple times)", + ) + parser.add_option( + "-i", + "--include", + dest="include", + action="append", + default=[], + help="coder names to include, same format as exclude", + ) + parser.add_option( + "-f", + "--file", + dest="file", + help="file to read labelings from, each line with three columns: 'labeler item labels'", + ) + parser.add_option( + "-v", + "--verbose", + dest="verbose", + default="0", + help="how much debugging to print on stderr (0-4)", + ) + parser.add_option( + "-c", + "--columnsep", + dest="columnsep", + default="\t", + help="char/string that separates the three columns in the file, defaults to tab", + ) + parser.add_option( + "-l", + "--labelsep", + dest="labelsep", + default=",", + help="char/string that separates labels (if labelers can assign more than one), defaults to comma", + ) + parser.add_option( + "-p", + "--presence", + dest="presence", + 
default=None, + help="convert each labeling into 1 or 0, based on presence of LABEL", + ) + parser.add_option( + "-T", + "--thorough", + dest="thorough", + default=False, + action="store_true", + help="calculate agreement for every subset of the annotators", + ) + (options, remainder) = parser.parse_args() + + if not options.file: + parser.print_help() + exit() + + logging.basicConfig(level=50 - 10 * int(options.verbose)) + + # read in data from the specified file + data = [] + with open(options.file) as infile: + for l in infile: + toks = l.split(options.columnsep) + coder, object_, labels = ( + toks[0], + str(toks[1:-1]), + frozenset(toks[-1].strip().split(options.labelsep)), + ) + if ( + (options.include == options.exclude) + or (len(options.include) > 0 and coder in options.include) + or (len(options.exclude) > 0 and coder not in options.exclude) + ): + data.append((coder, object_, labels)) + + if options.presence: + task = AnnotationTask( + data, getattr(distance, options.distance)(options.presence) + ) + else: + task = AnnotationTask(data, getattr(distance, options.distance)) + + if options.thorough: + pass + else: + print(getattr(task, options.agreement)()) + + logging.shutdown() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/aline.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/aline.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf8d9930228b2bba3d07b5c92201a011bb9ca25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/aline.py @@ -0,0 +1,1354 @@ +# Natural Language Toolkit: ALINE +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Kondrak +# Geoff Bacon (Python port) +# URL: +# For license information, see LICENSE.TXT + +""" +ALINE +https://webdocs.cs.ualberta.ca/~kondrak/ +Copyright 2002 by Grzegorz Kondrak. + +ALINE is an algorithm for aligning phonetic sequences, described in [1]. +This module is a port of Kondrak's (2002) ALINE. It provides functions for +phonetic sequence alignment and similarity analysis. These are useful in +historical linguistics, sociolinguistics and synchronic phonology. + +ALINE has parameters that can be tuned for desired output. These parameters are: +- C_skip, C_sub, C_exp, C_vwl +- Salience weights +- Segmental features + +In this implementation, some parameters have been changed from their default +values as described in [1], in order to replicate published results. All changes +are noted in comments. + +Example usage +------------- + +# Get optimal alignment of two phonetic sequences + +>>> align('θin', 'tenwis') # doctest: +SKIP +[[('θ', 't'), ('i', 'e'), ('n', 'n'), ('-', 'w'), ('-', 'i'), ('-', 's')]] + +[1] G. Kondrak. Algorithms for Language Reconstruction. PhD dissertation, +University of Toronto. 
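+
+Passing a nonzero ``epsilon`` relaxes the similarity threshold so that
+near-optimal alignments are returned as well; a further illustrative call on a
+pair from the demo data (output elided, hence the skip directive):
+
+>>> align('kabeθa', 'kap', epsilon=0.2) # doctest: +SKIP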
+""" + +try: + import numpy as np +except ImportError: + np = None + +# === Constants === + +inf = float("inf") + +# Default values for maximum similarity scores (Kondrak 2002: 54) +C_skip = -10 # Indels +C_sub = 35 # Substitutions +C_exp = 45 # Expansions/compressions +C_vwl = 5 # Vowel/consonant relative weight (decreased from 10) + +consonants = [ + "B", + "N", + "R", + "b", + "c", + "d", + "f", + "g", + "h", + "j", + "k", + "l", + "m", + "n", + "p", + "q", + "r", + "s", + "t", + "v", + "x", + "z", + "ç", + "ð", + "ħ", + "ŋ", + "ɖ", + "ɟ", + "ɢ", + "ɣ", + "ɦ", + "ɬ", + "ɮ", + "ɰ", + "ɱ", + "ɲ", + "ɳ", + "ɴ", + "ɸ", + "ɹ", + "ɻ", + "ɽ", + "ɾ", + "ʀ", + "ʁ", + "ʂ", + "ʃ", + "ʈ", + "ʋ", + "ʐ ", + "ʒ", + "ʔ", + "ʕ", + "ʙ", + "ʝ", + "β", + "θ", + "χ", + "ʐ", + "w", +] + +# Relevant features for comparing consonants and vowels +R_c = [ + "aspirated", + "lateral", + "manner", + "nasal", + "place", + "retroflex", + "syllabic", + "voice", +] +# 'high' taken out of R_v because same as manner +R_v = [ + "back", + "lateral", + "long", + "manner", + "nasal", + "place", + "retroflex", + "round", + "syllabic", + "voice", +] + +# Flattened feature matrix (Kondrak 2002: 56) +similarity_matrix = { + # place + "bilabial": 1.0, + "labiodental": 0.95, + "dental": 0.9, + "alveolar": 0.85, + "retroflex": 0.8, + "palato-alveolar": 0.75, + "palatal": 0.7, + "velar": 0.6, + "uvular": 0.5, + "pharyngeal": 0.3, + "glottal": 0.1, + "labiovelar": 1.0, + "vowel": -1.0, # added 'vowel' + # manner + "stop": 1.0, + "affricate": 0.9, + "fricative": 0.85, # increased fricative from 0.8 + "trill": 0.7, + "tap": 0.65, + "approximant": 0.6, + "high vowel": 0.4, + "mid vowel": 0.2, + "low vowel": 0.0, + "vowel2": 0.5, # added vowel + # high + "high": 1.0, + "mid": 0.5, + "low": 0.0, + # back + "front": 1.0, + "central": 0.5, + "back": 0.0, + # binary features + "plus": 1.0, + "minus": 0.0, +} + +# Relative weights of phonetic features (Kondrak 2002: 55) +salience = { + "syllabic": 5, + "place": 40, + "manner": 50, + "voice": 5, # decreased from 10 + "nasal": 20, # increased from 10 + "retroflex": 10, + "lateral": 10, + "aspirated": 5, + "long": 0, # decreased from 1 + "high": 3, # decreased from 5 + "back": 2, # decreased from 5 + "round": 2, # decreased from 5 +} + +# (Kondrak 2002: 59-60) +feature_matrix = { + # Consonants + "p": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "b": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "t": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "d": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʈ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɖ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "c": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": 
"minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɟ": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "k": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "g": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "q": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɢ": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʔ": { + "place": "glottal", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "m": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɱ": { + "place": "labiodental", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "n": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɳ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɲ": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ŋ": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɴ": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "N": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʙ": { + "place": "bilabial", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "B": { + "place": "bilabial", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "r": { + "place": "alveolar", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʀ": { + "place": "uvular", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "R": { + "place": "uvular", + "manner": "trill", + "syllabic": 
"minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɾ": { + "place": "alveolar", + "manner": "tap", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɽ": { + "place": "retroflex", + "manner": "tap", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɸ": { + "place": "bilabial", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "β": { + "place": "bilabial", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "f": { + "place": "labiodental", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "v": { + "place": "labiodental", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "θ": { + "place": "dental", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ð": { + "place": "dental", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "s": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "z": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʃ": { + "place": "palato-alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʒ": { + "place": "palato-alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʂ": { + "place": "retroflex", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʐ": { + "place": "retroflex", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ç": { + "place": "palatal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʝ": { + "place": "palatal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "x": { + "place": "velar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɣ": { + "place": "velar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + 
"retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "χ": { + "place": "uvular", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʁ": { + "place": "uvular", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ħ": { + "place": "pharyngeal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʕ": { + "place": "pharyngeal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "h": { + "place": "glottal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɦ": { + "place": "glottal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɬ": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "ɮ": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "ʋ": { + "place": "labiodental", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɹ": { + "place": "alveolar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɻ": { + "place": "retroflex", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "j": { + "place": "palatal", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɰ": { + "place": "velar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "l": { + "place": "alveolar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "w": { + "place": "labiovelar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + # Vowels + "i": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "y": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "e": { + "place": 
"vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "E": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, + "ø": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ɛ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "œ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "æ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "a": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "A": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, + "ɨ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "central", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "ʉ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "central", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ə": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "central", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "u": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "U": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "back", + "round": "plus", + "long": "plus", + "aspirated": "minus", + }, + "o": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": 
"mid", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "O": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "plus", + "aspirated": "minus", + }, + "ɔ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ɒ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "back", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "I": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, +} + +# === Algorithm === + + +def align(str1, str2, epsilon=0): + """ + Compute the alignment of two phonetic strings. + + :param str str1: First string to be aligned + :param str str2: Second string to be aligned + + :type epsilon: float (0.0 to 1.0) + :param epsilon: Adjusts threshold similarity score for near-optimal alignments + + :rtype: list(list(tuple(str, str))) + :return: Alignment(s) of str1 and str2 + + (Kondrak 2002: 51) + """ + if np is None: + raise ImportError("You need numpy in order to use the align function") + + assert 0.0 <= epsilon <= 1.0, "Epsilon must be between 0.0 and 1.0." + m = len(str1) + n = len(str2) + # This includes Kondrak's initialization of row 0 and column 0 to all 0s. + S = np.zeros((m + 1, n + 1), dtype=float) + + # If i <= 1 or j <= 1, don't allow expansions as it doesn't make sense, + # and breaks array and string indices. Make sure they never get chosen + # by setting them to -inf. + for i in range(1, m + 1): + for j in range(1, n + 1): + edit1 = S[i - 1, j] + sigma_skip(str1[i - 1]) + edit2 = S[i, j - 1] + sigma_skip(str2[j - 1]) + edit3 = S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + if i > 1: + edit4 = S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + else: + edit4 = -inf + if j > 1: + edit5 = S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + else: + edit5 = -inf + S[i, j] = max(edit1, edit2, edit3, edit4, edit5, 0) + + T = (1 - epsilon) * np.amax(S) # Threshold score for near-optimal alignments + + alignments = [] + for i in range(1, m + 1): + for j in range(1, n + 1): + if S[i, j] >= T: + alignments.append(_retrieve(i, j, 0, S, T, str1, str2, [])) + return alignments + + +def _retrieve(i, j, s, S, T, str1, str2, out): + """ + Retrieve the path through the similarity matrix S starting at (i, j). 
+ + :rtype: list(tuple(str, str)) + :return: Alignment of str1 and str2 + """ + if S[i, j] == 0: + return out + else: + if j > 1 and S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + s >= T: + out.insert(0, (str1[i - 1], str2[j - 2 : j])) + _retrieve( + i - 1, + j - 2, + s + sigma_exp(str1[i - 1], str2[j - 2 : j]), + S, + T, + str1, + str2, + out, + ) + elif ( + i > 1 and S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + s >= T + ): + out.insert(0, (str1[i - 2 : i], str2[j - 1])) + _retrieve( + i - 2, + j - 1, + s + sigma_exp(str2[j - 1], str1[i - 2 : i]), + S, + T, + str1, + str2, + out, + ) + elif S[i, j - 1] + sigma_skip(str2[j - 1]) + s >= T: + out.insert(0, ("-", str2[j - 1])) + _retrieve(i, j - 1, s + sigma_skip(str2[j - 1]), S, T, str1, str2, out) + elif S[i - 1, j] + sigma_skip(str1[i - 1]) + s >= T: + out.insert(0, (str1[i - 1], "-")) + _retrieve(i - 1, j, s + sigma_skip(str1[i - 1]), S, T, str1, str2, out) + elif S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + s >= T: + out.insert(0, (str1[i - 1], str2[j - 1])) + _retrieve( + i - 1, + j - 1, + s + sigma_sub(str1[i - 1], str2[j - 1]), + S, + T, + str1, + str2, + out, + ) + return out + + +def sigma_skip(p): + """ + Returns score of an indel of P. + + (Kondrak 2002: 54) + """ + return C_skip + + +def sigma_sub(p, q): + """ + Returns score of a substitution of P with Q. + + (Kondrak 2002: 54) + """ + return C_sub - delta(p, q) - V(p) - V(q) + + +def sigma_exp(p, q): + """ + Returns score of an expansion/compression. + + (Kondrak 2002: 54) + """ + q1 = q[0] + q2 = q[1] + return C_exp - delta(p, q1) - delta(p, q2) - V(p) - max(V(q1), V(q2)) + + +def delta(p, q): + """ + Return weighted sum of difference between P and Q. + + (Kondrak 2002: 54) + """ + features = R(p, q) + total = 0 + for f in features: + total += diff(p, q, f) * salience[f] + return total + + +def diff(p, q, f): + """ + Returns difference between phonetic segments P and Q for feature F. + + (Kondrak 2002: 52, 54) + """ + p_features, q_features = feature_matrix[p], feature_matrix[q] + return abs(similarity_matrix[p_features[f]] - similarity_matrix[q_features[f]]) + + +def R(p, q): + """ + Return relevant features for segment comparison. + + (Kondrak 2002: 54) + """ + if p in consonants or q in consonants: + return R_c + return R_v + + +def V(p): + """ + Return vowel weight if P is vowel. + + (Kondrak 2002: 54) + """ + if p in consonants: + return 0 + return C_vwl + + +# === Test === + + +def demo(): + """ + A demonstration of the result of aligning phonetic sequences + used in Kondrak's (2002) dissertation. 
+ """ + data = [pair.split(",") for pair in cognate_data.split("\n")] + for pair in data: + alignment = align(pair[0], pair[1])[0] + alignment = [f"({a[0]}, {a[1]})" for a in alignment] + alignment = " ".join(alignment) + print(f"{pair[0]} ~ {pair[1]} : {alignment}") + + +cognate_data = """jo,ʒə +tu,ty +nosotros,nu +kjen,ki +ke,kwa +todos,tu +una,ən +dos,dø +tres,trwa +ombre,om +arbol,arbrə +pluma,plym +kabeθa,kap +boka,buʃ +pje,pje +koraθon,kœr +ber,vwar +benir,vənir +deθir,dir +pobre,povrə +ðis,dIzes +ðæt,das +wat,vas +nat,nixt +loŋ,laŋ +mæn,man +fleʃ,flajʃ +bləd,blyt +feðər,fEdər +hær,hAr +ir,Or +aj,awgə +nowz,nAzə +mawθ,munt +təŋ,tsuŋə +fut,fys +nij,knI +hænd,hant +hart,herts +livər,lEbər +ænd,ante +æt,ad +blow,flAre +ir,awris +ijt,edere +fiʃ,piʃkis +flow,fluere +staɾ,stella +ful,plenus +græs,gramen +hart,kordis +horn,korny +aj,ego +nij,genU +məðər,mAter +mawntən,mons +nejm,nomen +njuw,nowus +wən,unus +rawnd,rotundus +sow,suere +sit,sedere +θrij,tres +tuwθ,dentis +θin,tenwis +kinwawa,kenuaʔ +nina,nenah +napewa,napɛw +wapimini,wapemen +namesa,namɛʔs +okimawa,okemaw +ʃiʃipa,seʔsep +ahkohkwa,ahkɛh +pematesiweni,pematesewen +asenja,aʔsɛn""" + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/association.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/association.py new file mode 100644 index 0000000000000000000000000000000000000000..b7010f1f4dd39c122a263aff5d243b3c19c52822 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/association.py @@ -0,0 +1,476 @@ +# Natural Language Toolkit: Ngram Association Measures +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joel Nothman +# URL: +# For license information, see LICENSE.TXT + +""" +Provides scoring functions for a number of association measures through a +generic, abstract implementation in ``NgramAssocMeasures``, and n-specific +``BigramAssocMeasures`` and ``TrigramAssocMeasures``. +""" + +import math as _math +from abc import ABCMeta, abstractmethod +from functools import reduce + +_log2 = lambda x: _math.log2(x) +_ln = _math.log + +_product = lambda s: reduce(lambda x, y: x * y, s) + +_SMALL = 1e-20 + +try: + from scipy.stats import fisher_exact +except ImportError: + + def fisher_exact(*_args, **_kwargs): + raise NotImplementedError + + +### Indices to marginals arguments: + +NGRAM = 0 +"""Marginals index for the ngram count""" + +UNIGRAMS = -2 +"""Marginals index for a tuple of each unigram count""" + +TOTAL = -1 +"""Marginals index for the number of words in the data""" + + +class NgramAssocMeasures(metaclass=ABCMeta): + """ + An abstract class defining a collection of generic association measures. + Each public method returns a score, taking the following arguments:: + + score_fn(count_of_ngram, + (count_of_n-1gram_1, ..., count_of_n-1gram_j), + (count_of_n-2gram_1, ..., count_of_n-2gram_k), + ..., + (count_of_1gram_1, ..., count_of_1gram_n), + count_of_total_words) + + See ``BigramAssocMeasures`` and ``TrigramAssocMeasures`` + + Inheriting classes should define a property _n, and a method _contingency + which calculates contingency values from marginals in order for all + association measures defined here to be usable. 
+ """ + + _n = 0 + + @staticmethod + @abstractmethod + def _contingency(*marginals): + """Calculates values of a contingency table from marginal values.""" + raise NotImplementedError( + "The contingency table is not available" "in the general ngram case" + ) + + @staticmethod + @abstractmethod + def _marginals(*contingency): + """Calculates values of contingency table marginals from its values.""" + raise NotImplementedError( + "The contingency table is not available" "in the general ngram case" + ) + + @classmethod + def _expected_values(cls, cont): + """Calculates expected values for a contingency table.""" + n_all = sum(cont) + bits = [1 << i for i in range(cls._n)] + + # For each contingency table cell + for i in range(len(cont)): + # Yield the expected value + yield ( + _product( + sum(cont[x] for x in range(2**cls._n) if (x & j) == (i & j)) + for j in bits + ) + / (n_all ** (cls._n - 1)) + ) + + @staticmethod + def raw_freq(*marginals): + """Scores ngrams by their frequency""" + return marginals[NGRAM] / marginals[TOTAL] + + @classmethod + def student_t(cls, *marginals): + """Scores ngrams using Student's t test with independence hypothesis + for unigrams, as in Manning and Schutze 5.3.1. + """ + return ( + marginals[NGRAM] + - _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1)) + ) / (marginals[NGRAM] + _SMALL) ** 0.5 + + @classmethod + def chi_sq(cls, *marginals): + """Scores ngrams using Pearson's chi-square as in Manning and Schutze + 5.3.3. + """ + cont = cls._contingency(*marginals) + exps = cls._expected_values(cont) + return sum((obs - exp) ** 2 / (exp + _SMALL) for obs, exp in zip(cont, exps)) + + @staticmethod + def mi_like(*marginals, **kwargs): + """Scores ngrams using a variant of mutual information. The keyword + argument power sets an exponent (default 3) for the numerator. No + logarithm of the result is calculated. + """ + return marginals[NGRAM] ** kwargs.get("power", 3) / _product( + marginals[UNIGRAMS] + ) + + @classmethod + def pmi(cls, *marginals): + """Scores ngrams by pointwise mutual information, as in Manning and + Schutze 5.4. + """ + return _log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) - _log2( + _product(marginals[UNIGRAMS]) + ) + + @classmethod + def likelihood_ratio(cls, *marginals): + """Scores ngrams using likelihood ratios as in Manning and Schutze 5.3.4.""" + cont = cls._contingency(*marginals) + return 2 * sum( + obs * _ln(obs / (exp + _SMALL) + _SMALL) + for obs, exp in zip(cont, cls._expected_values(cont)) + ) + + @classmethod + def poisson_stirling(cls, *marginals): + """Scores ngrams using the Poisson-Stirling measure.""" + exp = _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1)) + return marginals[NGRAM] * (_log2(marginals[NGRAM] / exp) - 1) + + @classmethod + def jaccard(cls, *marginals): + """Scores ngrams using the Jaccard index.""" + cont = cls._contingency(*marginals) + return cont[0] / sum(cont[:-1]) + + +class BigramAssocMeasures(NgramAssocMeasures): + """ + A collection of bigram association measures. Each association measure + is provided as a function with three arguments:: + + bigram_score_fn(n_ii, (n_ix, n_xi), n_xx) + + The arguments constitute the marginals of a contingency table, counting + the occurrences of particular events in a corpus. The letter i in the + suffix refers to the appearance of the word in question, while x indicates + the appearance of any word. Thus, for example: + + - n_ii counts ``(w1, w2)``, i.e. 
the bigram being scored + - n_ix counts ``(w1, *)`` + - n_xi counts ``(*, w2)`` + - n_xx counts ``(*, *)``, i.e. any bigram + + This may be shown with respect to a contingency table:: + + w1 ~w1 + ------ ------ + w2 | n_ii | n_oi | = n_xi + ------ ------ + ~w2 | n_io | n_oo | + ------ ------ + = n_ix TOTAL = n_xx + """ + + _n = 2 + + @staticmethod + def _contingency(n_ii, n_ix_xi_tuple, n_xx): + """Calculates values of a bigram contingency table from marginal values.""" + (n_ix, n_xi) = n_ix_xi_tuple + n_oi = n_xi - n_ii + n_io = n_ix - n_ii + return (n_ii, n_oi, n_io, n_xx - n_ii - n_oi - n_io) + + @staticmethod + def _marginals(n_ii, n_oi, n_io, n_oo): + """Calculates values of contingency table marginals from its values.""" + return (n_ii, (n_oi + n_ii, n_io + n_ii), n_oo + n_oi + n_io + n_ii) + + @staticmethod + def _expected_values(cont): + """Calculates expected values for a contingency table.""" + n_xx = sum(cont) + # For each contingency table cell + for i in range(4): + yield (cont[i] + cont[i ^ 1]) * (cont[i] + cont[i ^ 2]) / n_xx + + @classmethod + def phi_sq(cls, *marginals): + """Scores bigrams using phi-square, the square of the Pearson correlation + coefficient. + """ + n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals) + + return (n_ii * n_oo - n_io * n_oi) ** 2 / ( + (n_ii + n_io) * (n_ii + n_oi) * (n_io + n_oo) * (n_oi + n_oo) + ) + + @classmethod + def chi_sq(cls, n_ii, n_ix_xi_tuple, n_xx): + """Scores bigrams using chi-square, i.e. phi-sq multiplied by the number + of bigrams, as in Manning and Schutze 5.3.3. + """ + (n_ix, n_xi) = n_ix_xi_tuple + return n_xx * cls.phi_sq(n_ii, (n_ix, n_xi), n_xx) + + @classmethod + def fisher(cls, *marginals): + """Scores bigrams using Fisher's Exact Test (Pedersen 1996). Less + sensitive to small counts than PMI or Chi Sq, but also more expensive + to compute. Requires scipy. + """ + + n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals) + + (odds, pvalue) = fisher_exact([[n_ii, n_io], [n_oi, n_oo]], alternative="less") + return pvalue + + @staticmethod + def dice(n_ii, n_ix_xi_tuple, n_xx): + """Scores bigrams using Dice's coefficient.""" + (n_ix, n_xi) = n_ix_xi_tuple + return 2 * n_ii / (n_ix + n_xi) + + +class TrigramAssocMeasures(NgramAssocMeasures): + """ + A collection of trigram association measures. Each association measure + is provided as a function with four arguments:: + + trigram_score_fn(n_iii, + (n_iix, n_ixi, n_xii), + (n_ixx, n_xix, n_xxi), + n_xxx) + + The arguments constitute the marginals of a contingency table, counting + the occurrences of particular events in a corpus. The letter i in the + suffix refers to the appearance of the word in question, while x indicates + the appearance of any word. Thus, for example: + + - n_iii counts ``(w1, w2, w3)``, i.e. the trigram being scored + - n_ixx counts ``(w1, *, *)`` + - n_xxx counts ``(*, *, *)``, i.e. any trigram + """ + + _n = 3 + + @staticmethod + def _contingency(n_iii, n_iix_tuple, n_ixx_tuple, n_xxx): + """Calculates values of a trigram contingency table (or cube) from + marginal values. 
+ >>> TrigramAssocMeasures._contingency(1, (1, 1, 1), (1, 73, 1), 2000) + (1, 0, 0, 0, 0, 72, 0, 1927) + """ + (n_iix, n_ixi, n_xii) = n_iix_tuple + (n_ixx, n_xix, n_xxi) = n_ixx_tuple + n_oii = n_xii - n_iii + n_ioi = n_ixi - n_iii + n_iio = n_iix - n_iii + n_ooi = n_xxi - n_iii - n_oii - n_ioi + n_oio = n_xix - n_iii - n_oii - n_iio + n_ioo = n_ixx - n_iii - n_ioi - n_iio + n_ooo = n_xxx - n_iii - n_oii - n_ioi - n_iio - n_ooi - n_oio - n_ioo + + return (n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo) + + @staticmethod + def _marginals(*contingency): + """Calculates values of contingency table marginals from its values. + >>> TrigramAssocMeasures._marginals(1, 0, 0, 0, 0, 72, 0, 1927) + (1, (1, 1, 1), (1, 73, 1), 2000) + """ + n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo = contingency + return ( + n_iii, + (n_iii + n_iio, n_iii + n_ioi, n_iii + n_oii), + ( + n_iii + n_ioi + n_iio + n_ioo, + n_iii + n_oii + n_iio + n_oio, + n_iii + n_oii + n_ioi + n_ooi, + ), + sum(contingency), + ) + + +class QuadgramAssocMeasures(NgramAssocMeasures): + """ + A collection of quadgram association measures. Each association measure + is provided as a function with five arguments:: + + trigram_score_fn(n_iiii, + (n_iiix, n_iixi, n_ixii, n_xiii), + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), + (n_ixxx, n_xixx, n_xxix, n_xxxi), + n_all) + + The arguments constitute the marginals of a contingency table, counting + the occurrences of particular events in a corpus. The letter i in the + suffix refers to the appearance of the word in question, while x indicates + the appearance of any word. Thus, for example: + + - n_iiii counts ``(w1, w2, w3, w4)``, i.e. the quadgram being scored + - n_ixxi counts ``(w1, *, *, w4)`` + - n_xxxx counts ``(*, *, *, *)``, i.e. any quadgram + """ + + _n = 4 + + @staticmethod + def _contingency(n_iiii, n_iiix_tuple, n_iixx_tuple, n_ixxx_tuple, n_xxxx): + """Calculates values of a quadgram contingency table from + marginal values. + """ + (n_iiix, n_iixi, n_ixii, n_xiii) = n_iiix_tuple + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix) = n_iixx_tuple + (n_ixxx, n_xixx, n_xxix, n_xxxi) = n_ixxx_tuple + n_oiii = n_xiii - n_iiii + n_ioii = n_ixii - n_iiii + n_iioi = n_iixi - n_iiii + n_ooii = n_xxii - n_iiii - n_oiii - n_ioii + n_oioi = n_xixi - n_iiii - n_oiii - n_iioi + n_iooi = n_ixxi - n_iiii - n_ioii - n_iioi + n_oooi = n_xxxi - n_iiii - n_oiii - n_ioii - n_iioi - n_ooii - n_iooi - n_oioi + n_iiio = n_iiix - n_iiii + n_oiio = n_xiix - n_iiii - n_oiii - n_iiio + n_ioio = n_ixix - n_iiii - n_ioii - n_iiio + n_ooio = n_xxix - n_iiii - n_oiii - n_ioii - n_iiio - n_ooii - n_ioio - n_oiio + n_iioo = n_iixx - n_iiii - n_iioi - n_iiio + n_oioo = n_xixx - n_iiii - n_oiii - n_iioi - n_iiio - n_oioi - n_oiio - n_iioo + n_iooo = n_ixxx - n_iiii - n_ioii - n_iioi - n_iiio - n_iooi - n_iioo - n_ioio + n_oooo = ( + n_xxxx + - n_iiii + - n_oiii + - n_ioii + - n_iioi + - n_ooii + - n_oioi + - n_iooi + - n_oooi + - n_iiio + - n_oiio + - n_ioio + - n_ooio + - n_iioo + - n_oioo + - n_iooo + ) + + return ( + n_iiii, + n_oiii, + n_ioii, + n_ooii, + n_iioi, + n_oioi, + n_iooi, + n_oooi, + n_iiio, + n_oiio, + n_ioio, + n_ooio, + n_iioo, + n_oioo, + n_iooo, + n_oooo, + ) + + @staticmethod + def _marginals(*contingency): + """Calculates values of contingency table marginals from its values. 
+ QuadgramAssocMeasures._marginals(1, 0, 2, 46, 552, 825, 2577, 34967, 1, 0, 2, 48, 7250, 9031, 28585, 356653) + (1, (2, 553, 3, 1), (7804, 6, 3132, 1378, 49, 2), (38970, 17660, 100, 38970), 440540) + """ + ( + n_iiii, + n_oiii, + n_ioii, + n_ooii, + n_iioi, + n_oioi, + n_iooi, + n_oooi, + n_iiio, + n_oiio, + n_ioio, + n_ooio, + n_iioo, + n_oioo, + n_iooo, + n_oooo, + ) = contingency + + n_iiix = n_iiii + n_iiio + n_iixi = n_iiii + n_iioi + n_ixii = n_iiii + n_ioii + n_xiii = n_iiii + n_oiii + + n_iixx = n_iiii + n_iioi + n_iiio + n_iioo + n_ixix = n_iiii + n_ioii + n_iiio + n_ioio + n_ixxi = n_iiii + n_ioii + n_iioi + n_iooi + n_xixi = n_iiii + n_oiii + n_iioi + n_oioi + n_xxii = n_iiii + n_oiii + n_ioii + n_ooii + n_xiix = n_iiii + n_oiii + n_iiio + n_oiio + + n_ixxx = n_iiii + n_ioii + n_iioi + n_iiio + n_iooi + n_iioo + n_ioio + n_iooo + n_xixx = n_iiii + n_oiii + n_iioi + n_iiio + n_oioi + n_oiio + n_iioo + n_oioo + n_xxix = n_iiii + n_oiii + n_ioii + n_iiio + n_ooii + n_ioio + n_oiio + n_ooio + n_xxxi = n_iiii + n_oiii + n_ioii + n_iioi + n_ooii + n_iooi + n_oioi + n_oooi + + n_all = sum(contingency) + + return ( + n_iiii, + (n_iiix, n_iixi, n_ixii, n_xiii), + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), + (n_ixxx, n_xixx, n_xxix, n_xxxi), + n_all, + ) + + +class ContingencyMeasures: + """Wraps NgramAssocMeasures classes such that the arguments of association + measures are contingency table values rather than marginals. + """ + + def __init__(self, measures): + """Constructs a ContingencyMeasures given a NgramAssocMeasures class""" + self.__class__.__name__ = "Contingency" + measures.__class__.__name__ + for k in dir(measures): + if k.startswith("__"): + continue + v = getattr(measures, k) + if not k.startswith("_"): + v = self._make_contingency_fn(measures, v) + setattr(self, k, v) + + @staticmethod + def _make_contingency_fn(measures, old_fn): + """From an association measure function, produces a new function which + accepts contingency table values as its arguments. + """ + + def res(*contingency): + return old_fn(*measures._marginals(*contingency)) + + res.__doc__ = old_fn.__doc__ + res.__name__ = old_fn.__name__ + return res diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb6ee9b2a7e1a9b2235d9268d20fc1269908fe1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py @@ -0,0 +1,353 @@ +# Natural Language Toolkit: Confusion Matrices +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +from nltk.probability import FreqDist + + +class ConfusionMatrix: + """ + The confusion matrix between a list of reference values and a + corresponding list of test values. Entry *[r,t]* of this + matrix is a count of the number of times that the reference value + *r* corresponds to the test value *t*. E.g.: + + >>> from nltk.metrics import ConfusionMatrix + >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split() + >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split() + >>> cm = ConfusionMatrix(ref, test) + >>> print(cm['NN', 'NN']) + 3 + + Note that the diagonal entries *Ri=Tj* of this matrix + corresponds to correct values; and the off-diagonal entries + correspond to incorrect values. 
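+
+    Per-label precision and recall can be read off the same matrix
+    (an illustrative check, reusing the example above):
+
+    >>> '%.2f, %.2f' % (cm.precision('NN'), cm.recall('NN'))
+    '0.75, 0.75'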
+ """ + + def __init__(self, reference, test, sort_by_count=False): + """ + Construct a new confusion matrix from a list of reference + values and a corresponding list of test values. + + :type reference: list + :param reference: An ordered list of reference values. + :type test: list + :param test: A list of values to compare against the + corresponding reference values. + :raise ValueError: If ``reference`` and ``length`` do not have + the same length. + """ + if len(reference) != len(test): + raise ValueError("Lists must have the same length.") + + # Get a list of all values. + if sort_by_count: + ref_fdist = FreqDist(reference) + test_fdist = FreqDist(test) + + def key(v): + return -(ref_fdist[v] + test_fdist[v]) + + values = sorted(set(reference + test), key=key) + else: + values = sorted(set(reference + test)) + + # Construct a value->index dictionary + indices = {val: i for (i, val) in enumerate(values)} + + # Make a confusion matrix table. + confusion = [[0 for _ in values] for _ in values] + max_conf = 0 # Maximum confusion + for w, g in zip(reference, test): + confusion[indices[w]][indices[g]] += 1 + max_conf = max(max_conf, confusion[indices[w]][indices[g]]) + + #: A list of all values in ``reference`` or ``test``. + self._values = values + #: A dictionary mapping values in ``self._values`` to their indices. + self._indices = indices + #: The confusion matrix itself (as a list of lists of counts). + self._confusion = confusion + #: The greatest count in ``self._confusion`` (used for printing). + self._max_conf = max_conf + #: The total number of values in the confusion matrix. + self._total = len(reference) + #: The number of correct (on-diagonal) values in the matrix. + self._correct = sum(confusion[i][i] for i in range(len(values))) + + def __getitem__(self, li_lj_tuple): + """ + :return: The number of times that value ``li`` was expected and + value ``lj`` was given. + :rtype: int + """ + (li, lj) = li_lj_tuple + i = self._indices[li] + j = self._indices[lj] + return self._confusion[i][j] + + def __repr__(self): + return f"" + + def __str__(self): + return self.pretty_format() + + def pretty_format( + self, + show_percents=False, + values_in_chart=True, + truncate=None, + sort_by_count=False, + ): + """ + :return: A multi-line string representation of this confusion matrix. + :type truncate: int + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. + :param sort_by_count: If true, then sort by the count of each + label in the reference data. I.e., labels that occur more + frequently in the reference label will be towards the left + edge of the matrix, and labels that occur less frequently + will be towards the right edge. + + @todo: add marginals? + """ + confusion = self._confusion + + values = self._values + if sort_by_count: + values = sorted( + values, key=lambda v: -sum(self._confusion[self._indices[v]]) + ) + + if truncate: + values = values[:truncate] + + if values_in_chart: + value_strings = ["%s" % val for val in values] + else: + value_strings = [str(n + 1) for n in range(len(values))] + + # Construct a format string for row values + valuelen = max(len(val) for val in value_strings) + value_format = "%" + repr(valuelen) + "s | " + # Construct a format string for matrix entries + if show_percents: + entrylen = 6 + entry_format = "%5.1f%%" + zerostr = " ." 
+ else: + entrylen = len(repr(self._max_conf)) + entry_format = "%" + repr(entrylen) + "d" + zerostr = " " * (entrylen - 1) + "." + + # Write the column values. + s = "" + for i in range(valuelen): + s += (" " * valuelen) + " |" + for val in value_strings: + if i >= valuelen - len(val): + s += val[i - valuelen + len(val)].rjust(entrylen + 1) + else: + s += " " * (entrylen + 1) + s += " |\n" + + # Write a dividing line + s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) + + # Write the entries. + for val, li in zip(value_strings, values): + i = self._indices[li] + s += value_format % val + for lj in values: + j = self._indices[lj] + if confusion[i][j] == 0: + s += zerostr + elif show_percents: + s += entry_format % (100.0 * confusion[i][j] / self._total) + else: + s += entry_format % confusion[i][j] + if i == j: + prevspace = s.rfind(" ") + s = s[:prevspace] + "<" + s[prevspace + 1 :] + ">" + else: + s += " " + s += "|\n" + + # Write a dividing line + s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) + + # Write a key + s += "(row = reference; col = test)\n" + if not values_in_chart: + s += "Value key:\n" + for i, value in enumerate(values): + s += "%6d: %s\n" % (i + 1, value) + + return s + + def key(self): + values = self._values + str = "Value key:\n" + indexlen = len(repr(len(values) - 1)) + key_format = " %" + repr(indexlen) + "d: %s\n" + for i in range(len(values)): + str += key_format % (i, values[i]) + + return str + + def recall(self, value): + """Given a value in the confusion matrix, return the recall + that corresponds to this value. The recall is defined as: + + - *r* = true positive / (true positive + false positive) + + and can loosely be considered the ratio of how often ``value`` + was predicted correctly relative to how often ``value`` was + the true result. + + :param value: value used in the ConfusionMatrix + :return: the recall corresponding to ``value``. + :rtype: float + """ + # Number of times `value` was correct, and also predicted + TP = self[value, value] + # Number of times `value` was correct + TP_FN = sum(self[value, pred_value] for pred_value in self._values) + if TP_FN == 0: + return 0.0 + return TP / TP_FN + + def precision(self, value): + """Given a value in the confusion matrix, return the precision + that corresponds to this value. The precision is defined as: + + - *p* = true positive / (true positive + false negative) + + and can loosely be considered the ratio of how often ``value`` + was predicted correctly relative to the number of predictions + for ``value``. + + :param value: value used in the ConfusionMatrix + :return: the precision corresponding to ``value``. + :rtype: float + """ + # Number of times `value` was correct, and also predicted + TP = self[value, value] + # Number of times `value` was predicted + TP_FP = sum(self[real_value, value] for real_value in self._values) + if TP_FP == 0: + return 0.0 + return TP / TP_FP + + def f_measure(self, value, alpha=0.5): + """ + Given a value used in the confusion matrix, return the f-measure + that corresponds to this value. The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. 
+ In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false negative) + - *r* = true positive / (true positive + false positive) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param value: value used in the ConfusionMatrix + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. + :type alpha: float + :return: the F-measure corresponding to ``value``. + :rtype: float + """ + p = self.precision(value) + r = self.recall(value) + if p == 0.0 or r == 0.0: + return 0.0 + return 1.0 / (alpha / p + (1 - alpha) / r) + + def evaluate(self, alpha=0.5, truncate=None, sort_by_count=False): + """ + Tabulate the **recall**, **precision** and **f-measure** + for each value in this confusion matrix. + + >>> reference = "DET NN VB DET JJ NN NN IN DET NN".split() + >>> test = "DET VB VB DET NN NN NN IN DET NN".split() + >>> cm = ConfusionMatrix(reference, test) + >>> print(cm.evaluate()) + Tag | Prec. | Recall | F-measure + ----+--------+--------+----------- + DET | 1.0000 | 1.0000 | 1.0000 + IN | 1.0000 | 1.0000 | 1.0000 + JJ | 0.0000 | 0.0000 | 0.0000 + NN | 0.7500 | 0.7500 | 0.7500 + VB | 0.5000 | 1.0000 | 0.6667 + + + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on frequency + in the reference label. Defaults to False. + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + tags = self._values + + # Apply keyword parameters + if sort_by_count: + tags = sorted(tags, key=lambda v: -sum(self._confusion[self._indices[v]])) + if truncate: + tags = tags[:truncate] + + tag_column_len = max(max(len(tag) for tag in tags), 3) + + # Construct the header + s = ( + f"{' ' * (tag_column_len - 3)}Tag | Prec. 
| Recall | F-measure\n" + f"{'-' * tag_column_len}-+--------+--------+-----------\n" + ) + + # Construct the body + for tag in tags: + s += ( + f"{tag:>{tag_column_len}} | " + f"{self.precision(tag):<6.4f} | " + f"{self.recall(tag):<6.4f} | " + f"{self.f_measure(tag, alpha=alpha):.4f}\n" + ) + + return s + + +def demo(): + reference = "DET NN VB DET JJ NN NN IN DET NN".split() + test = "DET VB VB DET NN NN NN IN DET NN".split() + print("Reference =", reference) + print("Test =", test) + print("Confusion matrix:") + print(ConfusionMatrix(reference, test)) + print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True)) + + print(ConfusionMatrix(reference, test).recall("VB")) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/distance.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/distance.py new file mode 100644 index 0000000000000000000000000000000000000000..1f115d97abd6678f7b1a3b15b2e68671d70e5ea7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/distance.py @@ -0,0 +1,508 @@ +# Natural Language Toolkit: Distance Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Tom Lippincott +# URL: +# For license information, see LICENSE.TXT +# + +""" +Distance Metrics. + +Compute the distance between two items (usually strings). +As metrics, they must satisfy the following three requirements: + +1. d(a, a) = 0 +2. d(a, b) >= 0 +3. d(a, c) <= d(a, b) + d(b, c) +""" + +import operator +import warnings + + +def _edit_dist_init(len1, len2): + lev = [] + for i in range(len1): + lev.append([0] * len2) # initialize 2D array to zero + for i in range(len1): + lev[i][0] = i # column 0: 0,1,2,3,4,... + for j in range(len2): + lev[0][j] = j # row 0: 0,1,2,3,4,... + return lev + + +def _last_left_t_init(sigma): + return {c: 0 for c in sigma} + + +def _edit_dist_step( + lev, i, j, s1, s2, last_left, last_right, substitution_cost=1, transpositions=False +): + c1 = s1[i - 1] + c2 = s2[j - 1] + + # skipping a character in s1 + a = lev[i - 1][j] + 1 + # skipping a character in s2 + b = lev[i][j - 1] + 1 + # substitution + c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0) + + # transposition + d = c + 1 # never picked by default + if transpositions and last_left > 0 and last_right > 0: + d = lev[last_left - 1][last_right - 1] + i - last_left + j - last_right - 1 + + # pick the cheapest + lev[i][j] = min(a, b, c, d) + + +def edit_distance(s1, s2, substitution_cost=1, transpositions=False): + """ + Calculate the Levenshtein edit-distance between two strings. + The edit distance is the number of characters that need to be + substituted, inserted, or deleted, to transform s1 into s2. For + example, transforming "rain" to "shine" requires three steps, + consisting of two substitutions and one insertion: + "rain" -> "sain" -> "shin" -> "shine". These operations could have + been done in other orders, but at least three steps are needed. + + Allows specifying the cost of substitution edits (e.g., "a" -> "b"), + because sometimes it makes sense to assign greater penalties to + substitutions. + + This also optionally allows transposition edits (e.g., "ab" -> "ba"), + though this is disabled by default. 
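+
+    A small illustrative check (relying only on the behaviour described above):
+
+    >>> edit_distance("rain", "shine")
+    3
+    >>> edit_distance("ab", "ba", transpositions=True)
+    1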
+ + :param s1, s2: The strings to be analysed + :param transpositions: Whether to allow transposition edits + :type s1: str + :type s2: str + :type substitution_cost: int + :type transpositions: bool + :rtype: int + """ + # set up a 2-D array + len1 = len(s1) + len2 = len(s2) + lev = _edit_dist_init(len1 + 1, len2 + 1) + + # retrieve alphabet + sigma = set() + sigma.update(s1) + sigma.update(s2) + + # set up table to remember positions of last seen occurrence in s1 + last_left_t = _last_left_t_init(sigma) + + # iterate over the array + # i and j start from 1 and not 0 to stay close to the wikipedia pseudo-code + # see https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance + for i in range(1, len1 + 1): + last_right_buf = 0 + for j in range(1, len2 + 1): + last_left = last_left_t[s2[j - 1]] + last_right = last_right_buf + if s1[i - 1] == s2[j - 1]: + last_right_buf = j + _edit_dist_step( + lev, + i, + j, + s1, + s2, + last_left, + last_right, + substitution_cost=substitution_cost, + transpositions=transpositions, + ) + last_left_t[s1[i - 1]] = i + return lev[len1][len2] + + +def _edit_dist_backtrace(lev): + i, j = len(lev) - 1, len(lev[0]) - 1 + alignment = [(i, j)] + + while (i, j) != (0, 0): + directions = [ + (i - 1, j - 1), # substitution + (i - 1, j), # skip s1 + (i, j - 1), # skip s2 + ] + + direction_costs = ( + (lev[i][j] if (i >= 0 and j >= 0) else float("inf"), (i, j)) + for i, j in directions + ) + _, (i, j) = min(direction_costs, key=operator.itemgetter(0)) + + alignment.append((i, j)) + return list(reversed(alignment)) + + +def edit_distance_align(s1, s2, substitution_cost=1): + """ + Calculate the minimum Levenshtein edit-distance based alignment + mapping between two strings. The alignment finds the mapping + from string s1 to s2 that minimizes the edit distance cost. + For example, mapping "rain" to "shine" would involve 2 + substitutions, 2 matches and an insertion resulting in + the following mapping: + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)] + NB: (0, 0) is the start state without any letters associated + See more: https://web.stanford.edu/class/cs124/lec/med.pdf + + In case of multiple valid minimum-distance alignments, the + backtrace has the following operation precedence: + + 1. Substitute s1 and s2 characters + 2. Skip s1 character + 3. Skip s2 character + + The backtrace is carried out in reverse string order. + + This function does not support transposition. + + :param s1, s2: The strings to be aligned + :type s1: str + :type s2: str + :type substitution_cost: int + :rtype: List[Tuple(int, int)] + """ + # set up a 2-D array + len1 = len(s1) + len2 = len(s2) + lev = _edit_dist_init(len1 + 1, len2 + 1) + + # iterate over the array + for i in range(len1): + for j in range(len2): + _edit_dist_step( + lev, + i + 1, + j + 1, + s1, + s2, + 0, + 0, + substitution_cost=substitution_cost, + transpositions=False, + ) + + # backtrace to find alignment + alignment = _edit_dist_backtrace(lev) + return alignment + + +def binary_distance(label1, label2): + """Simple equality test. + + 0.0 if the labels are identical, 1.0 if they are different. 
+ + >>> from nltk.metrics import binary_distance + >>> binary_distance(1,1) + 0.0 + + >>> binary_distance(1,3) + 1.0 + """ + + return 0.0 if label1 == label2 else 1.0 + + +def jaccard_distance(label1, label2): + """Distance metric comparing set-similarity.""" + return (len(label1.union(label2)) - len(label1.intersection(label2))) / len( + label1.union(label2) + ) + + +def masi_distance(label1, label2): + """Distance metric that takes into account partial agreement when multiple + labels are assigned. + + >>> from nltk.metrics import masi_distance + >>> masi_distance(set([1, 2]), set([1, 2, 3, 4])) + 0.665 + + Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI) + for Semantic and Pragmatic Annotation. + """ + + len_intersection = len(label1.intersection(label2)) + len_union = len(label1.union(label2)) + len_label1 = len(label1) + len_label2 = len(label2) + if len_label1 == len_label2 and len_label1 == len_intersection: + m = 1 + elif len_intersection == min(len_label1, len_label2): + m = 0.67 + elif len_intersection > 0: + m = 0.33 + else: + m = 0 + + return 1 - len_intersection / len_union * m + + +def interval_distance(label1, label2): + """Krippendorff's interval distance metric + + >>> from nltk.metrics import interval_distance + >>> interval_distance(1,10) + 81 + + Krippendorff 1980, Content Analysis: An Introduction to its Methodology + """ + + try: + return pow(label1 - label2, 2) + # return pow(list(label1)[0]-list(label2)[0],2) + except: + print("non-numeric labels not supported with interval distance") + + +def presence(label): + """Higher-order function to test presence of a given label""" + + return lambda x, y: 1.0 * ((label in x) == (label in y)) + + +def fractional_presence(label): + return ( + lambda x, y: abs((1.0 / len(x)) - (1.0 / len(y))) * (label in x and label in y) + or 0.0 * (label not in x and label not in y) + or abs(1.0 / len(x)) * (label in x and label not in y) + or (1.0 / len(y)) * (label not in x and label in y) + ) + + +def custom_distance(file): + data = {} + with open(file) as infile: + for l in infile: + labelA, labelB, dist = l.strip().split("\t") + labelA = frozenset([labelA]) + labelB = frozenset([labelB]) + data[frozenset([labelA, labelB])] = float(dist) + return lambda x, y: data[frozenset([x, y])] + + +def jaro_similarity(s1, s2): + """ + Computes the Jaro similarity between 2 sequences from: + + Matthew A. Jaro (1989). Advances in record linkage methodology + as applied to the 1985 census of Tampa Florida. Journal of the + American Statistical Association. 84 (406): 414-20. + + The Jaro distance between is the min no. of single-character transpositions + required to change one word into another. The Jaro similarity formula from + https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance : + + ``jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/s_2 + (m-t)/m)`` + + where + - `|s_i|` is the length of string `s_i` + - `m` is the no. of matching characters + - `t` is the half no. of possible transpositions. + """ + # First, store the length of the strings + # because they will be re-used several times. + len_s1, len_s2 = len(s1), len(s2) + + # The upper bound of the distance for being a matched character. + match_bound = max(len_s1, len_s2) // 2 - 1 + + # Initialize the counts for matches and transpositions. + matches = 0 # no.of matched characters in s1 and s2 + transpositions = 0 # no. 
of transpositions between s1 and s2 + flagged_1 = [] # positions in s1 which are matches to some character in s2 + flagged_2 = [] # positions in s2 which are matches to some character in s1 + + # Iterate through sequences, check for matches and compute transpositions. + for i in range(len_s1): # Iterate through each character. + upperbound = min(i + match_bound, len_s2 - 1) + lowerbound = max(0, i - match_bound) + for j in range(lowerbound, upperbound + 1): + if s1[i] == s2[j] and j not in flagged_2: + matches += 1 + flagged_1.append(i) + flagged_2.append(j) + break + flagged_2.sort() + for i, j in zip(flagged_1, flagged_2): + if s1[i] != s2[j]: + transpositions += 1 + + if matches == 0: + return 0 + else: + return ( + 1 + / 3 + * ( + matches / len_s1 + + matches / len_s2 + + (matches - transpositions // 2) / matches + ) + ) + + +def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4): + """ + The Jaro Winkler distance is an extension of the Jaro similarity in: + + William E. Winkler. 1990. String Comparator Metrics and Enhanced + Decision Rules in the Fellegi-Sunter Model of Record Linkage. + Proceedings of the Section on Survey Research Methods. + American Statistical Association: 354-359. + + such that: + + jaro_winkler_sim = jaro_sim + ( l * p * (1 - jaro_sim) ) + + where, + + - jaro_sim is the output from the Jaro Similarity, + see jaro_similarity() + - l is the length of common prefix at the start of the string + - this implementation provides an upperbound for the l value + to keep the prefixes.A common value of this upperbound is 4. + - p is the constant scaling factor to overweigh common prefixes. + The Jaro-Winkler similarity will fall within the [0, 1] bound, + given that max(p)<=0.25 , default is p=0.1 in Winkler (1990) + + + Test using outputs from https://www.census.gov/srd/papers/pdf/rr93-8.pdf + from "Table 5 Comparison of String Comparators Rescaled between 0 and 1" + + >>> winkler_examples = [("billy", "billy"), ("billy", "bill"), ("billy", "blily"), + ... ("massie", "massey"), ("yvette", "yevett"), ("billy", "bolly"), ("dwayne", "duane"), + ... ("dixon", "dickson"), ("billy", "susan")] + + >>> winkler_scores = [1.000, 0.967, 0.947, 0.944, 0.911, 0.893, 0.858, 0.853, 0.000] + >>> jaro_scores = [1.000, 0.933, 0.933, 0.889, 0.889, 0.867, 0.822, 0.790, 0.000] + + One way to match the values on the Winkler's paper is to provide a different + p scaling factor for different pairs of strings, e.g. + + >>> p_factors = [0.1, 0.125, 0.20, 0.125, 0.20, 0.20, 0.20, 0.15, 0.1] + + >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors): + ... assert round(jaro_similarity(s1, s2), 3) == jscore + ... assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore + + + Test using outputs from https://www.census.gov/srd/papers/pdf/rr94-5.pdf from + "Table 2.1. Comparison of String Comparators Using Last Names, First Names, and Street Names" + + >>> winkler_examples = [('SHACKLEFORD', 'SHACKELFORD'), ('DUNNINGHAM', 'CUNNIGHAM'), + ... ('NICHLESON', 'NICHULSON'), ('JONES', 'JOHNSON'), ('MASSEY', 'MASSIE'), + ... ('ABROMS', 'ABRAMS'), ('HARDIN', 'MARTINEZ'), ('ITMAN', 'SMITH'), + ... ('JERALDINE', 'GERALDINE'), ('MARHTA', 'MARTHA'), ('MICHELLE', 'MICHAEL'), + ... ('JULIES', 'JULIUS'), ('TANYA', 'TONYA'), ('DWAYNE', 'DUANE'), ('SEAN', 'SUSAN'), + ... ('JON', 'JOHN'), ('JON', 'JAN'), ('BROOKHAVEN', 'BRROKHAVEN'), + ... ('BROOK HALLOW', 'BROOK HLLW'), ('DECATUR', 'DECATIR'), ('FITZRUREITER', 'FITZENREITER'), + ... 
('HIGBEE', 'HIGHEE'), ('HIGBEE', 'HIGVEE'), ('LACURA', 'LOCURA'), ('IOWA', 'IONA'), ('1ST', 'IST')] + + >>> jaro_scores = [0.970, 0.896, 0.926, 0.790, 0.889, 0.889, 0.722, 0.467, 0.926, + ... 0.944, 0.869, 0.889, 0.867, 0.822, 0.783, 0.917, 0.000, 0.933, 0.944, 0.905, + ... 0.856, 0.889, 0.889, 0.889, 0.833, 0.000] + + >>> winkler_scores = [0.982, 0.896, 0.956, 0.832, 0.944, 0.922, 0.722, 0.467, 0.926, + ... 0.961, 0.921, 0.933, 0.880, 0.858, 0.805, 0.933, 0.000, 0.947, 0.967, 0.943, + ... 0.913, 0.922, 0.922, 0.900, 0.867, 0.000] + + One way to match the values on the Winkler's paper is to provide a different + p scaling factor for different pairs of strings, e.g. + + >>> p_factors = [0.1, 0.1, 0.1, 0.1, 0.125, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.20, + ... 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] + + + >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors): + ... if (s1, s2) in [('JON', 'JAN'), ('1ST', 'IST')]: + ... continue # Skip bad examples from the paper. + ... assert round(jaro_similarity(s1, s2), 3) == jscore + ... assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore + + + + This test-case proves that the output of Jaro-Winkler similarity depends on + the product l * p and not on the product max_l * p. Here the product max_l * p > 1 + however the product l * p <= 1 + + >>> round(jaro_winkler_similarity('TANYA', 'TONYA', p=0.1, max_l=100), 3) + 0.88 + """ + # To ensure that the output of the Jaro-Winkler's similarity + # falls between [0,1], the product of l * p needs to be + # also fall between [0,1]. + if not 0 <= max_l * p <= 1: + warnings.warn( + str( + "The product `max_l * p` might not fall between [0,1]." + "Jaro-Winkler similarity might not be between 0 and 1." + ) + ) + + # Compute the Jaro similarity + jaro_sim = jaro_similarity(s1, s2) + + # Initialize the upper bound for the no. of prefixes. + # if user did not pre-define the upperbound, + # use shorter length between s1 and s2 + + # Compute the prefix matches. + l = 0 + # zip() will automatically loop until the end of shorter string. + for s1_i, s2_i in zip(s1, s2): + if s1_i == s2_i: + l += 1 + else: + break + if l == max_l: + break + # Return the similarity value as described in docstring. 
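+    # Worked example (matches the 'TANYA'/'TONYA' doctest above): jaro_sim is
+    # roughly 0.867, the common prefix length l is 1 ('T') and p is 0.1, so the
+    # returned value is 0.867 + 1 * 0.1 * (1 - 0.867), i.e. about 0.88.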
+ return jaro_sim + (l * p * (1 - jaro_sim)) + + +def demo(): + string_distance_examples = [ + ("rain", "shine"), + ("abcdef", "acbdef"), + ("language", "lnaguaeg"), + ("language", "lnaugage"), + ("language", "lngauage"), + ] + for s1, s2 in string_distance_examples: + print(f"Edit distance btwn '{s1}' and '{s2}':", edit_distance(s1, s2)) + print( + f"Edit dist with transpositions btwn '{s1}' and '{s2}':", + edit_distance(s1, s2, transpositions=True), + ) + print(f"Jaro similarity btwn '{s1}' and '{s2}':", jaro_similarity(s1, s2)) + print( + f"Jaro-Winkler similarity btwn '{s1}' and '{s2}':", + jaro_winkler_similarity(s1, s2), + ) + print( + f"Jaro-Winkler distance btwn '{s1}' and '{s2}':", + 1 - jaro_winkler_similarity(s1, s2), + ) + s1 = {1, 2, 3, 4} + s2 = {3, 4, 5} + print("s1:", s1) + print("s2:", s2) + print("Binary distance:", binary_distance(s1, s2)) + print("Jaccard distance:", jaccard_distance(s1, s2)) + print("MASI distance:", masi_distance(s1, s2)) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/paice.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/paice.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7de1930b61654f9120a2ec2cd5bf6ef090fc47 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/paice.py @@ -0,0 +1,389 @@ +# Natural Language Toolkit: Agreement Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Lauri Hallila +# URL: +# For license information, see LICENSE.TXT +# + +"""Counts Paice's performance statistics for evaluating stemming algorithms. + +What is required: + - A dictionary of words grouped by their real lemmas + - A dictionary of words grouped by stems from a stemming algorithm + +When these are given, Understemming Index (UI), Overstemming Index (OI), +Stemming Weight (SW) and Error-rate relative to truncation (ERRT) are counted. + +References: +Chris D. Paice (1994). An evaluation method for stemming algorithms. +In Proceedings of SIGIR, 42--50. +""" + +from math import sqrt + + +def get_words_from_dictionary(lemmas): + """ + Get original set of words used for analysis. + + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :type lemmas: dict(str): list(str) + :return: Set of words that exist as values in the dictionary + :rtype: set(str) + """ + words = set() + for lemma in lemmas: + words.update(set(lemmas[lemma])) + return words + + +def _truncate(words, cutlength): + """Group words by stems defined by truncating them at given length. + + :param words: Set of words used for analysis + :param cutlength: Words are stemmed by cutting at this length. + :type words: set(str) or list(str) + :type cutlength: int + :return: Dictionary where keys are stems and values are sets of words + corresponding to that stem. + :rtype: dict(str): set(str) + """ + stems = {} + for word in words: + stem = word[:cutlength] + try: + stems[stem].update([word]) + except KeyError: + stems[stem] = {word} + return stems + + +# Reference: https://en.wikipedia.org/wiki/Line-line_intersection +def _count_intersection(l1, l2): + """Count intersection between two line segments defined by coordinate pairs. 
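+    For example, the two diagonals of the square with corners (0, 0) and
+    (2, 2) cross at (1, 1) (an illustrative check):
+
+    >>> _count_intersection(((0, 0), (2, 2)), ((0, 2), (2, 0)))
+    (1.0, 1.0)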
+ + :param l1: Tuple of two coordinate pairs defining the first line segment + :param l2: Tuple of two coordinate pairs defining the second line segment + :type l1: tuple(float, float) + :type l2: tuple(float, float) + :return: Coordinates of the intersection + :rtype: tuple(float, float) + """ + x1, y1 = l1[0] + x2, y2 = l1[1] + x3, y3 = l2[0] + x4, y4 = l2[1] + + denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) + + if denominator == 0.0: # lines are parallel + if x1 == x2 == x3 == x4 == 0.0: + # When lines are parallel, they must be on the y-axis. + # We can ignore x-axis because we stop counting the + # truncation line when we get there. + # There are no other options as UI (x-axis) grows and + # OI (y-axis) diminishes when we go along the truncation line. + return (0.0, y4) + + x = ( + (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4) + ) / denominator + y = ( + (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4) + ) / denominator + return (x, y) + + +def _get_derivative(coordinates): + """Get derivative of the line from (0,0) to given coordinates. + + :param coordinates: A coordinate pair + :type coordinates: tuple(float, float) + :return: Derivative; inf if x is zero + :rtype: float + """ + try: + return coordinates[1] / coordinates[0] + except ZeroDivisionError: + return float("inf") + + +def _calculate_cut(lemmawords, stems): + """Count understemmed and overstemmed pairs for (lemma, stem) pair with common words. + + :param lemmawords: Set or list of words corresponding to certain lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmawords: set(str) or list(str) + :type stems: dict(str): set(str) + :return: Amount of understemmed and overstemmed pairs contributed by words + existing in both lemmawords and stems. + :rtype: tuple(float, float) + """ + umt, wmt = 0.0, 0.0 + for stem in stems: + cut = set(lemmawords) & set(stems[stem]) + if cut: + cutcount = len(cut) + stemcount = len(stems[stem]) + # Unachieved merge total + umt += cutcount * (len(lemmawords) - cutcount) + # Wrongly merged total + wmt += cutcount * (stemcount - cutcount) + return (umt, wmt) + + +def _calculate(lemmas, stems): + """Calculate actual and maximum possible amounts of understemmed and overstemmed word pairs. + + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmas: dict(str): list(str) + :type stems: dict(str): set(str) + :return: Global unachieved merge total (gumt), + global desired merge total (gdmt), + global wrongly merged total (gwmt) and + global desired non-merge total (gdnt). + :rtype: tuple(float, float, float, float) + """ + + n = sum(len(lemmas[word]) for word in lemmas) + + gdmt, gdnt, gumt, gwmt = (0.0, 0.0, 0.0, 0.0) + + for lemma in lemmas: + lemmacount = len(lemmas[lemma]) + + # Desired merge total + gdmt += lemmacount * (lemmacount - 1) + + # Desired non-merge total + gdnt += lemmacount * (n - lemmacount) + + # For each (lemma, stem) pair with common words, count how many + # pairs are understemmed and overstemmed. 
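+        # (umt counts same-lemma word pairs that were split across different
+        # stems; wmt counts pairs where a word of this lemma shares a stem
+        # with a word from another lemma.)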
+ umt, wmt = _calculate_cut(lemmas[lemma], stems) + + # Add to total undesired and wrongly-merged totals + gumt += umt + gwmt += wmt + + # Each object is counted twice, so divide by two + return (gumt / 2, gdmt / 2, gwmt / 2, gdnt / 2) + + +def _indexes(gumt, gdmt, gwmt, gdnt): + """Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). + + :param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt), + global desired merge total (gdmt), + global wrongly merged total (gwmt) and + global desired non-merge total (gdnt). + :type gumt, gdmt, gwmt, gdnt: float + :return: Understemming Index (UI), + Overstemming Index (OI) and + Stemming Weight (SW). + :rtype: tuple(float, float, float) + """ + # Calculate Understemming Index (UI), + # Overstemming Index (OI) and Stemming Weight (SW) + try: + ui = gumt / gdmt + except ZeroDivisionError: + # If GDMT (max merge total) is 0, define UI as 0 + ui = 0.0 + try: + oi = gwmt / gdnt + except ZeroDivisionError: + # IF GDNT (max non-merge total) is 0, define OI as 0 + oi = 0.0 + try: + sw = oi / ui + except ZeroDivisionError: + if oi == 0.0: + # OI and UI are 0, define SW as 'not a number' + sw = float("nan") + else: + # UI is 0, define SW as infinity + sw = float("inf") + return (ui, oi, sw) + + +class Paice: + """Class for storing lemmas, stems and evaluation metrics.""" + + def __init__(self, lemmas, stems): + """ + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmas: dict(str): list(str) + :type stems: dict(str): set(str) + """ + self.lemmas = lemmas + self.stems = stems + self.coords = [] + self.gumt, self.gdmt, self.gwmt, self.gdnt = (None, None, None, None) + self.ui, self.oi, self.sw = (None, None, None) + self.errt = None + self.update() + + def __str__(self): + text = ["Global Unachieved Merge Total (GUMT): %s\n" % self.gumt] + text.append("Global Desired Merge Total (GDMT): %s\n" % self.gdmt) + text.append("Global Wrongly-Merged Total (GWMT): %s\n" % self.gwmt) + text.append("Global Desired Non-merge Total (GDNT): %s\n" % self.gdnt) + text.append("Understemming Index (GUMT / GDMT): %s\n" % self.ui) + text.append("Overstemming Index (GWMT / GDNT): %s\n" % self.oi) + text.append("Stemming Weight (OI / UI): %s\n" % self.sw) + text.append("Error-Rate Relative to Truncation (ERRT): %s\r\n" % self.errt) + coordinates = " ".join(["(%s, %s)" % item for item in self.coords]) + text.append("Truncation line: %s" % coordinates) + return "".join(text) + + def _get_truncation_indexes(self, words, cutlength): + """Count (UI, OI) when stemming is done by truncating words at \'cutlength\'. + + :param words: Words used for the analysis + :param cutlength: Words are stemmed by cutting them at this length + :type words: set(str) or list(str) + :type cutlength: int + :return: Understemming and overstemming indexes + :rtype: tuple(int, int) + """ + + truncated = _truncate(words, cutlength) + gumt, gdmt, gwmt, gdnt = _calculate(self.lemmas, truncated) + ui, oi = _indexes(gumt, gdmt, gwmt, gdnt)[:2] + return (ui, oi) + + def _get_truncation_coordinates(self, cutlength=0): + """Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line. + + :param cutlength: Optional parameter to start counting from (ui, oi) + coordinates gotten by stemming at this length. 
Useful for speeding up + the calculations when you know the approximate location of the + intersection. + :type cutlength: int + :return: List of coordinate pairs that define the truncation line + :rtype: list(tuple(float, float)) + """ + words = get_words_from_dictionary(self.lemmas) + maxlength = max(len(word) for word in words) + + # Truncate words from different points until (0, 0) - (ui, oi) segment crosses the truncation line + coords = [] + while cutlength <= maxlength: + # Get (UI, OI) pair of current truncation point + pair = self._get_truncation_indexes(words, cutlength) + + # Store only new coordinates so we'll have an actual + # line segment when counting the intersection point + if pair not in coords: + coords.append(pair) + if pair == (0.0, 0.0): + # Stop counting if truncation line goes through origo; + # length from origo to truncation line is 0 + return coords + if len(coords) >= 2 and pair[0] > 0.0: + derivative1 = _get_derivative(coords[-2]) + derivative2 = _get_derivative(coords[-1]) + # Derivative of the truncation line is a decreasing value; + # when it passes Stemming Weight, we've found the segment + # of truncation line intersecting with (0, 0) - (ui, oi) segment + if derivative1 >= self.sw >= derivative2: + return coords + cutlength += 1 + return coords + + def _errt(self): + """Count Error-Rate Relative to Truncation (ERRT). + + :return: ERRT, length of the line from origo to (UI, OI) divided by + the length of the line from origo to the point defined by the same + line when extended until the truncation line. + :rtype: float + """ + # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line + self.coords = self._get_truncation_coordinates() + if (0.0, 0.0) in self.coords: + # Truncation line goes through origo, so ERRT cannot be counted + if (self.ui, self.oi) != (0.0, 0.0): + return float("inf") + else: + return float("nan") + if (self.ui, self.oi) == (0.0, 0.0): + # (ui, oi) is origo; define errt as 0.0 + return 0.0 + # Count the intersection point + # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates + # so we have actual line segments instead of a line segment and a point + intersection = _count_intersection( + ((0, 0), (self.ui, self.oi)), self.coords[-2:] + ) + # Count OP (length of the line from origo to (ui, oi)) + op = sqrt(self.ui**2 + self.oi**2) + # Count OT (length of the line from origo to truncation line that goes through (ui, oi)) + ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2) + # OP / OT tells how well the stemming algorithm works compared to just truncating words + return op / ot + + def update(self): + """Update statistics after lemmas and stems have been set.""" + self.gumt, self.gdmt, self.gwmt, self.gdnt = _calculate(self.lemmas, self.stems) + self.ui, self.oi, self.sw = _indexes(self.gumt, self.gdmt, self.gwmt, self.gdnt) + self.errt = self._errt() + + +def demo(): + """Demonstration of the module.""" + # Some words with their real lemmas + lemmas = { + "kneel": ["kneel", "knelt"], + "range": ["range", "ranged"], + "ring": ["ring", "rang", "rung"], + } + # Same words with stems from a stemming algorithm + stems = { + "kneel": ["kneel"], + "knelt": ["knelt"], + "rang": ["rang", "range", "ranged"], + "ring": ["ring"], + "rung": ["rung"], + } + print("Words grouped by their lemmas:") + for lemma in sorted(lemmas): + print("{} => {}".format(lemma, " ".join(lemmas[lemma]))) + print() + print("Same words grouped by a stemming algorithm:") + 
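+    # Note: the stem 'rang' mixes words from two different lemmas ('rang' from
+    # 'ring' with 'range'/'ranged' from 'range'), which makes the Overstemming
+    # Index non-zero, while 'kneel'/'knelt' and 'ring'/'rang'/'rung' being
+    # split across stems drives the Understemming Index.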
for stem in sorted(stems): + print("{} => {}".format(stem, " ".join(stems[stem]))) + print() + p = Paice(lemmas, stems) + print(p) + print() + # Let's "change" results from a stemming algorithm + stems = { + "kneel": ["kneel"], + "knelt": ["knelt"], + "rang": ["rang"], + "range": ["range", "ranged"], + "ring": ["ring"], + "rung": ["rung"], + } + print("Counting stats after changing stemming results:") + for stem in sorted(stems): + print("{} => {}".format(stem, " ".join(stems[stem]))) + print() + p.stems = stems + p.update() + print(p) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/scores.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/scores.py new file mode 100644 index 0000000000000000000000000000000000000000..0d6d296aa62893788de65cdd0cdf3f5480a161f2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/scores.py @@ -0,0 +1,228 @@ +# Natural Language Toolkit: Evaluation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import operator +from functools import reduce +from math import fabs +from random import shuffle + +try: + from scipy.stats.stats import betai +except ImportError: + betai = None + +from nltk.util import LazyConcatenation, LazyMap + + +def accuracy(reference, test): + """ + Given a list of reference values and a corresponding list of test + values, return the fraction of corresponding values that are + equal. In particular, return the fraction of indices + ``0= actual_stat: + c += 1 + + if verbose and i % 10 == 0: + print("pseudo-statistic: %f" % pseudo_stat) + print("significance: %f" % ((c + 1) / (i + 1))) + print("-" * 60) + + significance = (c + 1) / (shuffles + 1) + + if verbose: + print("significance: %f" % significance) + if betai: + for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]: + print(f"prob(phi<={phi:f}): {betai(c, shuffles, phi):f}") + + return (significance, c, shuffles) + + +def demo(): + print("-" * 75) + reference = "DET NN VB DET JJ NN NN IN DET NN".split() + test = "DET VB VB DET NN NN NN IN DET NN".split() + print("Reference =", reference) + print("Test =", test) + print("Accuracy:", accuracy(reference, test)) + + print("-" * 75) + reference_set = set(reference) + test_set = set(test) + print("Reference =", reference_set) + print("Test = ", test_set) + print("Precision:", precision(reference_set, test_set)) + print(" Recall:", recall(reference_set, test_set)) + print("F-Measure:", f_measure(reference_set, test_set)) + print("-" * 75) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/segmentation.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..518197d35dff62ce5735b8e76fb5939b19ebedd2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/segmentation.py @@ -0,0 +1,222 @@ +# Natural Language Toolkit: Text Segmentation Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# David Doukhan +# URL: +# For license information, see LICENSE.TXT + + +""" +Text Segmentation Metrics + +1. Windowdiff + +Pevzner, L., and Hearst, M., A Critique and Improvement of + an Evaluation Metric for Text Segmentation, + Computational Linguistics 28, 19-36 + + +2. Generalized Hamming Distance + +Bookstein A., Kulyukin V.A., Raita T. 
+Generalized Hamming Distance +Information Retrieval 5, 2002, pp 353-375 + +Baseline implementation in C++ +http://digital.cs.usu.edu/~vkulyukin/vkweb/software/ghd/ghd.html + +Study describing benefits of Generalized Hamming Distance Versus +WindowDiff for evaluating text segmentation tasks +Begsten, Y. Quel indice pour mesurer l'efficacite en segmentation de textes ? +TALN 2009 + + +3. Pk text segmentation metric + +Beeferman D., Berger A., Lafferty J. (1999) +Statistical Models for Text Segmentation +Machine Learning, 34, 177-210 +""" + +try: + import numpy as np +except ImportError: + pass + + +def windowdiff(seg1, seg2, k, boundary="1", weighted=False): + """ + Compute the windowdiff score for a pair of segmentations. A + segmentation is any sequence over a vocabulary of two items + (e.g. "0", "1"), where the specified boundary value is used to + mark the edge of a segmentation. + + >>> s1 = "000100000010" + >>> s2 = "000010000100" + >>> s3 = "100000010000" + >>> '%.2f' % windowdiff(s1, s1, 3) + '0.00' + >>> '%.2f' % windowdiff(s1, s2, 3) + '0.30' + >>> '%.2f' % windowdiff(s2, s3, 3) + '0.80' + + :param seg1: a segmentation + :type seg1: str or list + :param seg2: a segmentation + :type seg2: str or list + :param k: window width + :type k: int + :param boundary: boundary value + :type boundary: str or int or bool + :param weighted: use the weighted variant of windowdiff + :type weighted: boolean + :rtype: float + """ + + if len(seg1) != len(seg2): + raise ValueError("Segmentations have unequal length") + if k > len(seg1): + raise ValueError( + "Window width k should be smaller or equal than segmentation lengths" + ) + wd = 0 + for i in range(len(seg1) - k + 1): + ndiff = abs(seg1[i : i + k].count(boundary) - seg2[i : i + k].count(boundary)) + if weighted: + wd += ndiff + else: + wd += min(1, ndiff) + return wd / (len(seg1) - k + 1.0) + + +# Generalized Hamming Distance + + +def _init_mat(nrows, ncols, ins_cost, del_cost): + mat = np.empty((nrows, ncols)) + mat[0, :] = ins_cost * np.arange(ncols) + mat[:, 0] = del_cost * np.arange(nrows) + return mat + + +def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff): + for i, rowi in enumerate(rowv): + for j, colj in enumerate(colv): + shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j] + if rowi == colj: + # boundaries are at the same location, no transformation required + tcost = mat[i, j] + elif rowi > colj: + # boundary match through a deletion + tcost = del_cost + mat[i, j + 1] + else: + # boundary match through an insertion + tcost = ins_cost + mat[i + 1, j] + mat[i + 1, j + 1] = min(tcost, shift_cost) + + +def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"): + """ + Compute the Generalized Hamming Distance for a reference and a hypothetical + segmentation, corresponding to the cost related to the transformation + of the hypothetical segmentation into the reference segmentation + through boundary insertion, deletion and shift operations. + + A segmentation is any sequence over a vocabulary of two items + (e.g. "0", "1"), where the specified boundary value is used to + mark the edge of a segmentation. + + Recommended parameter values are a shift_cost_coeff of 2. + Associated with a ins_cost, and del_cost equal to the mean segment + length in the reference segmentation. 
+ + >>> # Same examples as Kulyukin C++ implementation + >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5) + 0.5 + >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5) + 2.0 + >>> ghd('011', '110', 1.0, 1.0, 0.5) + 1.0 + >>> ghd('1', '0', 1.0, 1.0, 0.5) + 1.0 + >>> ghd('111', '000', 1.0, 1.0, 0.5) + 3.0 + >>> ghd('000', '111', 1.0, 2.0, 0.5) + 6.0 + + :param ref: the reference segmentation + :type ref: str or list + :param hyp: the hypothetical segmentation + :type hyp: str or list + :param ins_cost: insertion cost + :type ins_cost: float + :param del_cost: deletion cost + :type del_cost: float + :param shift_cost_coeff: constant used to compute the cost of a shift. + ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j`` + are the positions indicating the shift + :type shift_cost_coeff: float + :param boundary: boundary value + :type boundary: str or int or bool + :rtype: float + """ + + ref_idx = [i for (i, val) in enumerate(ref) if val == boundary] + hyp_idx = [i for (i, val) in enumerate(hyp) if val == boundary] + + nref_bound = len(ref_idx) + nhyp_bound = len(hyp_idx) + + if nref_bound == 0 and nhyp_bound == 0: + return 0.0 + elif nref_bound > 0 and nhyp_bound == 0: + return nref_bound * ins_cost + elif nref_bound == 0 and nhyp_bound > 0: + return nhyp_bound * del_cost + + mat = _init_mat(nhyp_bound + 1, nref_bound + 1, ins_cost, del_cost) + _ghd_aux(mat, hyp_idx, ref_idx, ins_cost, del_cost, shift_cost_coeff) + return mat[-1, -1] + + +# Beeferman's Pk text segmentation evaluation metric + + +def pk(ref, hyp, k=None, boundary="1"): + """ + Compute the Pk metric for a pair of segmentations A segmentation + is any sequence over a vocabulary of two items (e.g. "0", "1"), + where the specified boundary value is used to mark the edge of a + segmentation. + + >>> '%.2f' % pk('0100'*100, '1'*400, 2) + '0.50' + >>> '%.2f' % pk('0100'*100, '0'*400, 2) + '0.50' + >>> '%.2f' % pk('0100'*100, '0100'*100, 2) + '0.00' + + :param ref: the reference segmentation + :type ref: str or list + :param hyp: the segmentation to evaluate + :type hyp: str or list + :param k: window size, if None, set to half of the average reference segment length + :type boundary: str or int or bool + :param boundary: boundary value + :type boundary: str or int or bool + :rtype: float + """ + + if k is None: + k = int(round(len(ref) / (ref.count(boundary) * 2.0))) + + err = 0 + for i in range(len(ref) - k + 1): + r = ref[i : i + k].count(boundary) > 0 + h = hyp[i : i + k].count(boundary) > 0 + if r != h: + err += 1 + return err / (len(ref) - k + 1.0) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/spearman.py b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/spearman.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2987d371a2af218c7223e155b335315663fd2c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/spearman.py @@ -0,0 +1,68 @@ +# Natural Language Toolkit: Spearman Rank Correlation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joel Nothman +# URL: +# For license information, see LICENSE.TXT + +""" +Tools for comparing ranked lists. +""" + + +def _rank_dists(ranks1, ranks2): + """Finds the difference between the values in ranks1 and ranks2 for keys + present in both dicts. If the arguments are not dicts, they are converted + from (key, rank) sequences. 
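+    For example (illustrative):
+
+    >>> sorted(_rank_dists([('a', 1), ('b', 2)], [('a', 2), ('b', 1)]))
+    [('a', -1), ('b', 1)]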
+ """ + ranks1 = dict(ranks1) + ranks2 = dict(ranks2) + for k in ranks1: + try: + yield k, ranks1[k] - ranks2[k] + except KeyError: + pass + + +def spearman_correlation(ranks1, ranks2): + """Returns the Spearman correlation coefficient for two rankings, which + should be dicts or sequences of (key, rank). The coefficient ranges from + -1.0 (ranks are opposite) to 1.0 (ranks are identical), and is only + calculated for keys in both rankings (for meaningful results, remove keys + present in only one list before ranking).""" + n = 0 + res = 0 + for k, d in _rank_dists(ranks1, ranks2): + res += d * d + n += 1 + try: + return 1 - (6 * res / (n * (n * n - 1))) + except ZeroDivisionError: + # Result is undefined if only one item is ranked + return 0.0 + + +def ranks_from_sequence(seq): + """Given a sequence, yields each element with an increasing rank, suitable + for use as an argument to ``spearman_correlation``. + """ + return ((k, i) for i, k in enumerate(seq)) + + +def ranks_from_scores(scores, rank_gap=1e-15): + """Given a sequence of (key, score) tuples, yields each key with an + increasing rank, tying with previous key's rank if the difference between + their scores is less than rank_gap. Suitable for use as an argument to + ``spearman_correlation``. + """ + prev_score = None + rank = 0 + for i, (key, score) in enumerate(scores): + try: + if abs(score - prev_score) > rank_gap: + rank = i + except TypeError: + pass + + yield key, rank + prev_score = score diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac9e0f7125810319ed560d0cdfdc0c1f0114b18 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__init__.py @@ -0,0 +1,11 @@ +# Natural Language Toolkit: Miscellaneous modules +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from nltk.misc.babelfish import babelize_shell +from nltk.misc.chomsky import generate_chomsky +from nltk.misc.minimalset import MinimalSet +from nltk.misc.wordfinder import word_finder diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d478aa10892f331d319dafdb414117b62226555 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcfe3469a9680813920c25e5892261800ead37bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b578ef518831d908955f9f530cff5dac86ca7ac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1083661f559f02aa3dd647e76ed944315bae01fa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca01fbf0b4fae832f27dc18047165f416e95e27 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..933fe7cde69743310e6f1833790b7ded7f5cd092 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/sort.py b/llmeval-env/lib/python3.10/site-packages/nltk/misc/sort.py new file mode 100644 index 0000000000000000000000000000000000000000..cb543d93929f45505475f9d985afea5e92f58a94 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/misc/sort.py @@ -0,0 +1,176 @@ +# Natural Language Toolkit: List Sorting +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +This module provides a variety of list sorting algorithms, to +illustrate the many different algorithms (recipes) for solving a +problem, and how to analyze algorithms experimentally. +""" +# These algorithms are taken from: +# Levitin (2004) The Design and Analysis of Algorithms + +################################################################## +# Selection Sort +################################################################## + + +def selection(a): + """ + Selection Sort: scan the list to find its smallest element, then + swap it with the first element. The remainder of the list is one + element smaller; apply the same method to this list, and so on. + """ + count = 0 + + for i in range(len(a) - 1): + min = i + + for j in range(i + 1, len(a)): + if a[j] < a[min]: + min = j + + count += 1 + + a[min], a[i] = a[i], a[min] + + return count + + +################################################################## +# Bubble Sort +################################################################## + + +def bubble(a): + """ + Bubble Sort: compare adjacent elements of the list left-to-right, + and swap them if they are out of order. After one pass through + the list swapping adjacent items, the largest item will be in + the rightmost position. The remainder is one element smaller; + apply the same method to this list, and so on. 
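+
+    A minimal illustrative doctest (not part of the original source): the list
+    is sorted in place and an operation count is returned, as used by ``demo()``:
+
+    >>> a = [3, 1, 2]
+    >>> count = bubble(a)
+    >>> a
+    [1, 2, 3]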
+ """ + count = 0 + for i in range(len(a) - 1): + for j in range(len(a) - i - 1): + if a[j + 1] < a[j]: + a[j], a[j + 1] = a[j + 1], a[j] + count += 1 + return count + + +################################################################## +# Merge Sort +################################################################## + + +def _merge_lists(b, c): + count = 0 + i = j = 0 + a = [] + while i < len(b) and j < len(c): + count += 1 + if b[i] <= c[j]: + a.append(b[i]) + i += 1 + else: + a.append(c[j]) + j += 1 + if i == len(b): + a += c[j:] + else: + a += b[i:] + return a, count + + +def merge(a): + """ + Merge Sort: split the list in half, and sort each half, then + combine the sorted halves. + """ + count = 0 + if len(a) > 1: + midpoint = len(a) // 2 + b = a[:midpoint] + c = a[midpoint:] + count_b = merge(b) + count_c = merge(c) + result, count_a = _merge_lists(b, c) + a[:] = result # copy the result back into a. + count = count_a + count_b + count_c + return count + + +################################################################## +# Quick Sort +################################################################## + + +def _partition(a, l, r): + p = a[l] + i = l + j = r + 1 + count = 0 + while True: + while i < r: + i += 1 + if a[i] >= p: + break + while j > l: + j -= 1 + if j < l or a[j] <= p: + break + a[i], a[j] = a[j], a[i] # swap + count += 1 + if i >= j: + break + a[i], a[j] = a[j], a[i] # undo last swap + a[l], a[j] = a[j], a[l] + return j, count + + +def _quick(a, l, r): + count = 0 + if l < r: + s, count = _partition(a, l, r) + count += _quick(a, l, s - 1) + count += _quick(a, s + 1, r) + return count + + +def quick(a): + return _quick(a, 0, len(a) - 1) + + +################################################################## +# Demonstration +################################################################## + + +def demo(): + from random import shuffle + + for size in (10, 20, 50, 100, 200, 500, 1000): + a = list(range(size)) + + # various sort methods + shuffle(a) + count_selection = selection(a) + shuffle(a) + count_bubble = bubble(a) + shuffle(a) + count_merge = merge(a) + shuffle(a) + count_quick = quick(a) + + print( + ("size=%5d: selection=%8d, bubble=%8d, " "merge=%6d, quick=%6d") + % (size, count_selection, count_bubble, count_merge, count_quick) + ) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/wordfinder.py b/llmeval-env/lib/python3.10/site-packages/nltk/misc/wordfinder.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ddca0dd6282e988ad38d287ae1029dadc98dfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/misc/wordfinder.py @@ -0,0 +1,139 @@ +# Natural Language Toolkit: Word Finder +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Simplified from PHP version by Robert Klein +# http://fswordfinder.sourceforge.net/ + +import random + + +# reverse a word with probability 0.5 +def revword(word): + if random.randint(1, 2) == 1: + return word[::-1] + return word + + +# try to insert word at position x,y; direction encoded in xf,yf +def step(word, x, xf, y, yf, grid): + for i in range(len(word)): + if grid[xf(i)][yf(i)] != "" and grid[xf(i)][yf(i)] != word[i]: + return False + for i in range(len(word)): + grid[xf(i)][yf(i)] = word[i] + return True + + +# try to insert word at position x,y, in direction dir +def check(word, dir, x, y, grid, rows, cols): + if dir == 1: + if x - len(word) < 0 or y - len(word) < 0: 
+ return False + return step(word, x, lambda i: x - i, y, lambda i: y - i, grid) + elif dir == 2: + if x - len(word) < 0: + return False + return step(word, x, lambda i: x - i, y, lambda i: y, grid) + elif dir == 3: + if x - len(word) < 0 or y + (len(word) - 1) >= cols: + return False + return step(word, x, lambda i: x - i, y, lambda i: y + i, grid) + elif dir == 4: + if y - len(word) < 0: + return False + return step(word, x, lambda i: x, y, lambda i: y - i, grid) + + +def wordfinder(words, rows=20, cols=20, attempts=50, alph="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): + """ + Attempt to arrange words into a letter-grid with the specified + number of rows and columns. Try each word in several positions + and directions, until it can be fitted into the grid, or the + maximum number of allowable attempts is exceeded. Returns a tuple + consisting of the grid and the words that were successfully + placed. + + :param words: the list of words to be put into the grid + :type words: list + :param rows: the number of rows in the grid + :type rows: int + :param cols: the number of columns in the grid + :type cols: int + :param attempts: the number of times to attempt placing a word + :type attempts: int + :param alph: the alphabet, to be used for filling blank cells + :type alph: list + :rtype: tuple + """ + + # place longer words first + words = sorted(words, key=len, reverse=True) + + grid = [] # the letter grid + used = [] # the words we used + + # initialize the grid + for i in range(rows): + grid.append([""] * cols) + + # try to place each word + for word in words: + word = word.strip().upper() # normalize + save = word # keep a record of the word + word = revword(word) + for attempt in range(attempts): + r = random.randint(0, len(word)) + dir = random.choice([1, 2, 3, 4]) + x = random.randint(0, rows) + y = random.randint(0, cols) + if dir == 1: + x += r + y += r + elif dir == 2: + x += r + elif dir == 3: + x += r + y -= r + elif dir == 4: + y += r + if 0 <= x < rows and 0 <= y < cols: + if check(word, dir, x, y, grid, rows, cols): + # used.append((save, dir, x, y, word)) + used.append(save) + break + + # Fill up the remaining spaces + for i in range(rows): + for j in range(cols): + if grid[i][j] == "": + grid[i][j] = random.choice(alph) + + return grid, used + + +def word_finder(): + from nltk.corpus import words + + wordlist = words.words() + random.shuffle(wordlist) + wordlist = wordlist[:200] + wordlist = [w for w in wordlist if 3 <= len(w) <= 12] + grid, used = wordfinder(wordlist) + + print("Word Finder\n") + for i in range(len(grid)): + for j in range(len(grid[i])): + print(grid[i][j], end=" ") + print() + print() + + for i in range(len(used)): + print("%d:" % (i + 1), used[i]) + + +if __name__ == "__main__": + word_finder() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2a7fa30126f8c626b8bd3d98a58a52c94a5cf1a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d45798b3ae31ce95f6dd774167becce54ad29239 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da7e83725cb4c3c646646fc66b01f5f7e3d1c885 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2eec038312a85461a7b635a014559608c3446199 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b346335b41b32e0e6e4d06934b7fc1546cf5450 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py new file mode 100644 index 0000000000000000000000000000000000000000..f28b361cb76121f76d633d709aca6b5e32acb14d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py @@ -0,0 +1,116 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT + +import unittest + +import pytest + +from nltk import FreqDist +from nltk.lm import NgramCounter +from nltk.util import everygrams + + +class TestNgramCounter: + """Tests for NgramCounter that only involve lookup, no modification.""" + + @classmethod + def setup_class(self): + text = [list("abcd"), list("egdbe")] + self.trigram_counter = NgramCounter( + everygrams(sent, max_len=3) for sent in text + ) + self.bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text) + self.case = unittest.TestCase() + + def test_N(self): + assert self.bigram_counter.N() == 16 + assert self.trigram_counter.N() == 21 + + def test_counter_len_changes_with_lookup(self): + assert len(self.bigram_counter) == 2 + self.bigram_counter[50] + assert len(self.bigram_counter) == 3 + + def test_ngram_order_access_unigrams(self): + assert self.bigram_counter[1] == self.bigram_counter.unigrams + + def test_ngram_conditional_freqdist(self): + case = unittest.TestCase() + expected_trigram_contexts = [ + ("a", "b"), + ("b", "c"), + ("e", "g"), + ("g", "d"), + ("d", "b"), + ] + expected_bigram_contexts = [("a",), ("b",), ("d",), 
("e",), ("c",), ("g",)] + + bigrams = self.trigram_counter[2] + trigrams = self.trigram_counter[3] + + self.case.assertCountEqual(expected_bigram_contexts, bigrams.conditions()) + self.case.assertCountEqual(expected_trigram_contexts, trigrams.conditions()) + + def test_bigram_counts_seen_ngrams(self): + assert self.bigram_counter[["a"]]["b"] == 1 + assert self.bigram_counter[["b"]]["c"] == 1 + + def test_bigram_counts_unseen_ngrams(self): + assert self.bigram_counter[["b"]]["z"] == 0 + + def test_unigram_counts_seen_words(self): + assert self.bigram_counter["b"] == 2 + + def test_unigram_counts_completely_unseen_words(self): + assert self.bigram_counter["z"] == 0 + + +class TestNgramCounterTraining: + @classmethod + def setup_class(self): + self.counter = NgramCounter() + self.case = unittest.TestCase() + + @pytest.mark.parametrize("case", ["", [], None]) + def test_empty_inputs(self, case): + test = NgramCounter(case) + assert 2 not in test + assert test[1] == FreqDist() + + def test_train_on_unigrams(self): + words = list("abcd") + counter = NgramCounter([[(w,) for w in words]]) + + assert not counter[3] + assert not counter[2] + self.case.assertCountEqual(words, counter[1].keys()) + + def test_train_on_illegal_sentences(self): + str_sent = ["Check", "this", "out", "!"] + list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]] + + with pytest.raises(TypeError): + NgramCounter([str_sent]) + + with pytest.raises(TypeError): + NgramCounter([list_sent]) + + def test_train_on_bigrams(self): + bigram_sent = [("a", "b"), ("c", "d")] + counter = NgramCounter([bigram_sent]) + assert not bool(counter[3]) + + def test_train_on_mix(self): + mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)] + counter = NgramCounter([mixed_sent]) + unigrams = ["h"] + bigram_contexts = [("a",), ("c",)] + trigram_contexts = [("e", "f")] + + self.case.assertCountEqual(unigrams, counter[1].keys()) + self.case.assertCountEqual(bigram_contexts, counter[2].keys()) + self.case.assertCountEqual(trigram_contexts, counter[3].keys()) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py new file mode 100644 index 0000000000000000000000000000000000000000..c0649fcc255b8452ec7de20afd04d099bf8e644d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py @@ -0,0 +1,610 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +import math +from operator import itemgetter + +import pytest + +from nltk.lm import ( + MLE, + AbsoluteDiscountingInterpolated, + KneserNeyInterpolated, + Laplace, + Lidstone, + StupidBackoff, + Vocabulary, + WittenBellInterpolated, +) +from nltk.lm.preprocessing import padded_everygrams + + +@pytest.fixture(scope="session") +def vocabulary(): + return Vocabulary(["a", "b", "c", "d", "z", "", ""], unk_cutoff=1) + + +@pytest.fixture(scope="session") +def training_data(): + return [["a", "b", "c", "d"], ["e", "g", "a", "d", "b", "e"]] + + +@pytest.fixture(scope="session") +def bigram_training_data(training_data): + return [list(padded_everygrams(2, sent)) for sent in training_data] + + +@pytest.fixture(scope="session") +def trigram_training_data(training_data): + return [list(padded_everygrams(3, sent)) for sent in training_data] + + +@pytest.fixture +def mle_bigram_model(vocabulary, bigram_training_data): + model = MLE(2, 
vocabulary=vocabulary) + model.fit(bigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + ("d", ["c"], 1), + # Unseen ngrams should yield 0 + ("d", ["e"], 0), + # Unigrams should also be 0 + ("z", None, 0), + # N unigrams = 14 + # count('a') = 2 + ("a", None, 2.0 / 14), + # count('y') = 3 + ("y", None, 3.0 / 14), + ], +) +def test_mle_bigram_scores(mle_bigram_model, word, context, expected_score): + assert pytest.approx(mle_bigram_model.score(word, context), 1e-4) == expected_score + + +def test_mle_bigram_logscore_for_zero_score(mle_bigram_model): + assert math.isinf(mle_bigram_model.logscore("d", ["e"])) + + +def test_mle_bigram_entropy_perplexity_seen(mle_bigram_model): + # ngrams seen during training + trained = [ + ("", "a"), + ("a", "b"), + ("b", ""), + ("", "a"), + ("a", "d"), + ("d", ""), + ] + # Ngram = Log score + # , a = -1 + # a, b = -1 + # b, UNK = -1 + # UNK, a = -1.585 + # a, d = -1 + # d, = -1 + # TOTAL logscores = -6.585 + # - AVG logscores = 1.0975 + H = 1.0975 + perplexity = 2.1398 + assert pytest.approx(mle_bigram_model.entropy(trained), 1e-4) == H + assert pytest.approx(mle_bigram_model.perplexity(trained), 1e-4) == perplexity + + +def test_mle_bigram_entropy_perplexity_unseen(mle_bigram_model): + # In MLE, even one unseen ngram should make entropy and perplexity infinite + untrained = [("", "a"), ("a", "c"), ("c", "d"), ("d", "")] + + assert math.isinf(mle_bigram_model.entropy(untrained)) + assert math.isinf(mle_bigram_model.perplexity(untrained)) + + +def test_mle_bigram_entropy_perplexity_unigrams(mle_bigram_model): + # word = score, log score + # = 0.1429, -2.8074 + # a = 0.1429, -2.8074 + # c = 0.0714, -3.8073 + # UNK = 0.2143, -2.2224 + # d = 0.1429, -2.8074 + # c = 0.0714, -3.8073 + # = 0.1429, -2.8074 + # TOTAL logscores = -21.6243 + # - AVG logscores = 3.0095 + H = 3.0095 + perplexity = 8.0529 + + text = [("",), ("a",), ("c",), ("-",), ("d",), ("c",), ("",)] + + assert pytest.approx(mle_bigram_model.entropy(text), 1e-4) == H + assert pytest.approx(mle_bigram_model.perplexity(text), 1e-4) == perplexity + + +@pytest.fixture +def mle_trigram_model(trigram_training_data, vocabulary): + model = MLE(order=3, vocabulary=vocabulary) + model.fit(trigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # count(d | b, c) = 1 + # count(b, c) = 1 + ("d", ("b", "c"), 1), + # count(d | c) = 1 + # count(c) = 1 + ("d", ["c"], 1), + # total number of tokens is 18, of which "a" occurred 2 times + ("a", None, 2.0 / 18), + # in vocabulary but unseen + ("z", None, 0), + # out of vocabulary should use "UNK" score + ("y", None, 3.0 / 18), + ], +) +def test_mle_trigram_scores(mle_trigram_model, word, context, expected_score): + assert pytest.approx(mle_trigram_model.score(word, context), 1e-4) == expected_score + + +@pytest.fixture +def lidstone_bigram_model(bigram_training_data, vocabulary): + model = Lidstone(0.1, order=2, vocabulary=vocabulary) + model.fit(bigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # count(d | c) = 1 + # *count(d | c) = 1.1 + # Count(w | c for w in vocab) = 1 + # *Count(w | c for w in vocab) = 1.8 + ("d", ["c"], 1.1 / 1.8), + # Total unigrams: 14 + # Vocab size: 8 + # Denominator: 14 + 0.8 = 14.8 + # count("a") = 2 + # *count("a") = 2.1 + ("a", None, 2.1 / 14.8), + # in vocabulary but unseen + # count("z") = 0 + # *count("z") = 0.1 + ("z", None, 0.1 / 14.8), + # out of vocabulary should use 
"UNK" score + # count("") = 3 + # *count("") = 3.1 + ("y", None, 3.1 / 14.8), + ], +) +def test_lidstone_bigram_score(lidstone_bigram_model, word, context, expected_score): + assert ( + pytest.approx(lidstone_bigram_model.score(word, context), 1e-4) + == expected_score + ) + + +def test_lidstone_entropy_perplexity(lidstone_bigram_model): + text = [ + ("", "a"), + ("a", "c"), + ("c", ""), + ("", "d"), + ("d", "c"), + ("c", ""), + ] + # Unlike MLE this should be able to handle completely novel ngrams + # Ngram = score, log score + # , a = 0.3929, -1.3479 + # a, c = 0.0357, -4.8074 + # c, UNK = 0.0(5), -4.1699 + # UNK, d = 0.0263, -5.2479 + # d, c = 0.0357, -4.8074 + # c, = 0.0(5), -4.1699 + # TOTAL logscore: −24.5504 + # - AVG logscore: 4.0917 + H = 4.0917 + perplexity = 17.0504 + assert pytest.approx(lidstone_bigram_model.entropy(text), 1e-4) == H + assert pytest.approx(lidstone_bigram_model.perplexity(text), 1e-4) == perplexity + + +@pytest.fixture +def lidstone_trigram_model(trigram_training_data, vocabulary): + model = Lidstone(0.1, order=3, vocabulary=vocabulary) + model.fit(trigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # Logic behind this is the same as for bigram model + ("d", ["c"], 1.1 / 1.8), + # if we choose a word that hasn't appeared after (b, c) + ("e", ["c"], 0.1 / 1.8), + # Trigram score now + ("d", ["b", "c"], 1.1 / 1.8), + ("e", ["b", "c"], 0.1 / 1.8), + ], +) +def test_lidstone_trigram_score(lidstone_trigram_model, word, context, expected_score): + assert ( + pytest.approx(lidstone_trigram_model.score(word, context), 1e-4) + == expected_score + ) + + +@pytest.fixture +def laplace_bigram_model(bigram_training_data, vocabulary): + model = Laplace(2, vocabulary=vocabulary) + model.fit(bigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # basic sanity-check: + # count(d | c) = 1 + # *count(d | c) = 2 + # Count(w | c for w in vocab) = 1 + # *Count(w | c for w in vocab) = 9 + ("d", ["c"], 2.0 / 9), + # Total unigrams: 14 + # Vocab size: 8 + # Denominator: 14 + 8 = 22 + # count("a") = 2 + # *count("a") = 3 + ("a", None, 3.0 / 22), + # in vocabulary but unseen + # count("z") = 0 + # *count("z") = 1 + ("z", None, 1.0 / 22), + # out of vocabulary should use "UNK" score + # count("") = 3 + # *count("") = 4 + ("y", None, 4.0 / 22), + ], +) +def test_laplace_bigram_score(laplace_bigram_model, word, context, expected_score): + assert ( + pytest.approx(laplace_bigram_model.score(word, context), 1e-4) == expected_score + ) + + +def test_laplace_bigram_entropy_perplexity(laplace_bigram_model): + text = [ + ("", "a"), + ("a", "c"), + ("c", ""), + ("", "d"), + ("d", "c"), + ("c", ""), + ] + # Unlike MLE this should be able to handle completely novel ngrams + # Ngram = score, log score + # , a = 0.2, -2.3219 + # a, c = 0.1, -3.3219 + # c, UNK = 0.(1), -3.1699 + # UNK, d = 0.(09), 3.4594 + # d, c = 0.1 -3.3219 + # c, = 0.(1), -3.1699 + # Total logscores: −18.7651 + # - AVG logscores: 3.1275 + H = 3.1275 + perplexity = 8.7393 + assert pytest.approx(laplace_bigram_model.entropy(text), 1e-4) == H + assert pytest.approx(laplace_bigram_model.perplexity(text), 1e-4) == perplexity + + +def test_laplace_gamma(laplace_bigram_model): + assert laplace_bigram_model.gamma == 1 + + +@pytest.fixture +def wittenbell_trigram_model(trigram_training_data, vocabulary): + model = WittenBellInterpolated(3, vocabulary=vocabulary) + model.fit(trigram_training_data) + return model + + 
+@pytest.mark.parametrize( + "word, context, expected_score", + [ + # For unigram scores by default revert to regular MLE + # Total unigrams: 18 + # Vocab Size = 7 + # count('c'): 1 + ("c", None, 1.0 / 18), + # in vocabulary but unseen + # count("z") = 0 + ("z", None, 0 / 18), + # out of vocabulary should use "UNK" score + # count("") = 3 + ("y", None, 3.0 / 18), + # 2 words follow b and b occurred a total of 2 times + # gamma(['b']) = 2 / (2 + 2) = 0.5 + # mle.score('c', ['b']) = 0.5 + # mle('c') = 1 / 18 = 0.055 + # (1 - gamma) * mle + gamma * mle('c') ~= 0.27 + 0.055 + ("c", ["b"], (1 - 0.5) * 0.5 + 0.5 * 1 / 18), + # building on that, let's try 'a b c' as the trigram + # 1 word follows 'a b' and 'a b' occurred 1 time + # gamma(['a', 'b']) = 1 / (1 + 1) = 0.5 + # mle("c", ["a", "b"]) = 1 + ("c", ["a", "b"], (1 - 0.5) + 0.5 * ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)), + # P(c|zb) + # The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332. + ("c", ["z", "b"], ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)), + ], +) +def test_wittenbell_trigram_score( + wittenbell_trigram_model, word, context, expected_score +): + assert ( + pytest.approx(wittenbell_trigram_model.score(word, context), 1e-4) + == expected_score + ) + + +############################################################################### +# Notation Explained # +############################################################################### +# For all subsequent calculations we use the following notation: +# 1. '*': Placeholder for any word/character. E.g. '*b' stands for +# all bigrams that end in 'b'. '*b*' stands for all trigrams that +# contain 'b' in the middle. +# 1. count(ngram): Count all instances (tokens) of an ngram. +# 1. unique(ngram): Count unique instances (types) of an ngram. + + +@pytest.fixture +def kneserney_trigram_model(trigram_training_data, vocabulary): + model = KneserNeyInterpolated(order=3, discount=0.75, vocabulary=vocabulary) + model.fit(trigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # P(c) = count('*c') / unique('**') + # = 1 / 14 + ("c", None, 1.0 / 14), + # P(z) = count('*z') / unique('**') + # = 0 / 14 + # 'z' is in the vocabulary, but it was not seen during training. + ("z", None, 0.0 / 14), + # P(y) + # Out of vocabulary should use "UNK" score. + # P(y) = P(UNK) = count('*UNK') / unique('**') + ("y", None, 3 / 14), + # We start with P(c|b) + # P(c|b) = alpha('bc') + gamma('b') * P(c) + # alpha('bc') = max(unique('*bc') - discount, 0) / unique('*b*') + # = max(1 - 0.75, 0) / 2 + # = 0.125 + # gamma('b') = discount * unique('b*') / unique('*b*') + # = (0.75 * 2) / 2 + # = 0.75 + ("c", ["b"], (0.125 + 0.75 * (1 / 14))), + # Building on that, let's try P(c|ab). + # P(c|ab) = alpha('abc') + gamma('ab') * P(c|b) + # alpha('abc') = max(count('abc') - discount, 0) / count('ab*') + # = max(1 - 0.75, 0) / 1 + # = 0.25 + # gamma('ab') = (discount * unique('ab*')) / count('ab*') + # = 0.75 * 1 / 1 + ("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (1 / 14))), + # P(c|zb) + # The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332. 
+ ("c", ["z", "b"], (0.125 + 0.75 * (1 / 14))), + ], +) +def test_kneserney_trigram_score( + kneserney_trigram_model, word, context, expected_score +): + assert ( + pytest.approx(kneserney_trigram_model.score(word, context), 1e-4) + == expected_score + ) + + +@pytest.fixture +def absolute_discounting_trigram_model(trigram_training_data, vocabulary): + model = AbsoluteDiscountingInterpolated(order=3, vocabulary=vocabulary) + model.fit(trigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # For unigram scores revert to uniform + # P(c) = count('c') / count('**') + ("c", None, 1.0 / 18), + # in vocabulary but unseen + # count('z') = 0 + ("z", None, 0.0 / 18), + # out of vocabulary should use "UNK" score + # count('') = 3 + ("y", None, 3 / 18), + # P(c|b) = alpha('bc') + gamma('b') * P(c) + # alpha('bc') = max(count('bc') - discount, 0) / count('b*') + # = max(1 - 0.75, 0) / 2 + # = 0.125 + # gamma('b') = discount * unique('b*') / count('b*') + # = (0.75 * 2) / 2 + # = 0.75 + ("c", ["b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))), + # Building on that, let's try P(c|ab). + # P(c|ab) = alpha('abc') + gamma('ab') * P(c|b) + # alpha('abc') = max(count('abc') - discount, 0) / count('ab*') + # = max(1 - 0.75, 0) / 1 + # = 0.25 + # gamma('ab') = (discount * unique('ab*')) / count('ab*') + # = 0.75 * 1 / 1 + ("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (2 / 2) * (1 / 18))), + # P(c|zb) + # The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332. + ("c", ["z", "b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))), + ], +) +def test_absolute_discounting_trigram_score( + absolute_discounting_trigram_model, word, context, expected_score +): + assert ( + pytest.approx(absolute_discounting_trigram_model.score(word, context), 1e-4) + == expected_score + ) + + +@pytest.fixture +def stupid_backoff_trigram_model(trigram_training_data, vocabulary): + model = StupidBackoff(order=3, vocabulary=vocabulary) + model.fit(trigram_training_data) + return model + + +@pytest.mark.parametrize( + "word, context, expected_score", + [ + # For unigram scores revert to uniform + # total bigrams = 18 + ("c", None, 1.0 / 18), + # in vocabulary but unseen + # bigrams ending with z = 0 + ("z", None, 0.0 / 18), + # out of vocabulary should use "UNK" score + # count(''): 3 + ("y", None, 3 / 18), + # c follows 1 time out of 2 after b + ("c", ["b"], 1 / 2), + # c always follows ab + ("c", ["a", "b"], 1 / 1), + # The ngram 'z b c' was not seen, so we backoff to + # the score of the ngram 'b c' * smoothing factor + ("c", ["z", "b"], (0.4 * (1 / 2))), + ], +) +def test_stupid_backoff_trigram_score( + stupid_backoff_trigram_model, word, context, expected_score +): + assert ( + pytest.approx(stupid_backoff_trigram_model.score(word, context), 1e-4) + == expected_score + ) + + +############################################################################### +# Probability Distributions Should Sum up to Unity # +############################################################################### + + +@pytest.fixture(scope="session") +def kneserney_bigram_model(bigram_training_data, vocabulary): + model = KneserNeyInterpolated(order=2, vocabulary=vocabulary) + model.fit(bigram_training_data) + return model + + +@pytest.mark.parametrize( + "model_fixture", + [ + "mle_bigram_model", + "mle_trigram_model", + "lidstone_bigram_model", + "laplace_bigram_model", + "wittenbell_trigram_model", + "absolute_discounting_trigram_model", + "kneserney_bigram_model", + pytest.param( + 
"stupid_backoff_trigram_model", + marks=pytest.mark.xfail( + reason="Stupid Backoff is not a valid distribution" + ), + ), + ], +) +@pytest.mark.parametrize( + "context", + [("a",), ("c",), ("",), ("b",), ("",), ("d",), ("e",), ("r",), ("w",)], + ids=itemgetter(0), +) +def test_sums_to_1(model_fixture, context, request): + model = request.getfixturevalue(model_fixture) + scores_for_context = sum(model.score(w, context) for w in model.vocab) + assert pytest.approx(scores_for_context, 1e-7) == 1.0 + + +############################################################################### +# Generating Text # +############################################################################### + + +def test_generate_one_no_context(mle_trigram_model): + assert mle_trigram_model.generate(random_seed=3) == "" + + +def test_generate_one_from_limiting_context(mle_trigram_model): + # We don't need random_seed for contexts with only one continuation + assert mle_trigram_model.generate(text_seed=["c"]) == "d" + assert mle_trigram_model.generate(text_seed=["b", "c"]) == "d" + assert mle_trigram_model.generate(text_seed=["a", "c"]) == "d" + + +def test_generate_one_from_varied_context(mle_trigram_model): + # When context doesn't limit our options enough, seed the random choice + assert mle_trigram_model.generate(text_seed=("a", ""), random_seed=2) == "a" + + +def test_generate_cycle(mle_trigram_model): + # Add a cycle to the model: bd -> b, db -> d + more_training_text = [padded_everygrams(mle_trigram_model.order, list("bdbdbd"))] + + mle_trigram_model.fit(more_training_text) + # Test that we can escape the cycle + assert mle_trigram_model.generate(7, text_seed=("b", "d"), random_seed=5) == [ + "b", + "d", + "b", + "d", + "b", + "d", + "", + ] + + +def test_generate_with_text_seed(mle_trigram_model): + assert mle_trigram_model.generate(5, text_seed=("", "e"), random_seed=3) == [ + "", + "a", + "d", + "b", + "", + ] + + +def test_generate_oov_text_seed(mle_trigram_model): + assert mle_trigram_model.generate( + text_seed=("aliens",), random_seed=3 + ) == mle_trigram_model.generate(text_seed=("",), random_seed=3) + + +def test_generate_None_text_seed(mle_trigram_model): + # should crash with type error when we try to look it up in vocabulary + with pytest.raises(TypeError): + mle_trigram_model.generate(text_seed=(None,)) + + # This will work + assert mle_trigram_model.generate( + text_seed=None, random_seed=3 + ) == mle_trigram_model.generate(random_seed=3) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..e517a83266fce7c30e2a18c9d0a52a0e1cd1fdfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py @@ -0,0 +1,30 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +import unittest + +from nltk.lm.preprocessing import padded_everygram_pipeline + + +class TestPreprocessing(unittest.TestCase): + def test_padded_everygram_pipeline(self): + expected_train = [ + [ + ("",), + ("", "a"), + ("a",), + ("a", "b"), + ("b",), + ("b", "c"), + ("c",), + ("c", ""), + ("",), + ] + ] + expected_vocab = ["", "a", "b", "c", ""] + train_data, vocab_data = padded_everygram_pipeline(2, [["a", "b", "c"]]) + self.assertEqual([list(sent) for sent in train_data], expected_train) + 
self.assertEqual(list(vocab_data), expected_vocab) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py new file mode 100644 index 0000000000000000000000000000000000000000..39249454f144912d6715b8a396de2caa9619ae18 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py @@ -0,0 +1,156 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT + +import unittest +from collections import Counter +from timeit import timeit + +from nltk.lm import Vocabulary + + +class NgramModelVocabularyTests(unittest.TestCase): + """tests Vocabulary Class""" + + @classmethod + def setUpClass(cls): + cls.vocab = Vocabulary( + ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"], + unk_cutoff=2, + ) + + def test_truthiness(self): + self.assertTrue(self.vocab) + + def test_cutoff_value_set_correctly(self): + self.assertEqual(self.vocab.cutoff, 2) + + def test_unable_to_change_cutoff(self): + with self.assertRaises(AttributeError): + self.vocab.cutoff = 3 + + def test_cutoff_setter_checks_value(self): + with self.assertRaises(ValueError) as exc_info: + Vocabulary("abc", unk_cutoff=0) + expected_error_msg = "Cutoff value cannot be less than 1. Got: 0" + self.assertEqual(expected_error_msg, str(exc_info.exception)) + + def test_counts_set_correctly(self): + self.assertEqual(self.vocab.counts["a"], 2) + self.assertEqual(self.vocab.counts["b"], 2) + self.assertEqual(self.vocab.counts["c"], 1) + + def test_membership_check_respects_cutoff(self): + # a was seen 2 times, so it should be considered part of the vocabulary + self.assertTrue("a" in self.vocab) + # "c" was seen once, it shouldn't be considered part of the vocab + self.assertFalse("c" in self.vocab) + # "z" was never seen at all, also shouldn't be considered in the vocab + self.assertFalse("z" in self.vocab) + + def test_vocab_len_respects_cutoff(self): + # Vocab size is the number of unique tokens that occur at least as often + # as the cutoff value, plus 1 to account for unknown words. 
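+        # In the training tokens above, only "a", "b", "d" and "e" occur at
+        # least twice, so the vocabulary is those four items plus the unknown
+        # label, giving a length of 5.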
+ self.assertEqual(5, len(self.vocab)) + + def test_vocab_iter_respects_cutoff(self): + vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"] + vocab_items = ["a", "b", "d", "e", ""] + + self.assertCountEqual(vocab_counts, list(self.vocab.counts.keys())) + self.assertCountEqual(vocab_items, list(self.vocab)) + + def test_update_empty_vocab(self): + empty = Vocabulary(unk_cutoff=2) + self.assertEqual(len(empty), 0) + self.assertFalse(empty) + self.assertIn(empty.unk_label, empty) + + empty.update(list("abcde")) + self.assertIn(empty.unk_label, empty) + + def test_lookup(self): + self.assertEqual(self.vocab.lookup("a"), "a") + self.assertEqual(self.vocab.lookup("c"), "") + + def test_lookup_iterables(self): + self.assertEqual(self.vocab.lookup(["a", "b"]), ("a", "b")) + self.assertEqual(self.vocab.lookup(("a", "b")), ("a", "b")) + self.assertEqual(self.vocab.lookup(("a", "c")), ("a", "")) + self.assertEqual( + self.vocab.lookup(map(str, range(3))), ("", "", "") + ) + + def test_lookup_empty_iterables(self): + self.assertEqual(self.vocab.lookup(()), ()) + self.assertEqual(self.vocab.lookup([]), ()) + self.assertEqual(self.vocab.lookup(iter([])), ()) + self.assertEqual(self.vocab.lookup(n for n in range(0, 0)), ()) + + def test_lookup_recursive(self): + self.assertEqual( + self.vocab.lookup([["a", "b"], ["a", "c"]]), (("a", "b"), ("a", "")) + ) + self.assertEqual(self.vocab.lookup([["a", "b"], "c"]), (("a", "b"), "")) + self.assertEqual(self.vocab.lookup([[[[["a", "b"]]]]]), ((((("a", "b"),),),),)) + + def test_lookup_None(self): + with self.assertRaises(TypeError): + self.vocab.lookup(None) + with self.assertRaises(TypeError): + list(self.vocab.lookup([None, None])) + + def test_lookup_int(self): + with self.assertRaises(TypeError): + self.vocab.lookup(1) + with self.assertRaises(TypeError): + list(self.vocab.lookup([1, 2])) + + def test_lookup_empty_str(self): + self.assertEqual(self.vocab.lookup(""), "") + + def test_eqality(self): + v1 = Vocabulary(["a", "b", "c"], unk_cutoff=1) + v2 = Vocabulary(["a", "b", "c"], unk_cutoff=1) + v3 = Vocabulary(["a", "b", "c"], unk_cutoff=1, unk_label="blah") + v4 = Vocabulary(["a", "b"], unk_cutoff=1) + + self.assertEqual(v1, v2) + self.assertNotEqual(v1, v3) + self.assertNotEqual(v1, v4) + + def test_str(self): + self.assertEqual( + str(self.vocab), "" + ) + + def test_creation_with_counter(self): + self.assertEqual( + self.vocab, + Vocabulary( + Counter( + ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"] + ), + unk_cutoff=2, + ), + ) + + @unittest.skip( + reason="Test is known to be flaky as it compares (runtime) performance." + ) + def test_len_is_constant(self): + # Given an obviously small and an obviously large vocabulary. + small_vocab = Vocabulary("abcde") + from nltk.corpus.europarl_raw import english + + large_vocab = Vocabulary(english.words()) + + # If we time calling `len` on them. + small_vocab_len_time = timeit("len(small_vocab)", globals=locals()) + large_vocab_len_time = timeit("len(large_vocab)", globals=locals()) + + # The timing should be the same order of magnitude. 
+ self.assertAlmostEqual(small_vocab_len_time, large_vocab_len_time, places=1) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c307b5c9fef99ec3c54318b5cb21af5bbaaf4df9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b857880c714d4298070cd9e9ab21a2af442e30ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a711ae2d9e29b8fa2c4b8221c6d8e6596cde4ef8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af2c7ea5050009930bea1d68e65a7fddd0072f68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0239746e66994a11ac94829f652c37a7789a01e7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm2.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm5.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..639a11f9e4ce56b618ae910b71efd42feef75d42 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm5.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aee7ee5bf9195ccf8e54822277ba47baa5a2568 Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_gdfa.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_gdfa.py new file mode 100644 index 0000000000000000000000000000000000000000..1824be45265762050ad4f61fb181a822d5aaa7a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_gdfa.py @@ -0,0 +1,154 @@ +""" +Tests GDFA alignments +""" + +import unittest + +from nltk.translate.gdfa import grow_diag_final_and + + +class TestGDFA(unittest.TestCase): + def test_from_eflomal_outputs(self): + """ + Testing GDFA with first 10 eflomal outputs from issue #1829 + https://github.com/nltk/nltk/issues/1829 + """ + # Input. + forwards = [ + "0-0 1-2", + "0-0 1-1", + "0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 7-8 9-9 10-10 9-11 11-12 12-13 13-14", + "0-0 1-1 1-2 2-3 3-4 4-5 4-6 5-7 6-8 8-9 9-10", + "0-0 14-1 15-2 16-3 20-5 21-6 22-7 5-8 6-9 7-10 8-11 9-12 10-13 11-14 12-15 13-16 14-17 17-18 18-19 19-20 20-21 23-22 24-23 25-24 26-25 27-27 28-28 29-29 30-30 31-31", + "0-0 1-1 0-2 2-3", + "0-0 2-2 4-4", + "0-0 1-1 2-3 3-4 5-5 7-6 8-7 9-8 10-9 11-10 12-11 13-12 14-13 15-14 16-16 17-17 18-18 19-19 20-20", + "3-0 4-1 6-2 5-3 6-4 7-5 8-6 9-7 10-8 11-9 16-10 9-12 10-13 12-14", + "1-0", + ] + backwards = [ + "0-0 1-2", + "0-0 1-1", + "0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 9-8 10-10 11-12 12-11 13-13", + "0-0 1-2 2-3 3-4 4-6 6-8 7-5 8-7 9-8", + "0-0 1-8 2-9 3-10 4-11 5-12 6-11 8-13 9-14 10-15 11-16 12-17 13-18 14-19 15-20 16-21 17-22 18-23 19-24 20-29 21-30 22-31 23-2 24-3 25-4 26-5 27-5 28-6 29-7 30-28 31-31", + "0-0 1-1 2-3", + "0-0 1-1 2-3 4-4", + "0-0 1-1 2-3 3-4 5-5 7-6 8-7 9-8 10-9 11-10 12-11 13-12 14-13 15-14 16-16 17-17 18-18 19-19 20-16 21-18", + "0-0 1-1 3-2 4-1 5-3 6-4 7-5 8-6 9-7 10-8 11-9 12-8 13-9 14-8 15-9 16-10", + "1-0", + ] + source_lens = [2, 3, 3, 15, 11, 33, 4, 6, 23, 18] + target_lens = [2, 4, 3, 16, 12, 33, 5, 6, 22, 16] + # Expected Output. + expected = [ + [(0, 0), (1, 2)], + [(0, 0), (1, 1)], + [ + (0, 0), + (2, 1), + (3, 2), + (4, 3), + (5, 4), + (6, 5), + (7, 6), + (8, 7), + (10, 10), + (11, 12), + ], + [ + (0, 0), + (1, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (4, 6), + (5, 7), + (6, 8), + (7, 5), + (8, 7), + (8, 9), + (9, 8), + (9, 10), + ], + [ + (0, 0), + (1, 8), + (2, 9), + (3, 10), + (4, 11), + (5, 8), + (6, 9), + (6, 11), + (7, 10), + (8, 11), + (31, 31), + ], + [(0, 0), (0, 2), (1, 1), (2, 3)], + [(0, 0), (1, 1), (2, 2), (2, 3), (4, 4)], + [ + (0, 0), + (1, 1), + (2, 3), + (3, 4), + (5, 5), + (7, 6), + (8, 7), + (9, 8), + (10, 9), + (11, 10), + (12, 11), + (13, 12), + (14, 13), + (15, 14), + (16, 16), + (17, 17), + (18, 18), + (19, 19), + ], + [ + (0, 0), + (1, 1), + (3, 0), + (3, 2), + (4, 1), + (5, 3), + (6, 2), + (6, 4), + (7, 5), + (8, 6), + (9, 7), + (9, 12), + (10, 8), + (10, 13), + (11, 9), + (12, 8), + (12, 14), + (13, 9), + (14, 8), + (15, 9), + (16, 10), + ], + [(1, 0)], + [ + (0, 0), + (1, 1), + (3, 2), + (4, 3), + (5, 4), + (6, 5), + (7, 6), + (9, 10), + (10, 12), + (11, 13), + (12, 14), + (13, 15), + ], + ] + + # Iterate through all 10 examples and check for expected outputs. 
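+        # Added commentary (not in the original test): each alignment string
+        # above is a whitespace-separated list of "i-j" word-index pairs, and
+        # grow_diag_final_and symmetrizes the forward and backward alignments
+        # into the list of index-pair tuples expected here.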
+ for fw, bw, src_len, trg_len, expect in zip( + forwards, backwards, source_lens, target_lens, expected + ): + self.assertListEqual(expect, grow_diag_final_and(src_len, trg_len, fw, bw)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm1.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm1.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f32ef73cae1f789ee587b6d3d214cfeb0e70d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm1.py @@ -0,0 +1,73 @@ +""" +Tests for IBM Model 1 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel1 +from nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel1(unittest.TestCase): + def test_set_uniform_translation_probabilities(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model1 = IBMModel1(corpus, 0) + + # act + model1.set_uniform_probabilities(corpus) + + # assert + # expected_prob = 1.0 / (target vocab size + 1) + self.assertEqual(model1.translation_table["ham"]["eier"], 1.0 / 3) + self.assertEqual(model1.translation_table["eggs"][None], 1.0 / 3) + + def test_set_uniform_translation_probabilities_of_non_domain_values(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model1 = IBMModel1(corpus, 0) + + # act + model1.set_uniform_probabilities(corpus) + + # assert + # examine target words that are not in the training data domain + self.assertEqual(model1.translation_table["parrot"]["eier"], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + None, + ) + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + translation_table["ham"]["räucherschinken"] = 0.98 + + model1 = IBMModel1(corpus, 0) + model1.translation_table = translation_table + + # act + probability = model1.prob_t_a_given_s(alignment_info) + + # assert + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + expected_probability = lexical_translation + self.assertEqual(round(probability, 4), round(expected_probability, 4)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm3.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm3.py new file mode 100644 index 0000000000000000000000000000000000000000..14d89d6d9857f0ff62bb4388cf1e8f04c2f90d46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm3.py @@ -0,0 +1,105 @@ +""" +Tests for IBM Model 3 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel3 +from nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel3(unittest.TestCase): + def 
test_set_uniform_distortion_probabilities(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model3 = IBMModel3(corpus, 0) + + # act + model3.set_uniform_probabilities(corpus) + + # assert + # expected_prob = 1.0 / length of target sentence + self.assertEqual(model3.distortion_table[1][0][3][2], 1.0 / 2) + self.assertEqual(model3.distortion_table[4][2][2][4], 1.0 / 4) + + def test_set_uniform_distortion_probabilities_of_non_domain_values(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model3 = IBMModel3(corpus, 0) + + # act + model3.set_uniform_probabilities(corpus) + + # assert + # examine i and j values that are not in the training data domain + self.assertEqual(model3.distortion_table[0][0][3][2], IBMModel.MIN_PROB) + self.assertEqual(model3.distortion_table[9][2][2][4], IBMModel.MIN_PROB) + self.assertEqual(model3.distortion_table[2][9][2][4], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + [[3], [1], [4], [], [2], [5, 6]], + ) + + distortion_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))) + ) + distortion_table[1][1][5][6] = 0.97 # i -> ich + distortion_table[2][4][5][6] = 0.97 # love -> gern + distortion_table[3][0][5][6] = 0.97 # to -> NULL + distortion_table[4][2][5][6] = 0.97 # eat -> esse + distortion_table[5][5][5][6] = 0.97 # smoked -> räucherschinken + distortion_table[6][5][5][6] = 0.97 # ham -> räucherschinken + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + translation_table["ham"]["räucherschinken"] = 0.98 + + fertility_table = defaultdict(lambda: defaultdict(float)) + fertility_table[1]["ich"] = 0.99 + fertility_table[1]["esse"] = 0.99 + fertility_table[0]["ja"] = 0.99 + fertility_table[1]["gern"] = 0.99 + fertility_table[2]["räucherschinken"] = 0.999 + fertility_table[1][None] = 0.99 + + probabilities = { + "p1": 0.167, + "translation_table": translation_table, + "distortion_table": distortion_table, + "fertility_table": fertility_table, + "alignment_table": None, + } + + model3 = IBMModel3(corpus, 0, probabilities) + + # act + probability = model3.prob_t_a_given_s(alignment_info) + + # assert + null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) + fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + distortion = 0.97 * 0.97 * 0.97 * 0.97 * 0.97 * 0.97 + expected_probability = ( + null_generation * fertility * lexical_translation * distortion + ) + self.assertEqual(round(probability, 4), round(expected_probability, 4)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm4.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm4.py new file mode 100644 index 
0000000000000000000000000000000000000000..674b2bc37aaae3a42711d13f05a9bd9d0b35a717 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm4.py @@ -0,0 +1,120 @@ +""" +Tests for IBM Model 4 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel4 +from nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel4(unittest.TestCase): + def test_set_uniform_distortion_probabilities_of_max_displacements(self): + # arrange + src_classes = {"schinken": 0, "eier": 0, "spam": 1} + trg_classes = {"ham": 0, "eggs": 1, "spam": 2} + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model4 = IBMModel4(corpus, 0, src_classes, trg_classes) + + # act + model4.set_uniform_probabilities(corpus) + + # assert + # number of displacement values = + # 2 *(number of words in longest target sentence - 1) + expected_prob = 1.0 / (2 * (4 - 1)) + + # examine the boundary values for (displacement, src_class, trg_class) + self.assertEqual(model4.head_distortion_table[3][0][0], expected_prob) + self.assertEqual(model4.head_distortion_table[-3][1][2], expected_prob) + self.assertEqual(model4.non_head_distortion_table[3][0], expected_prob) + self.assertEqual(model4.non_head_distortion_table[-3][2], expected_prob) + + def test_set_uniform_distortion_probabilities_of_non_domain_values(self): + # arrange + src_classes = {"schinken": 0, "eier": 0, "spam": 1} + trg_classes = {"ham": 0, "eggs": 1, "spam": 2} + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model4 = IBMModel4(corpus, 0, src_classes, trg_classes) + + # act + model4.set_uniform_probabilities(corpus) + + # assert + # examine displacement values that are not in the training data domain + self.assertEqual(model4.head_distortion_table[4][0][0], IBMModel.MIN_PROB) + self.assertEqual(model4.head_distortion_table[100][1][2], IBMModel.MIN_PROB) + self.assertEqual(model4.non_head_distortion_table[4][0], IBMModel.MIN_PROB) + self.assertEqual(model4.non_head_distortion_table[100][2], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4} + trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4} + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + [[3], [1], [4], [], [2], [5, 6]], + ) + + head_distortion_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(float)) + ) + head_distortion_table[1][None][3] = 0.97 # None, i + head_distortion_table[3][2][4] = 0.97 # ich, eat + head_distortion_table[-2][3][4] = 0.97 # esse, love + head_distortion_table[3][4][1] = 0.97 # gern, smoked + + non_head_distortion_table = defaultdict(lambda: defaultdict(float)) + non_head_distortion_table[1][0] = 0.96 # ham + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + 
translation_table["ham"]["räucherschinken"] = 0.98 + + fertility_table = defaultdict(lambda: defaultdict(float)) + fertility_table[1]["ich"] = 0.99 + fertility_table[1]["esse"] = 0.99 + fertility_table[0]["ja"] = 0.99 + fertility_table[1]["gern"] = 0.99 + fertility_table[2]["räucherschinken"] = 0.999 + fertility_table[1][None] = 0.99 + + probabilities = { + "p1": 0.167, + "translation_table": translation_table, + "head_distortion_table": head_distortion_table, + "non_head_distortion_table": non_head_distortion_table, + "fertility_table": fertility_table, + "alignment_table": None, + } + + model4 = IBMModel4(corpus, 0, src_classes, trg_classes, probabilities) + + # act + probability = model4.prob_t_a_given_s(alignment_info) + + # assert + null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) + fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + distortion = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96 + expected_probability = ( + null_generation * fertility * lexical_translation * distortion + ) + self.assertEqual(round(probability, 4), round(expected_probability, 4)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py new file mode 100644 index 0000000000000000000000000000000000000000..13d8e311c9337266a9cdc1b2ecfd67ef58cfb5b2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py @@ -0,0 +1,20 @@ +import unittest + +from nltk.translate.meteor_score import meteor_score + + +class TestMETEOR(unittest.TestCase): + reference = [["this", "is", "a", "test"], ["this", "is" "test"]] + candidate = ["THIS", "Is", "a", "tEST"] + + def test_meteor(self): + score = meteor_score(self.reference, self.candidate, preprocess=str.lower) + assert score == 0.9921875 + + def test_reference_type_check(self): + str_reference = [" ".join(ref) for ref in self.reference] + self.assertRaises(TypeError, meteor_score, str_reference, self.candidate) + + def test_candidate_type_check(self): + str_candidate = " ".join(self.candidate) + self.assertRaises(TypeError, meteor_score, self.reference, str_candidate) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39eb7195124c71050b5a7fb4dd030fdadf410967 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__init__.py @@ -0,0 +1,52 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Tree Package + +This package may be used for representing hierarchical language +structures, such as syntax trees and morphological trees. 
+""" + +# TODO: add LabelledTree (can be used for dependency trees) + +from nltk.tree.immutable import ( + ImmutableMultiParentedTree, + ImmutableParentedTree, + ImmutableProbabilisticTree, + ImmutableTree, +) +from nltk.tree.parented import MultiParentedTree, ParentedTree +from nltk.tree.parsing import bracket_parse, sinica_parse +from nltk.tree.prettyprinter import TreePrettyPrinter +from nltk.tree.probabilistic import ProbabilisticTree +from nltk.tree.transforms import ( + chomsky_normal_form, + collapse_unary, + un_chomsky_normal_form, +) +from nltk.tree.tree import Tree + +__all__ = [ + "ImmutableMultiParentedTree", + "ImmutableParentedTree", + "ImmutableProbabilisticTree", + "ImmutableTree", + "MultiParentedTree", + "ParentedTree", + "bracket_parse", + "sinica_parse", + "TreePrettyPrinter", + "ProbabilisticTree", + "chomsky_normal_form", + "collapse_unary", + "un_chomsky_normal_form", + "Tree", +] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c96640fa3284985df72561b70f6c940725d4d51 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/immutable.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/immutable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3292a4bf96e8e8ebec07eb77c9df5b4365c42c4a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/immutable.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/parented.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/parented.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72045c089afa7434ee7e1a2f89d8f5da3273976c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/parented.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/parsing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9820b9e0f1c93e3b942a8cd34552151e50604531 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/parsing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/prettyprinter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/prettyprinter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eddd8785f2121bfa76c5c8c2aba98f285a789664 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/prettyprinter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/probabilistic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/probabilistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..496437ddc97f85dc65aec978961a2fe1ae6caa93 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/probabilistic.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/transforms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd6b23990fe4c33128f711342de6f943583dfd4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/transforms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/immutable.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/immutable.py new file mode 100644 index 0000000000000000000000000000000000000000..94e7ef9473a9c02b988db5318f3b282eb153e439 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/immutable.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +from nltk.probability import ProbabilisticMixIn +from nltk.tree.parented import MultiParentedTree, ParentedTree +from nltk.tree.tree import Tree + + +class ImmutableTree(Tree): + def __init__(self, node, children=None): + super().__init__(node, children) + # Precompute our hash value. This ensures that we're really + # immutable. It also means we only have to calculate it once. + try: + self._hash = hash((self._label, tuple(self))) + except (TypeError, ValueError) as e: + raise ValueError( + "%s: node value and children " "must be immutable" % type(self).__name__ + ) from e + + def __setitem__(self, index, value): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __setslice__(self, i, j, value): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __delitem__(self, index): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __delslice__(self, i, j): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __iadd__(self, other): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __imul__(self, other): + raise ValueError("%s may not be modified" % type(self).__name__) + + def append(self, v): + raise ValueError("%s may not be modified" % type(self).__name__) + + def extend(self, v): + raise ValueError("%s may not be modified" % type(self).__name__) + + def pop(self, v=None): + raise ValueError("%s may not be modified" % type(self).__name__) + + def remove(self, v): + raise ValueError("%s may not be modified" % type(self).__name__) + + def reverse(self): + raise ValueError("%s may not be modified" % type(self).__name__) + + def sort(self): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __hash__(self): + return self._hash + + def set_label(self, value): + """ + Set the node label. This will only succeed the first time the + node label is set, which should occur in ImmutableTree.__init__(). 
+ """ + if hasattr(self, "_label"): + raise ValueError("%s may not be modified" % type(self).__name__) + self._label = value + + +class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn): + def __init__(self, node, children=None, **prob_kwargs): + ImmutableTree.__init__(self, node, children) + ProbabilisticMixIn.__init__(self, **prob_kwargs) + self._hash = hash((self._label, tuple(self), self.prob())) + + # We have to patch up these methods to make them work right: + def _frozen_class(self): + return ImmutableProbabilisticTree + + def __repr__(self): + return f"{Tree.__repr__(self)} [{self.prob()}]" + + def __str__(self): + return f"{self.pformat(margin=60)} [{self.prob()}]" + + def copy(self, deep=False): + if not deep: + return type(self)(self._label, self, prob=self.prob()) + else: + return type(self).convert(self) + + @classmethod + def convert(cls, val): + if isinstance(val, Tree): + children = [cls.convert(child) for child in val] + if isinstance(val, ProbabilisticMixIn): + return cls(val._label, children, prob=val.prob()) + else: + return cls(val._label, children, prob=1.0) + else: + return val + + +class ImmutableParentedTree(ImmutableTree, ParentedTree): + pass + + +class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree): + pass + + +__all__ = [ + "ImmutableProbabilisticTree", + "ImmutableTree", + "ImmutableParentedTree", + "ImmutableMultiParentedTree", +] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/parented.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/parented.py new file mode 100644 index 0000000000000000000000000000000000000000..c43788f1a79902d02c10b0f699cd2f6026b2d646 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/parented.py @@ -0,0 +1,590 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +import warnings +from abc import ABCMeta, abstractmethod + +from nltk.tree.tree import Tree +from nltk.util import slice_bounds + + +###################################################################### +## Parented trees +###################################################################### +class AbstractParentedTree(Tree, metaclass=ABCMeta): + """ + An abstract base class for a ``Tree`` that automatically maintains + pointers to parent nodes. These parent pointers are updated + whenever any change is made to a tree's structure. Two subclasses + are currently defined: + + - ``ParentedTree`` is used for tree structures where each subtree + has at most one parent. This class should be used in cases + where there is no"sharing" of subtrees. + + - ``MultiParentedTree`` is used for tree structures where a + subtree may have zero or more parents. This class should be + used in cases where subtrees may be shared. + + Subclassing + =========== + The ``AbstractParentedTree`` class redefines all operations that + modify a tree's structure to call two methods, which are used by + subclasses to update parent information: + + - ``_setparent()`` is called whenever a new child is added. + - ``_delparent()`` is called whenever a child is removed. + """ + + def __init__(self, node, children=None): + super().__init__(node, children) + # If children is None, the tree is read from node, and + # all parents will be set during parsing. + if children is not None: + # Otherwise we have to set the parent of the children. 
+ # Iterate over self, and *not* children, because children + # might be an iterator. + for i, child in enumerate(self): + if isinstance(child, Tree): + self._setparent(child, i, dry_run=True) + for i, child in enumerate(self): + if isinstance(child, Tree): + self._setparent(child, i) + + # //////////////////////////////////////////////////////////// + # Parent management + # //////////////////////////////////////////////////////////// + @abstractmethod + def _setparent(self, child, index, dry_run=False): + """ + Update the parent pointer of ``child`` to point to ``self``. This + method is only called if the type of ``child`` is ``Tree``; + i.e., it is not called when adding a leaf to a tree. This method + is always called before the child is actually added to the + child list of ``self``. + + :type child: Tree + :type index: int + :param index: The index of ``child`` in ``self``. + :raise TypeError: If ``child`` is a tree with an impropriate + type. Typically, if ``child`` is a tree, then its type needs + to match the type of ``self``. This prevents mixing of + different tree types (single-parented, multi-parented, and + non-parented). + :param dry_run: If true, the don't actually set the child's + parent pointer; just check for any error conditions, and + raise an exception if one is found. + """ + + @abstractmethod + def _delparent(self, child, index): + """ + Update the parent pointer of ``child`` to not point to self. This + method is only called if the type of ``child`` is ``Tree``; i.e., it + is not called when removing a leaf from a tree. This method + is always called before the child is actually removed from the + child list of ``self``. + + :type child: Tree + :type index: int + :param index: The index of ``child`` in ``self``. + """ + + # //////////////////////////////////////////////////////////// + # Methods that add/remove children + # //////////////////////////////////////////////////////////// + # Every method that adds or removes a child must make + # appropriate calls to _setparent() and _delparent(). + + def __delitem__(self, index): + # del ptree[start:stop] + if isinstance(index, slice): + start, stop, step = slice_bounds(self, index, allow_step=True) + # Clear all the children pointers. + for i in range(start, stop, step): + if isinstance(self[i], Tree): + self._delparent(self[i], i) + # Delete the children from our child list. + super().__delitem__(index) + + # del ptree[i] + elif isinstance(index, int): + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + # Clear the child's parent pointer. + if isinstance(self[index], Tree): + self._delparent(self[index], index) + # Remove the child from our child list. + super().__delitem__(index) + + elif isinstance(index, (list, tuple)): + # del ptree[()] + if len(index) == 0: + raise IndexError("The tree position () may not be deleted.") + # del ptree[(i,)] + elif len(index) == 1: + del self[index[0]] + # del ptree[i1, i2, i3] + else: + del self[index[0]][index[1:]] + + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def __setitem__(self, index, value): + # ptree[start:stop] = value + if isinstance(index, slice): + start, stop, step = slice_bounds(self, index, allow_step=True) + # make a copy of value, in case it's an iterator + if not isinstance(value, (list, tuple)): + value = list(value) + # Check for any error conditions, so we can avoid ending + # up in an inconsistent state if an error does occur. 
+ for i, child in enumerate(value): + if isinstance(child, Tree): + self._setparent(child, start + i * step, dry_run=True) + # clear the child pointers of all parents we're removing + for i in range(start, stop, step): + if isinstance(self[i], Tree): + self._delparent(self[i], i) + # set the child pointers of the new children. We do this + # after clearing *all* child pointers, in case we're e.g. + # reversing the elements in a tree. + for i, child in enumerate(value): + if isinstance(child, Tree): + self._setparent(child, start + i * step) + # finally, update the content of the child list itself. + super().__setitem__(index, value) + + # ptree[i] = value + elif isinstance(index, int): + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + # if the value is not changing, do nothing. + if value is self[index]: + return + # Set the new child's parent pointer. + if isinstance(value, Tree): + self._setparent(value, index) + # Remove the old child's parent pointer + if isinstance(self[index], Tree): + self._delparent(self[index], index) + # Update our child list. + super().__setitem__(index, value) + + elif isinstance(index, (list, tuple)): + # ptree[()] = value + if len(index) == 0: + raise IndexError("The tree position () may not be assigned to.") + # ptree[(i,)] = value + elif len(index) == 1: + self[index[0]] = value + # ptree[i1, i2, i3] = value + else: + self[index[0]][index[1:]] = value + + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def append(self, child): + if isinstance(child, Tree): + self._setparent(child, len(self)) + super().append(child) + + def extend(self, children): + for child in children: + if isinstance(child, Tree): + self._setparent(child, len(self)) + super().append(child) + + def insert(self, index, child): + # Handle negative indexes. Note that if index < -len(self), + # we do *not* raise an IndexError, unlike __getitem__. This + # is done for consistency with list.__getitem__ and list.index. + if index < 0: + index += len(self) + if index < 0: + index = 0 + # Set the child's parent, and update our child list. + if isinstance(child, Tree): + self._setparent(child, index) + super().insert(index, child) + + def pop(self, index=-1): + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + if isinstance(self[index], Tree): + self._delparent(self[index], index) + return super().pop(index) + + # n.b.: like `list`, this is done by equality, not identity! + # To remove a specific child, use del ptree[i]. + def remove(self, child): + index = self.index(child) + if isinstance(self[index], Tree): + self._delparent(self[index], index) + super().remove(child) + + # We need to implement __getslice__ and friends, even though + # they're deprecated, because otherwise list.__getslice__ will get + # called (since we're subclassing from list). Just delegate to + # __getitem__ etc., but use max(0, start) and max(0, stop) because + # because negative indices are already handled *before* + # __getslice__ is called; and we don't want to double-count them. 
+ if hasattr(list, "__getslice__"): + + def __getslice__(self, start, stop): + return self.__getitem__(slice(max(0, start), max(0, stop))) + + def __delslice__(self, start, stop): + return self.__delitem__(slice(max(0, start), max(0, stop))) + + def __setslice__(self, start, stop, value): + return self.__setitem__(slice(max(0, start), max(0, stop)), value) + + def __getnewargs__(self): + """Method used by the pickle module when un-pickling. + This method provides the arguments passed to ``__new__`` + upon un-pickling. Without this method, ParentedTree instances + cannot be pickled and unpickled in Python 3.7+ onwards. + + :return: Tuple of arguments for ``__new__``, i.e. the label + and the children of this node. + :rtype: Tuple[Any, List[AbstractParentedTree]] + """ + return (self._label, list(self)) + + +class ParentedTree(AbstractParentedTree): + """ + A ``Tree`` that automatically maintains parent pointers for + single-parented trees. The following are methods for querying + the structure of a parented tree: ``parent``, ``parent_index``, + ``left_sibling``, ``right_sibling``, ``root``, ``treeposition``. + + Each ``ParentedTree`` may have at most one parent. In + particular, subtrees may not be shared. Any attempt to reuse a + single ``ParentedTree`` as a child of more than one parent (or + as multiple children of the same parent) will cause a + ``ValueError`` exception to be raised. + + ``ParentedTrees`` should never be used in the same tree as ``Trees`` + or ``MultiParentedTrees``. Mixing tree implementations may result + in incorrect parent pointers and in ``TypeError`` exceptions. + """ + + def __init__(self, node, children=None): + self._parent = None + """The parent of this Tree, or None if it has no parent.""" + super().__init__(node, children) + if children is None: + # If children is None, the tree is read from node. + # After parsing, the parent of the immediate children + # will point to an intermediate tree, not self. + # We fix this by brute force: + for i, child in enumerate(self): + if isinstance(child, Tree): + child._parent = None + self._setparent(child, i) + + def _frozen_class(self): + from nltk.tree.immutable import ImmutableParentedTree + + return ImmutableParentedTree + + def copy(self, deep=False): + if not deep: + warnings.warn( + f"{self.__class__.__name__} objects do not support shallow copies. Defaulting to a deep copy." + ) + return super().copy(deep=True) + + # ///////////////////////////////////////////////////////////////// + # Methods + # ///////////////////////////////////////////////////////////////// + + def parent(self): + """The parent of this tree, or None if it has no parent.""" + return self._parent + + def parent_index(self): + """ + The index of this tree in its parent. I.e., + ``ptree.parent()[ptree.parent_index()] is ptree``. Note that + ``ptree.parent_index()`` is not necessarily equal to + ``ptree.parent.index(ptree)``, since the ``index()`` method + returns the first child that is equal to its argument. + """ + if self._parent is None: + return None + for i, child in enumerate(self._parent): + if child is self: + return i + assert False, "expected to find self in self._parent!" 
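As a quick, hedged orientation for the navigation methods listed in the ParentedTree class docstring above, here is a minimal usage sketch; the sentence, labels, and variable names are invented example data, not part of this changeset:

    from nltk.tree import ParentedTree

    # ParentedTree.fromstring builds every node as a ParentedTree,
    # so parent pointers are maintained from the start.
    t = ParentedTree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD barked)))")
    np, vp = t[0], t[1]

    np.parent() is t           # True
    np.parent_index()          # 0
    np.right_sibling() is vp   # True
    vp.left_sibling() is np    # True
    t[0][1].treeposition()     # (0, 1), i.e. t[0][1] is the (NN dog) subtree
    t[0][1].root() is t        # True

    # Reusing a subtree under a second parent raises ValueError, since each
    # ParentedTree may have at most one parent (see _setparent further below).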
+ + def left_sibling(self): + """The left sibling of this tree, or None if it has none.""" + parent_index = self.parent_index() + if self._parent and parent_index > 0: + return self._parent[parent_index - 1] + return None # no left sibling + + def right_sibling(self): + """The right sibling of this tree, or None if it has none.""" + parent_index = self.parent_index() + if self._parent and parent_index < (len(self._parent) - 1): + return self._parent[parent_index + 1] + return None # no right sibling + + def root(self): + """ + The root of this tree. I.e., the unique ancestor of this tree + whose parent is None. If ``ptree.parent()`` is None, then + ``ptree`` is its own root. + """ + root = self + while root.parent() is not None: + root = root.parent() + return root + + def treeposition(self): + """ + The tree position of this tree, relative to the root of the + tree. I.e., ``ptree.root[ptree.treeposition] is ptree``. + """ + if self.parent() is None: + return () + else: + return self.parent().treeposition() + (self.parent_index(),) + + # ///////////////////////////////////////////////////////////////// + # Parent Management + # ///////////////////////////////////////////////////////////////// + + def _delparent(self, child, index): + # Sanity checks + assert isinstance(child, ParentedTree) + assert self[index] is child + assert child._parent is self + + # Delete child's parent pointer. + child._parent = None + + def _setparent(self, child, index, dry_run=False): + # If the child's type is incorrect, then complain. + if not isinstance(child, ParentedTree): + raise TypeError("Can not insert a non-ParentedTree into a ParentedTree") + + # If child already has a parent, then complain. + if hasattr(child, "_parent") and child._parent is not None: + raise ValueError("Can not insert a subtree that already has a parent.") + + # Set child's parent pointer & index. + if not dry_run: + child._parent = self + + +class MultiParentedTree(AbstractParentedTree): + """ + A ``Tree`` that automatically maintains parent pointers for + multi-parented trees. The following are methods for querying the + structure of a multi-parented tree: ``parents()``, ``parent_indices()``, + ``left_siblings()``, ``right_siblings()``, ``roots``, ``treepositions``. + + Each ``MultiParentedTree`` may have zero or more parents. In + particular, subtrees may be shared. If a single + ``MultiParentedTree`` is used as multiple children of the same + parent, then that parent will appear multiple times in its + ``parents()`` method. + + ``MultiParentedTrees`` should never be used in the same tree as + ``Trees`` or ``ParentedTrees``. Mixing tree implementations may + result in incorrect parent pointers and in ``TypeError`` exceptions. + """ + + def __init__(self, node, children=None): + self._parents = [] + """A list of this tree's parents. This list should not + contain duplicates, even if a parent contains this tree + multiple times.""" + super().__init__(node, children) + if children is None: + # If children is None, the tree is read from node. + # After parsing, the parent(s) of the immediate children + # will point to an intermediate tree, not self. 
+ # We fix this by brute force: + for i, child in enumerate(self): + if isinstance(child, Tree): + child._parents = [] + self._setparent(child, i) + + def _frozen_class(self): + from nltk.tree.immutable import ImmutableMultiParentedTree + + return ImmutableMultiParentedTree + + # ///////////////////////////////////////////////////////////////// + # Methods + # ///////////////////////////////////////////////////////////////// + + def parents(self): + """ + The set of parents of this tree. If this tree has no parents, + then ``parents`` is the empty set. To check if a tree is used + as multiple children of the same parent, use the + ``parent_indices()`` method. + + :type: list(MultiParentedTree) + """ + return list(self._parents) + + def left_siblings(self): + """ + A list of all left siblings of this tree, in any of its parent + trees. A tree may be its own left sibling if it is used as + multiple contiguous children of the same parent. A tree may + appear multiple times in this list if it is the left sibling + of this tree with respect to multiple parents. + + :type: list(MultiParentedTree) + """ + return [ + parent[index - 1] + for (parent, index) in self._get_parent_indices() + if index > 0 + ] + + def right_siblings(self): + """ + A list of all right siblings of this tree, in any of its parent + trees. A tree may be its own right sibling if it is used as + multiple contiguous children of the same parent. A tree may + appear multiple times in this list if it is the right sibling + of this tree with respect to multiple parents. + + :type: list(MultiParentedTree) + """ + return [ + parent[index + 1] + for (parent, index) in self._get_parent_indices() + if index < (len(parent) - 1) + ] + + def _get_parent_indices(self): + return [ + (parent, index) + for parent in self._parents + for index, child in enumerate(parent) + if child is self + ] + + def roots(self): + """ + The set of all roots of this tree. This set is formed by + tracing all possible parent paths until trees with no parents + are found. + + :type: list(MultiParentedTree) + """ + return list(self._get_roots_helper({}).values()) + + def _get_roots_helper(self, result): + if self._parents: + for parent in self._parents: + parent._get_roots_helper(result) + else: + result[id(self)] = self + return result + + def parent_indices(self, parent): + """ + Return a list of the indices where this tree occurs as a child + of ``parent``. If this child does not occur as a child of + ``parent``, then the empty list is returned. The following is + always true:: + + for parent_index in ptree.parent_indices(parent): + parent[parent_index] is ptree + """ + if parent not in self._parents: + return [] + else: + return [index for (index, child) in enumerate(parent) if child is self] + + def treepositions(self, root): + """ + Return a list of all tree positions that can be used to reach + this multi-parented tree starting from ``root``. 
I.e., the + following is always true:: + + for treepos in ptree.treepositions(root): + root[treepos] is ptree + """ + if self is root: + return [()] + else: + return [ + treepos + (index,) + for parent in self._parents + for treepos in parent.treepositions(root) + for (index, child) in enumerate(parent) + if child is self + ] + + # ///////////////////////////////////////////////////////////////// + # Parent Management + # ///////////////////////////////////////////////////////////////// + + def _delparent(self, child, index): + # Sanity checks + assert isinstance(child, MultiParentedTree) + assert self[index] is child + assert len([p for p in child._parents if p is self]) == 1 + + # If the only copy of child in self is at index, then delete + # self from child's parent list. + for i, c in enumerate(self): + if c is child and i != index: + break + else: + child._parents.remove(self) + + def _setparent(self, child, index, dry_run=False): + # If the child's type is incorrect, then complain. + if not isinstance(child, MultiParentedTree): + raise TypeError( + "Can not insert a non-MultiParentedTree into a MultiParentedTree" + ) + + # Add self as a parent pointer if it's not already listed. + if not dry_run: + for parent in child._parents: + if parent is self: + break + else: + child._parents.append(self) + + +__all__ = [ + "ParentedTree", + "MultiParentedTree", +] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/parsing.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..c2df4e166832b940eb5e961a9e7bf685d95b5e63 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/parsing.py @@ -0,0 +1,66 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.tree.tree import Tree + +###################################################################### +## Parsing +###################################################################### + + +def bracket_parse(s): + """ + Use Tree.read(s, remove_empty_top_bracketing=True) instead. + """ + raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.") + + +def sinica_parse(s): + """ + Parse a Sinica Treebank string and return a tree. Trees are represented as nested brackettings, + as shown in the following example (X represents a Chinese character): + S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY) + + :return: A tree corresponding to the string representation. 
+ :rtype: Tree + :param s: The string to be converted + :type s: str + """ + tokens = re.split(r"([()| ])", s) + for i in range(len(tokens)): + if tokens[i] == "(": + tokens[i - 1], tokens[i] = ( + tokens[i], + tokens[i - 1], + ) # pull nonterminal inside parens + elif ":" in tokens[i]: + fields = tokens[i].split(":") + if len(fields) == 2: # non-terminal + tokens[i] = fields[1] + else: + tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")" + elif tokens[i] == "|": + tokens[i] = "" + + treebank_string = " ".join(tokens) + return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True) + + +# s = re.sub(r'^#[^\s]*\s', '', s) # remove leading identifier +# s = re.sub(r'\w+:', '', s) # remove role tags + +# return s + +__all__ = [ + "bracket_parse", + "sinica_parse", +] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/prettyprinter.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/prettyprinter.py new file mode 100644 index 0000000000000000000000000000000000000000..33e1a93208b17a48e3ff4bbcbbb1d4017e89f198 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/prettyprinter.py @@ -0,0 +1,627 @@ +# Natural Language Toolkit: ASCII visualization of NLTK trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Andreas van Cranenburgh +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Pretty-printing of discontinuous trees. +Adapted from the disco-dop project, by Andreas van Cranenburgh. +https://github.com/andreasvc/disco-dop + +Interesting reference (not used for this code): +T. Eschbach et al., Orth. Hypergraph Drawing, Journal of +Graph Algorithms and Applications, 10(2) 141--157 (2006)149. +https://jgaa.info/accepted/2006/EschbachGuentherBecker2006.10.2.pdf +""" + +import re + +try: + from html import escape +except ImportError: + from cgi import escape + +from collections import defaultdict +from operator import itemgetter + +from nltk.tree.tree import Tree +from nltk.util import OrderedDict + +ANSICOLOR = { + "black": 30, + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, +} + + +class TreePrettyPrinter: + """ + Pretty-print a tree in text format, either as ASCII or Unicode. + The tree can be a normal tree, or discontinuous. + + ``TreePrettyPrinter(tree, sentence=None, highlight=())`` + creates an object from which different visualizations can be created. + + :param tree: a Tree object. + :param sentence: a list of words (strings). If `sentence` is given, + `tree` must contain integers as leaves, which are taken as indices + in `sentence`. Using this you can display a discontinuous tree. + :param highlight: Optionally, a sequence of Tree objects in `tree` which + should be highlighted. Has the effect of only applying colors to nodes + in this sequence (nodes should be given as Tree objects, terminals as + indices). + + >>> from nltk.tree import Tree + >>> tree = Tree.fromstring('(S (NP Mary) (VP walks))') + >>> print(TreePrettyPrinter(tree).text()) + ... # doctest: +NORMALIZE_WHITESPACE + S + ____|____ + NP VP + | | + Mary walks + """ + + def __init__(self, tree, sentence=None, highlight=()): + if sentence is None: + leaves = tree.leaves() + if ( + leaves + and all(len(a) > 0 for a in tree.subtrees()) + and all(isinstance(a, int) for a in leaves) + ): + sentence = [str(a) for a in leaves] + else: + # this deals with empty nodes (frontier non-terminals) + # and multiple/mixed terminals under non-terminals. 
+ tree = tree.copy(True) + sentence = [] + for a in tree.subtrees(): + if len(a) == 0: + a.append(len(sentence)) + sentence.append(None) + elif any(not isinstance(b, Tree) for b in a): + for n, b in enumerate(a): + if not isinstance(b, Tree): + a[n] = len(sentence) + if type(b) == tuple: + b = "/".join(b) + sentence.append("%s" % b) + self.nodes, self.coords, self.edges, self.highlight = self.nodecoords( + tree, sentence, highlight + ) + + def __str__(self): + return self.text() + + def __repr__(self): + return "" % len(self.nodes) + + @staticmethod + def nodecoords(tree, sentence, highlight): + """ + Produce coordinates of nodes on a grid. + + Objective: + + - Produce coordinates for a non-overlapping placement of nodes and + horizontal lines. + - Order edges so that crossing edges cross a minimal number of previous + horizontal lines (never vertical lines). + + Approach: + + - bottom up level order traversal (start at terminals) + - at each level, identify nodes which cannot be on the same row + - identify nodes which cannot be in the same column + - place nodes into a grid at (row, column) + - order child-parent edges with crossing edges last + + Coordinates are (row, column); the origin (0, 0) is at the top left; + the root node is on row 0. Coordinates do not consider the size of a + node (which depends on font, &c), so the width of a column of the grid + should be automatically determined by the element with the greatest + width in that column. Alternatively, the integer coordinates could be + converted to coordinates in which the distances between adjacent nodes + are non-uniform. + + Produces tuple (nodes, coords, edges, highlighted) where: + + - nodes[id]: Tree object for the node with this integer id + - coords[id]: (n, m) coordinate where to draw node with id in the grid + - edges[id]: parent id of node with this id (ordered dictionary) + - highlighted: set of ids that should be highlighted + """ + + def findcell(m, matrix, startoflevel, children): + """ + Find vacant row, column index for node ``m``. + Iterate over current rows for this level (try lowest first) + and look for cell between first and last child of this node, + add new row to level if no free row available. 
+ """ + candidates = [a for _, a in children[m]] + minidx, maxidx = min(candidates), max(candidates) + leaves = tree[m].leaves() + center = scale * sum(leaves) // len(leaves) # center of gravity + if minidx < maxidx and not minidx < center < maxidx: + center = sum(candidates) // len(candidates) + if max(candidates) - min(candidates) > 2 * scale: + center -= center % scale # round to unscaled coordinate + if minidx < maxidx and not minidx < center < maxidx: + center += scale + if ids[m] == 0: + startoflevel = len(matrix) + for rowidx in range(startoflevel, len(matrix) + 1): + if rowidx == len(matrix): # need to add a new row + matrix.append( + [ + vertline if a not in (corner, None) else None + for a in matrix[-1] + ] + ) + row = matrix[rowidx] + if len(children[m]) == 1: # place unaries directly above child + return rowidx, next(iter(children[m]))[1] + elif all( + a is None or a == vertline + for a in row[min(candidates) : max(candidates) + 1] + ): + # find free column + for n in range(scale): + i = j = center + n + while j > minidx or i < maxidx: + if i < maxidx and ( + matrix[rowidx][i] is None or i in candidates + ): + return rowidx, i + elif j > minidx and ( + matrix[rowidx][j] is None or j in candidates + ): + return rowidx, j + i += scale + j -= scale + raise ValueError( + "could not find a free cell for:\n%s\n%s" + "min=%d; max=%d" % (tree[m], minidx, maxidx, dumpmatrix()) + ) + + def dumpmatrix(): + """Dump matrix contents for debugging purposes.""" + return "\n".join( + "%2d: %s" % (n, " ".join(("%2r" % i)[:2] for i in row)) + for n, row in enumerate(matrix) + ) + + leaves = tree.leaves() + if not all(isinstance(n, int) for n in leaves): + raise ValueError("All leaves must be integer indices.") + if len(leaves) != len(set(leaves)): + raise ValueError("Indices must occur at most once.") + if not all(0 <= n < len(sentence) for n in leaves): + raise ValueError( + "All leaves must be in the interval 0..n " + "with n=len(sentence)\ntokens: %d indices: " + "%r\nsentence: %s" % (len(sentence), tree.leaves(), sentence) + ) + vertline, corner = -1, -2 # constants + tree = tree.copy(True) + for a in tree.subtrees(): + a.sort(key=lambda n: min(n.leaves()) if isinstance(n, Tree) else n) + scale = 2 + crossed = set() + # internal nodes and lexical nodes (no frontiers) + positions = tree.treepositions() + maxdepth = max(map(len, positions)) + 1 + childcols = defaultdict(set) + matrix = [[None] * (len(sentence) * scale)] + nodes = {} + ids = {a: n for n, a in enumerate(positions)} + highlighted_nodes = { + n for a, n in ids.items() if not highlight or tree[a] in highlight + } + levels = {n: [] for n in range(maxdepth - 1)} + terminals = [] + for a in positions: + node = tree[a] + if isinstance(node, Tree): + levels[maxdepth - node.height()].append(a) + else: + terminals.append(a) + + for n in levels: + levels[n].sort(key=lambda n: max(tree[n].leaves()) - min(tree[n].leaves())) + terminals.sort() + positions = set(positions) + + for m in terminals: + i = int(tree[m]) * scale + assert matrix[0][i] is None, (matrix[0][i], m, i) + matrix[0][i] = ids[m] + nodes[ids[m]] = sentence[tree[m]] + if nodes[ids[m]] is None: + nodes[ids[m]] = "..." + highlighted_nodes.discard(ids[m]) + positions.remove(m) + childcols[m[:-1]].add((0, i)) + + # add other nodes centered on their children, + # if the center is already taken, back off + # to the left and right alternately, until an empty cell is found. 
+ for n in sorted(levels, reverse=True): + nodesatdepth = levels[n] + startoflevel = len(matrix) + matrix.append( + [vertline if a not in (corner, None) else None for a in matrix[-1]] + ) + for m in nodesatdepth: # [::-1]: + if n < maxdepth - 1 and childcols[m]: + _, pivot = min(childcols[m], key=itemgetter(1)) + if { + a[:-1] + for row in matrix[:-1] + for a in row[:pivot] + if isinstance(a, tuple) + } & { + a[:-1] + for row in matrix[:-1] + for a in row[pivot:] + if isinstance(a, tuple) + }: + crossed.add(m) + + rowidx, i = findcell(m, matrix, startoflevel, childcols) + positions.remove(m) + + # block positions where children of this node branch out + for _, x in childcols[m]: + matrix[rowidx][x] = corner + # assert m == () or matrix[rowidx][i] in (None, corner), ( + # matrix[rowidx][i], m, str(tree), ' '.join(sentence)) + # node itself + matrix[rowidx][i] = ids[m] + nodes[ids[m]] = tree[m] + # add column to the set of children for its parent + if len(m) > 0: + childcols[m[:-1]].add((rowidx, i)) + assert len(positions) == 0 + + # remove unused columns, right to left + for m in range(scale * len(sentence) - 1, -1, -1): + if not any(isinstance(row[m], (Tree, int)) for row in matrix): + for row in matrix: + del row[m] + + # remove unused rows, reverse + matrix = [ + row + for row in reversed(matrix) + if not all(a is None or a == vertline for a in row) + ] + + # collect coordinates of nodes + coords = {} + for n, _ in enumerate(matrix): + for m, i in enumerate(matrix[n]): + if isinstance(i, int) and i >= 0: + coords[i] = n, m + + # move crossed edges last + positions = sorted( + (a for level in levels.values() for a in level), + key=lambda a: a[:-1] in crossed, + ) + + # collect edges from node to node + edges = OrderedDict() + for i in reversed(positions): + for j, _ in enumerate(tree[i]): + edges[ids[i + (j,)]] = ids[i] + + return nodes, coords, edges, highlighted_nodes + + def text( + self, + nodedist=1, + unicodelines=False, + html=False, + ansi=False, + nodecolor="blue", + leafcolor="red", + funccolor="green", + abbreviate=None, + maxwidth=16, + ): + """ + :return: ASCII art for a discontinuous tree. + + :param unicodelines: whether to use Unicode line drawing characters + instead of plain (7-bit) ASCII. + :param html: whether to wrap output in html code (default plain text). + :param ansi: whether to produce colors with ANSI escape sequences + (only effective when html==False). + :param leafcolor, nodecolor: specify colors of leaves and phrasal + nodes; effective when either html or ansi is True. + :param abbreviate: if True, abbreviate labels longer than 5 characters. + If integer, abbreviate labels longer than `abbr` characters. + :param maxwidth: maximum number of characters before a label starts to + wrap; pass None to disable. + """ + if abbreviate == True: + abbreviate = 5 + if unicodelines: + horzline = "\u2500" + leftcorner = "\u250c" + rightcorner = "\u2510" + vertline = " \u2502 " + tee = horzline + "\u252C" + horzline + bottom = horzline + "\u2534" + horzline + cross = horzline + "\u253c" + horzline + ellipsis = "\u2026" + else: + horzline = "_" + leftcorner = rightcorner = " " + vertline = " | " + tee = 3 * horzline + cross = bottom = "_|_" + ellipsis = "." 
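For context on how the text() options documented above combine in practice, a brief usage sketch follows; the tree is an invented example and the exact column spacing of the output depends on label widths:

    from nltk.tree import Tree
    from nltk.tree.prettyprinter import TreePrettyPrinter

    t = Tree.fromstring("(S (NP (DT the) (NN caterpillar)) (VP (VBD disappeared)))")
    tpp = TreePrettyPrinter(t)
    print(tpp.text())                               # plain 7-bit ASCII branches
    print(tpp.text(unicodelines=True, nodedist=2))  # Unicode box-drawing characters
    print(tpp.text(maxwidth=8, abbreviate=True))    # wrap and shorten long labels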
+ + def crosscell(cur, x=vertline): + """Overwrite center of this cell with a vertical branch.""" + splitl = len(cur) - len(cur) // 2 - len(x) // 2 - 1 + lst = list(cur) + lst[splitl : splitl + len(x)] = list(x) + return "".join(lst) + + result = [] + matrix = defaultdict(dict) + maxnodewith = defaultdict(lambda: 3) + maxnodeheight = defaultdict(lambda: 1) + maxcol = 0 + minchildcol = {} + maxchildcol = {} + childcols = defaultdict(set) + labels = {} + wrapre = re.compile( + "(.{%d,%d}\\b\\W*|.{%d})" % (maxwidth - 4, maxwidth, maxwidth) + ) + # collect labels and coordinates + for a in self.nodes: + row, column = self.coords[a] + matrix[row][column] = a + maxcol = max(maxcol, column) + label = ( + self.nodes[a].label() + if isinstance(self.nodes[a], Tree) + else self.nodes[a] + ) + if abbreviate and len(label) > abbreviate: + label = label[:abbreviate] + ellipsis + if maxwidth and len(label) > maxwidth: + label = wrapre.sub(r"\1\n", label).strip() + label = label.split("\n") + maxnodeheight[row] = max(maxnodeheight[row], len(label)) + maxnodewith[column] = max(maxnodewith[column], max(map(len, label))) + labels[a] = label + if a not in self.edges: + continue # e.g., root + parent = self.edges[a] + childcols[parent].add((row, column)) + minchildcol[parent] = min(minchildcol.get(parent, column), column) + maxchildcol[parent] = max(maxchildcol.get(parent, column), column) + # bottom up level order traversal + for row in sorted(matrix, reverse=True): + noderows = [ + ["".center(maxnodewith[col]) for col in range(maxcol + 1)] + for _ in range(maxnodeheight[row]) + ] + branchrow = ["".center(maxnodewith[col]) for col in range(maxcol + 1)] + for col in matrix[row]: + n = matrix[row][col] + node = self.nodes[n] + text = labels[n] + if isinstance(node, Tree): + # draw horizontal branch towards children for this node + if n in minchildcol and minchildcol[n] < maxchildcol[n]: + i, j = minchildcol[n], maxchildcol[n] + a, b = (maxnodewith[i] + 1) // 2 - 1, maxnodewith[j] // 2 + branchrow[i] = ((" " * a) + leftcorner).ljust( + maxnodewith[i], horzline + ) + branchrow[j] = (rightcorner + (" " * b)).rjust( + maxnodewith[j], horzline + ) + for i in range(minchildcol[n] + 1, maxchildcol[n]): + if i == col and any(a == i for _, a in childcols[n]): + line = cross + elif i == col: + line = bottom + elif any(a == i for _, a in childcols[n]): + line = tee + else: + line = horzline + branchrow[i] = line.center(maxnodewith[i], horzline) + else: # if n and n in minchildcol: + branchrow[col] = crosscell(branchrow[col]) + text = [a.center(maxnodewith[col]) for a in text] + color = nodecolor if isinstance(node, Tree) else leafcolor + if isinstance(node, Tree) and node.label().startswith("-"): + color = funccolor + if html: + text = [escape(a, quote=False) for a in text] + if n in self.highlight: + text = [f"{a}" for a in text] + elif ansi and n in self.highlight: + text = ["\x1b[%d;1m%s\x1b[0m" % (ANSICOLOR[color], a) for a in text] + for x in range(maxnodeheight[row]): + # draw vertical lines in partially filled multiline node + # labels, but only if it's not a frontier node. + noderows[x][col] = ( + text[x] + if x < len(text) + else (vertline if childcols[n] else " ").center( + maxnodewith[col], " " + ) + ) + # for each column, if there is a node below us which has a parent + # above us, draw a vertical branch in that column. 
+ if row != max(matrix): + for n, (childrow, col) in self.coords.items(): + if n > 0 and self.coords[self.edges[n]][0] < row < childrow: + branchrow[col] = crosscell(branchrow[col]) + if col not in matrix[row]: + for noderow in noderows: + noderow[col] = crosscell(noderow[col]) + branchrow = [ + a + ((a[-1] if a[-1] != " " else b[0]) * nodedist) + for a, b in zip(branchrow, branchrow[1:] + [" "]) + ] + result.append("".join(branchrow)) + result.extend( + (" " * nodedist).join(noderow) for noderow in reversed(noderows) + ) + return "\n".join(reversed(result)) + "\n" + + def svg(self, nodecolor="blue", leafcolor="red", funccolor="green"): + """ + :return: SVG representation of a tree. + """ + fontsize = 12 + hscale = 40 + vscale = 25 + hstart = vstart = 20 + width = max(col for _, col in self.coords.values()) + height = max(row for row, _ in self.coords.values()) + result = [ + '' + % ( + width * 3, + height * 2.5, + -hstart, + -vstart, + width * hscale + 3 * hstart, + height * vscale + 3 * vstart, + ) + ] + + children = defaultdict(set) + for n in self.nodes: + if n: + children[self.edges[n]].add(n) + + # horizontal branches from nodes to children + for node in self.nodes: + if not children[node]: + continue + y, x = self.coords[node] + x *= hscale + y *= vscale + x += hstart + y += vstart + fontsize // 2 + childx = [self.coords[c][1] for c in children[node]] + xmin = hstart + hscale * min(childx) + xmax = hstart + hscale * max(childx) + result.append( + '\t' % (xmin, y, xmax, y) + ) + result.append( + '\t' % (x, y, x, y - fontsize // 3) + ) + + # vertical branches from children to parents + for child, parent in self.edges.items(): + y, _ = self.coords[parent] + y *= vscale + y += vstart + fontsize // 2 + childy, childx = self.coords[child] + childx *= hscale + childy *= vscale + childx += hstart + childy += vstart - fontsize + result += [ + '\t' % (childx, childy, childx, y + 5), + '\t' % (childx, childy, childx, y), + ] + + # write nodes with coordinates + for n, (row, column) in self.coords.items(): + node = self.nodes[n] + x = column * hscale + hstart + y = row * vscale + vstart + if n in self.highlight: + color = nodecolor if isinstance(node, Tree) else leafcolor + if isinstance(node, Tree) and node.label().startswith("-"): + color = funccolor + else: + color = "black" + result += [ + '\t%s' + % ( + color, + fontsize, + x, + y, + escape( + node.label() if isinstance(node, Tree) else node, quote=False + ), + ) + ] + + result += [""] + return "\n".join(result) + + +def test(): + """Do some tree drawing tests.""" + + def print_tree(n, tree, sentence=None, ansi=True, **xargs): + print() + print('{}: "{}"'.format(n, " ".join(sentence or tree.leaves()))) + print(tree) + print() + drawtree = TreePrettyPrinter(tree, sentence) + try: + print(drawtree.text(unicodelines=ansi, ansi=ansi, **xargs)) + except (UnicodeDecodeError, UnicodeEncodeError): + print(drawtree.text(unicodelines=False, ansi=False, **xargs)) + + from nltk.corpus import treebank + + for n in [0, 1440, 1591, 2771, 2170]: + tree = treebank.parsed_sents()[n] + print_tree(n, tree, nodedist=2, maxwidth=8) + print() + print("ASCII version:") + print(TreePrettyPrinter(tree).text(nodedist=2)) + + tree = Tree.fromstring( + "(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) " + "(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) " + "(vg 10) (inf (verb 11)))))) (punct 12))", + read_leaf=int, + ) + sentence = ( + "Ze had met haar moeder kunnen gaan winkelen ," + " zwemmen of terrassen .".split() + ) + 
print_tree("Discontinuous tree", tree, sentence, nodedist=2) + + +__all__ = ["TreePrettyPrinter"] + +if __name__ == "__main__": + test() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/probabilistic.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/probabilistic.py new file mode 100644 index 0000000000000000000000000000000000000000..79a4c798ad5f73b7c515e20456a7149232958f17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/probabilistic.py @@ -0,0 +1,74 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + + +from nltk.internals import raise_unorderable_types +from nltk.probability import ProbabilisticMixIn +from nltk.tree.immutable import ImmutableProbabilisticTree +from nltk.tree.tree import Tree + +###################################################################### +## Probabilistic trees +###################################################################### + + +class ProbabilisticTree(Tree, ProbabilisticMixIn): + def __init__(self, node, children=None, **prob_kwargs): + Tree.__init__(self, node, children) + ProbabilisticMixIn.__init__(self, **prob_kwargs) + + # We have to patch up these methods to make them work right: + def _frozen_class(self): + return ImmutableProbabilisticTree + + def __repr__(self): + return f"{Tree.__repr__(self)} (p={self.prob()!r})" + + def __str__(self): + return f"{self.pformat(margin=60)} (p={self.prob():.6g})" + + def copy(self, deep=False): + if not deep: + return type(self)(self._label, self, prob=self.prob()) + else: + return type(self).convert(self) + + @classmethod + def convert(cls, val): + if isinstance(val, Tree): + children = [cls.convert(child) for child in val] + if isinstance(val, ProbabilisticMixIn): + return cls(val._label, children, prob=val.prob()) + else: + return cls(val._label, children, prob=1.0) + else: + return val + + def __eq__(self, other): + return self.__class__ is other.__class__ and ( + self._label, + list(self), + self.prob(), + ) == (other._label, list(other), other.prob()) + + def __lt__(self, other): + if not isinstance(other, Tree): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return (self._label, list(self), self.prob()) < ( + other._label, + list(other), + other.prob(), + ) + else: + return self.__class__.__name__ < other.__class__.__name__ + + +__all__ = ["ProbabilisticTree"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/transforms.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..99cd6893ce9f168ffa024f2bb8c39177617dced2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/transforms.py @@ -0,0 +1,338 @@ +# Natural Language Toolkit: Tree Transformations +# +# Copyright (C) 2005-2007 Oregon Graduate Institute +# Author: Nathan Bodenstab +# URL: +# For license information, see LICENSE.TXT + +r""" +A collection of methods for tree (grammar) transformations used +in parsing natural language. + +Although many of these methods are technically grammar transformations +(ie. Chomsky Norm Form), when working with treebanks it is much more +natural to visualize these modifications in a tree structure. Hence, +we will do all transformation directly to the tree itself. +Transforming the tree directly also allows us to do parent annotation. 
+A grammar can then be simply induced from the modified tree. + +The following is a short tutorial on the available transformations. + + 1. Chomsky Normal Form (binarization) + + It is well known that any grammar has a Chomsky Normal Form (CNF) + equivalent grammar where CNF is defined by every production having + either two non-terminals or one terminal on its right hand side. + When we have hierarchically structured data (ie. a treebank), it is + natural to view this in terms of productions where the root of every + subtree is the head (left hand side) of the production and all of + its children are the right hand side constituents. In order to + convert a tree into CNF, we simply need to ensure that every subtree + has either two subtrees as children (binarization), or one leaf node + (non-terminal). In order to binarize a subtree with more than two + children, we must introduce artificial nodes. + + There are two popular methods to convert a tree into CNF: left + factoring and right factoring. The following example demonstrates + the difference between them. Example:: + + Original Right-Factored Left-Factored + + A A A + / | \ / \ / \ + B C D ==> B A| OR A| D + / \ / \ + C D B C + + 2. Parent Annotation + + In addition to binarizing the tree, there are two standard + modifications to node labels we can do in the same traversal: parent + annotation and Markov order-N smoothing (or sibling smoothing). + + The purpose of parent annotation is to refine the probabilities of + productions by adding a small amount of context. With this simple + addition, a CYK (inside-outside, dynamic programming chart parse) + can improve from 74% to 79% accuracy. A natural generalization from + parent annotation is to grandparent annotation and beyond. The + tradeoff becomes accuracy gain vs. computational complexity. We + must also keep in mind data sparcity issues. Example:: + + Original Parent Annotation + + A A^ + / | \ / \ + B C D ==> B^ A|^ where ? is the + / \ parent of A + C^ D^ + + + 3. Markov order-N smoothing + + Markov smoothing combats data sparcity issues as well as decreasing + computational requirements by limiting the number of children + included in artificial nodes. In practice, most people use an order + 2 grammar. Example:: + + Original No Smoothing Markov order 1 Markov order 2 etc. + + __A__ A A A + / /|\ \ / \ / \ / \ + B C D E F ==> B A| ==> B A| ==> B A| + / \ / \ / \ + C ... C ... C ... + + + + Annotation decisions can be thought about in the vertical direction + (parent, grandparent, etc) and the horizontal direction (number of + siblings to keep). Parameters to the following functions specify + these values. For more information see: + + Dan Klein and Chris Manning (2003) "Accurate Unlexicalized + Parsing", ACL-03. https://www.aclweb.org/anthology/P03-1054 + + 4. Unary Collapsing + + Collapse unary productions (ie. subtrees with a single child) into a + new non-terminal (Tree node). This is useful when working with + algorithms that do not allow unary productions, yet you do not wish + to lose the parent information. Example:: + + A + | + B ==> A+B + / \ / \ + C D C D + +""" + +from nltk.tree.tree import Tree + + +def chomsky_normal_form( + tree, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^" +): + # assume all subtrees have homogeneous children + # assume all terminals have no siblings + + # A semi-hack to have elegant looking code below. As a result, + # any subtree with a branching factor greater than 999 will be incorrectly truncated. 
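To make the horizontal Markov order concrete, here is a small sketch; the tree is an invented example, and the artificial-node labels follow the childChar/horzMarkov naming scheme implemented just below:

    from nltk.tree import Tree
    from nltk.tree.transforms import chomsky_normal_form

    t = Tree.fromstring("(A (B b) (C c) (D d) (E e))")
    chomsky_normal_form(t, factor="right", horzMarkov=2)
    print(t)
    # (A (B b) (A|<C-D> (C c) (A|<D-E> (D d) (E e))))
    # With horzMarkov left at None (treated as 999 below), the first artificial
    # node would keep every remaining sibling in its label: A|<C-D-E>.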
+ if horzMarkov is None: + horzMarkov = 999 + + # Traverse the tree depth-first keeping a list of ancestor nodes to the root. + # I chose not to use the tree.treepositions() method since it requires + # two traversals of the tree (one to get the positions, one to iterate + # over them) and node access time is proportional to the height of the node. + # This method is 7x faster which helps when parsing 40,000 sentences. + + nodeList = [(tree, [tree.label()])] + while nodeList != []: + node, parent = nodeList.pop() + if isinstance(node, Tree): + + # parent annotation + parentString = "" + originalNode = node.label() + if vertMarkov != 0 and node != tree and isinstance(node[0], Tree): + parentString = "{}<{}>".format(parentChar, "-".join(parent)) + node.set_label(node.label() + parentString) + parent = [originalNode] + parent[: vertMarkov - 1] + + # add children to the agenda before we mess with them + for child in node: + nodeList.append((child, parent)) + + # chomsky normal form factorization + if len(node) > 2: + childNodes = [child.label() for child in node] + nodeCopy = node.copy() + node[0:] = [] # delete the children + + curNode = node + numChildren = len(nodeCopy) + for i in range(1, numChildren - 1): + if factor == "right": + newHead = "{}{}<{}>{}".format( + originalNode, + childChar, + "-".join( + childNodes[i : min([i + horzMarkov, numChildren])] + ), + parentString, + ) # create new head + newNode = Tree(newHead, []) + curNode[0:] = [nodeCopy.pop(0), newNode] + else: + newHead = "{}{}<{}>{}".format( + originalNode, + childChar, + "-".join( + childNodes[max([numChildren - i - horzMarkov, 0]) : -i] + ), + parentString, + ) + newNode = Tree(newHead, []) + curNode[0:] = [newNode, nodeCopy.pop()] + + curNode = newNode + + curNode[0:] = [child for child in nodeCopy] + + +def un_chomsky_normal_form( + tree, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" +): + # Traverse the tree-depth first keeping a pointer to the parent for modification purposes. + nodeList = [(tree, [])] + while nodeList != []: + node, parent = nodeList.pop() + if isinstance(node, Tree): + # if the node contains the 'childChar' character it means that + # it is an artificial node and can be removed, although we still need + # to move its children to its parent + childIndex = node.label().find(childChar) + if childIndex != -1: + nodeIndex = parent.index(node) + parent.remove(parent[nodeIndex]) + # Generated node was on the left if the nodeIndex is 0 which + # means the grammar was left factored. We must insert the children + # at the beginning of the parent's children + if nodeIndex == 0: + parent.insert(0, node[0]) + parent.insert(1, node[1]) + else: + parent.extend([node[0], node[1]]) + + # parent is now the current node so the children of parent will be added to the agenda + node = parent + else: + parentIndex = node.label().find(parentChar) + if parentIndex != -1: + # strip the node name of the parent annotation + node.set_label(node.label()[:parentIndex]) + + # expand collapsed unary productions + if expandUnary == True: + unaryIndex = node.label().find(unaryChar) + if unaryIndex != -1: + newNode = Tree( + node.label()[unaryIndex + 1 :], [i for i in node] + ) + node.set_label(node.label()[:unaryIndex]) + node[0:] = [newNode] + + for child in node: + nodeList.append((child, node)) + + +def collapse_unary(tree, collapsePOS=False, collapseRoot=False, joinChar="+"): + """ + Collapse subtrees with a single child (ie. unary productions) + into a new non-terminal (Tree node) joined by 'joinChar'. 
+ This is useful when working with algorithms that do not allow + unary productions, and completely removing the unary productions + would require loss of useful information. The Tree is modified + directly (since it is passed by reference) and no value is returned. + + :param tree: The Tree to be collapsed + :type tree: Tree + :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. + Part-of-Speech tags) since they are always unary productions + :type collapsePOS: bool + :param collapseRoot: 'False' (default) will not modify the root production + if it is unary. For the Penn WSJ treebank corpus, this corresponds + to the TOP -> productions. + :type collapseRoot: bool + :param joinChar: A string used to connect collapsed node values (default = "+") + :type joinChar: str + """ + + if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1: + nodeList = [tree[0]] + else: + nodeList = [tree] + + # depth-first traversal of tree + while nodeList != []: + node = nodeList.pop() + if isinstance(node, Tree): + if ( + len(node) == 1 + and isinstance(node[0], Tree) + and (collapsePOS == True or isinstance(node[0, 0], Tree)) + ): + node.set_label(node.label() + joinChar + node[0].label()) + node[0:] = [child for child in node[0]] + # since we assigned the child's children to the current node, + # evaluate the current node again + nodeList.append(node) + else: + for child in node: + nodeList.append(child) + + +################################################################# +# Demonstration +################################################################# + + +def demo(): + """ + A demonstration showing how each tree transform can be used. + """ + + from copy import deepcopy + + from nltk.draw.tree import draw_trees + from nltk.tree.tree import Tree + + # original tree from WSJ bracketed text + sentence = """(TOP + (S + (S + (VP + (VBN Turned) + (ADVP (RB loose)) + (PP + (IN in) + (NP + (NP (NNP Shane) (NNP Longman) (POS 's)) + (NN trading) + (NN room))))) + (, ,) + (NP (DT the) (NN yuppie) (NNS dealers)) + (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) + (. .)))""" + t = Tree.fromstring(sentence, remove_empty_top_bracketing=True) + + # collapse subtrees with only one child + collapsedTree = deepcopy(t) + collapse_unary(collapsedTree) + + # convert the tree to CNF + cnfTree = deepcopy(collapsedTree) + chomsky_normal_form(cnfTree) + + # convert the tree to CNF with parent annotation (one level) and horizontal smoothing of order two + parentTree = deepcopy(collapsedTree) + chomsky_normal_form(parentTree, horzMarkov=2, vertMarkov=1) + + # convert the tree back to its original form (used to make CYK results comparable) + original = deepcopy(parentTree) + un_chomsky_normal_form(original) + + # convert tree back to bracketed text + sentence2 = original.pprint() + print(sentence) + print(sentence2) + print("Sentences the same? 
", sentence == sentence2) + + draw_trees(t, collapsedTree, cnfTree, parentTree, original) + + +if __name__ == "__main__": + demo() + +__all__ = ["chomsky_normal_form", "un_chomsky_normal_form", "collapse_unary"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/tree.py b/llmeval-env/lib/python3.10/site-packages/nltk/tree/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..d21be4091c5f6855a42a12dcddc53f21aafa16ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tree/tree.py @@ -0,0 +1,982 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Nathan Bodenstab (tree transforms) +# Eric Kafe (Tree.fromlist()) +# Mohaned mashaly (Deprecating methods) +# URL: +# For license information, see LICENSE.TXT + +""" +Class for representing hierarchical language structures, such as +syntax trees and morphological trees. +""" + +import re + +from nltk.grammar import Nonterminal, Production +from nltk.internals import deprecated + +###################################################################### +## Trees +###################################################################### + + +class Tree(list): + r""" + A Tree represents a hierarchical grouping of leaves and subtrees. + For example, each constituent in a syntax tree is represented by a single Tree. + + A tree's children are encoded as a list of leaves and subtrees, + where a leaf is a basic (non-tree) value; and a subtree is a + nested Tree. + + >>> from nltk.tree import Tree + >>> print(Tree(1, [2, Tree(3, [4]), 5])) + (1 2 (3 4) 5) + >>> vp = Tree('VP', [Tree('V', ['saw']), + ... Tree('NP', ['him'])]) + >>> s = Tree('S', [Tree('NP', ['I']), vp]) + >>> print(s) + (S (NP I) (VP (V saw) (NP him))) + >>> print(s[1]) + (VP (V saw) (NP him)) + >>> print(s[1,1]) + (NP him) + >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") + >>> s == t + True + >>> t[1][1].set_label('X') + >>> t[1][1].label() + 'X' + >>> print(t) + (S (NP I) (VP (V saw) (X him))) + >>> t[0], t[1,1] = t[1,1], t[0] + >>> print(t) + (S (X him) (VP (V saw) (NP I))) + + The length of a tree is the number of children it has. + + >>> len(t) + 2 + + The set_label() and label() methods allow individual constituents + to be labeled. For example, syntax trees use this label to specify + phrase tags, such as "NP" and "VP". + + Several Tree methods use "tree positions" to specify + children or descendants of a tree. Tree positions are defined as + follows: + + - The tree position *i* specifies a Tree's *i*\ th child. + - The tree position ``()`` specifies the Tree itself. + - If *p* is the tree position of descendant *d*, then + *p+i* specifies the *i*\ th child of *d*. + + I.e., every tree position is either a single index *i*, + specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*, + specifying ``tree[i1][i2]...[iN]``. + + Construct a new tree. This constructor can be called in one + of two ways: + + - ``Tree(label, children)`` constructs a new tree with the + specified label and list of children. + + - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``. 
+ """ + + def __init__(self, node, children=None): + if children is None: + raise TypeError( + "%s: Expected a node value and child list " % type(self).__name__ + ) + elif isinstance(children, str): + raise TypeError( + "%s() argument 2 should be a list, not a " + "string" % type(self).__name__ + ) + else: + list.__init__(self, children) + self._label = node + + # //////////////////////////////////////////////////////////// + # Comparison operators + # //////////////////////////////////////////////////////////// + + def __eq__(self, other): + return self.__class__ is other.__class__ and (self._label, list(self)) == ( + other._label, + list(other), + ) + + def __lt__(self, other): + if not isinstance(other, Tree): + # raise_unorderable_types("<", self, other) + # Sometimes children can be pure strings, + # so we need to be able to compare with non-trees: + return self.__class__.__name__ < other.__class__.__name__ + elif self.__class__ is other.__class__: + return (self._label, list(self)) < (other._label, list(other)) + else: + return self.__class__.__name__ < other.__class__.__name__ + + # @total_ordering doesn't work here, since the class inherits from a builtin class + __ne__ = lambda self, other: not self == other + __gt__ = lambda self, other: not (self < other or self == other) + __le__ = lambda self, other: self < other or self == other + __ge__ = lambda self, other: not self < other + + # //////////////////////////////////////////////////////////// + # Disabled list operations + # //////////////////////////////////////////////////////////// + + def __mul__(self, v): + raise TypeError("Tree does not support multiplication") + + def __rmul__(self, v): + raise TypeError("Tree does not support multiplication") + + def __add__(self, v): + raise TypeError("Tree does not support addition") + + def __radd__(self, v): + raise TypeError("Tree does not support addition") + + # //////////////////////////////////////////////////////////// + # Indexing (with support for tree positions) + # //////////////////////////////////////////////////////////// + + def __getitem__(self, index): + if isinstance(index, (int, slice)): + return list.__getitem__(self, index) + elif isinstance(index, (list, tuple)): + if len(index) == 0: + return self + elif len(index) == 1: + return self[index[0]] + else: + return self[index[0]][index[1:]] + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def __setitem__(self, index, value): + if isinstance(index, (int, slice)): + return list.__setitem__(self, index, value) + elif isinstance(index, (list, tuple)): + if len(index) == 0: + raise IndexError("The tree position () may not be " "assigned to.") + elif len(index) == 1: + self[index[0]] = value + else: + self[index[0]][index[1:]] = value + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def __delitem__(self, index): + if isinstance(index, (int, slice)): + return list.__delitem__(self, index) + elif isinstance(index, (list, tuple)): + if len(index) == 0: + raise IndexError("The tree position () may not be deleted.") + elif len(index) == 1: + del self[index[0]] + else: + del self[index[0]][index[1:]] + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + # //////////////////////////////////////////////////////////// + # Basic tree operations + # //////////////////////////////////////////////////////////// + 
@deprecated("Use label() instead") + def _get_node(self): + """Outdated method to access the node value; use the label() method instead.""" + + @deprecated("Use set_label() instead") + def _set_node(self, value): + """Outdated method to set the node value; use the set_label() method instead.""" + + node = property(_get_node, _set_node) + + def label(self): + """ + Return the node label of the tree. + + >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))') + >>> t.label() + 'S' + + :return: the node label (typically a string) + :rtype: any + """ + return self._label + + def set_label(self, label): + """ + Set the node label of the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.set_label("T") + >>> print(t) + (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)))) + + :param label: the node label (typically a string) + :type label: any + """ + self._label = label + + def leaves(self): + """ + Return the leaves of the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.leaves() + ['the', 'dog', 'chased', 'the', 'cat'] + + :return: a list containing this tree's leaves. + The order reflects the order of the + leaves in the tree's hierarchical structure. + :rtype: list + """ + leaves = [] + for child in self: + if isinstance(child, Tree): + leaves.extend(child.leaves()) + else: + leaves.append(child) + return leaves + + def flatten(self): + """ + Return a flat version of the tree, with all non-root non-terminals removed. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> print(t.flatten()) + (S the dog chased the cat) + + :return: a tree consisting of this tree's root connected directly to + its leaves, omitting all intervening non-terminal nodes. + :rtype: Tree + """ + return Tree(self.label(), self.leaves()) + + def height(self): + """ + Return the height of the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.height() + 5 + >>> print(t[0,0]) + (D the) + >>> t[0,0].height() + 2 + + :return: The height of this tree. The height of a tree + containing no children is 1; the height of a tree + containing only leaves is 2; and the height of any other + tree is one plus the maximum of its children's + heights. + :rtype: int + """ + max_child_height = 0 + for child in self: + if isinstance(child, Tree): + max_child_height = max(max_child_height, child.height()) + else: + max_child_height = max(max_child_height, 1) + return 1 + max_child_height + + def treepositions(self, order="preorder"): + """ + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.treepositions() # doctest: +ELLIPSIS + [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...] + >>> for pos in t.treepositions('leaves'): + ... t[pos] = t[pos][::-1].upper() + >>> print(t) + (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC)))) + + :param order: One of: ``preorder``, ``postorder``, ``bothorder``, + ``leaves``. 
+ """ + positions = [] + if order in ("preorder", "bothorder"): + positions.append(()) + for i, child in enumerate(self): + if isinstance(child, Tree): + childpos = child.treepositions(order) + positions.extend((i,) + p for p in childpos) + else: + positions.append((i,)) + if order in ("postorder", "bothorder"): + positions.append(()) + return positions + + def subtrees(self, filter=None): + """ + Generate all the subtrees of this tree, optionally restricted + to trees matching the filter function. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> for s in t.subtrees(lambda t: t.height() == 2): + ... print(s) + (D the) + (N dog) + (V chased) + (D the) + (N cat) + + :type filter: function + :param filter: the function to filter all local trees + """ + if not filter or filter(self): + yield self + for child in self: + if isinstance(child, Tree): + yield from child.subtrees(filter) + + def productions(self): + """ + Generate the productions that correspond to the non-terminal nodes of the tree. + For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the + form P -> C1 C2 ... Cn. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.productions() # doctest: +NORMALIZE_WHITESPACE + [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased', + NP -> D N, D -> 'the', N -> 'cat'] + + :rtype: list(Production) + """ + + if not isinstance(self._label, str): + raise TypeError( + "Productions can only be generated from trees having node labels that are strings" + ) + + prods = [Production(Nonterminal(self._label), _child_names(self))] + for child in self: + if isinstance(child, Tree): + prods += child.productions() + return prods + + def pos(self): + """ + Return a sequence of pos-tagged words extracted from the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.pos() + [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')] + + :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags). + The order reflects the order of the leaves in the tree's hierarchical structure. + :rtype: list(tuple) + """ + pos = [] + for child in self: + if isinstance(child, Tree): + pos.extend(child.pos()) + else: + pos.append((child, self._label)) + return pos + + def leaf_treeposition(self, index): + """ + :return: The tree position of the ``index``-th leaf in this + tree. I.e., if ``tp=self.leaf_treeposition(i)``, then + ``self[tp]==self.leaves()[i]``. + + :raise IndexError: If this tree contains fewer than ``index+1`` + leaves, or if ``index<0``. + """ + if index < 0: + raise IndexError("index must be non-negative") + + stack = [(self, ())] + while stack: + value, treepos = stack.pop() + if not isinstance(value, Tree): + if index == 0: + return treepos + else: + index -= 1 + else: + for i in range(len(value) - 1, -1, -1): + stack.append((value[i], treepos + (i,))) + + raise IndexError("index must be less than or equal to len(self)") + + def treeposition_spanning_leaves(self, start, end): + """ + :return: The tree position of the lowest descendant of this + tree that dominates ``self.leaves()[start:end]``. + :raise ValueError: if ``end <= start`` + """ + if end <= start: + raise ValueError("end must be greater than start") + # Find the tree positions of the start & end leaves, and + # take the longest common subsequence. 
+ start_treepos = self.leaf_treeposition(start) + end_treepos = self.leaf_treeposition(end - 1) + # Find the first index where they mismatch: + for i in range(len(start_treepos)): + if i == len(end_treepos) or start_treepos[i] != end_treepos[i]: + return start_treepos[:i] + return start_treepos + + # //////////////////////////////////////////////////////////// + # Transforms + # //////////////////////////////////////////////////////////// + + def chomsky_normal_form( + self, + factor="right", + horzMarkov=None, + vertMarkov=0, + childChar="|", + parentChar="^", + ): + """ + This method can modify a tree in three ways: + + 1. Convert a tree into its Chomsky Normal Form (CNF) + equivalent -- Every subtree has either two non-terminals + or one terminal as its children. This process requires + the creation of more"artificial" non-terminal nodes. + 2. Markov (vertical) smoothing of children in new artificial + nodes + 3. Horizontal (parent) annotation of nodes + + :param factor: Right or left factoring method (default = "right") + :type factor: str = [left|right] + :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings) + :type horzMarkov: int | None + :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation) + :type vertMarkov: int | None + :param childChar: A string used in construction of the artificial nodes, separating the head of the + original subtree from the child nodes that have yet to be expanded (default = "|") + :type childChar: str + :param parentChar: A string used to separate the node representation from its vertical annotation + :type parentChar: str + """ + from nltk.tree.transforms import chomsky_normal_form + + chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar) + + def un_chomsky_normal_form( + self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" + ): + """ + This method modifies the tree in three ways: + + 1. Transforms a tree in Chomsky Normal Form back to its + original structure (branching greater than two) + 2. Removes any parent annotation (if it exists) + 3. (optional) expands unary subtrees (if previously + collapsed with collapseUnary(...) ) + + :param expandUnary: Flag to expand unary or not (default = True) + :type expandUnary: bool + :param childChar: A string separating the head node from its children in an artificial node (default = "|") + :type childChar: str + :param parentChar: A string separating the node label from its parent annotation (default = "^") + :type parentChar: str + :param unaryChar: A string joining two non-terminals in a unary production (default = "+") + :type unaryChar: str + """ + from nltk.tree.transforms import un_chomsky_normal_form + + un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar) + + def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"): + """ + Collapse subtrees with a single child (ie. unary productions) + into a new non-terminal (Tree node) joined by 'joinChar'. + This is useful when working with algorithms that do not allow + unary productions, and completely removing the unary productions + would require loss of useful information. The Tree is modified + directly (since it is passed by reference) and no value is returned. + + :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. 
+ Part-of-Speech tags) since they are always unary productions + :type collapsePOS: bool + :param collapseRoot: 'False' (default) will not modify the root production + if it is unary. For the Penn WSJ treebank corpus, this corresponds + to the TOP -> productions. + :type collapseRoot: bool + :param joinChar: A string used to connect collapsed node values (default = "+") + :type joinChar: str + """ + from nltk.tree.transforms import collapse_unary + + collapse_unary(self, collapsePOS, collapseRoot, joinChar) + + # //////////////////////////////////////////////////////////// + # Convert, copy + # //////////////////////////////////////////////////////////// + + @classmethod + def convert(cls, tree): + """ + Convert a tree between different subtypes of Tree. ``cls`` determines + which class will be used to encode the new tree. + + :type tree: Tree + :param tree: The tree that should be converted. + :return: The new Tree. + """ + if isinstance(tree, Tree): + children = [cls.convert(child) for child in tree] + return cls(tree._label, children) + else: + return tree + + def __copy__(self): + return self.copy() + + def __deepcopy__(self, memo): + return self.copy(deep=True) + + def copy(self, deep=False): + if not deep: + return type(self)(self._label, self) + else: + return type(self).convert(self) + + def _frozen_class(self): + from nltk.tree.immutable import ImmutableTree + + return ImmutableTree + + def freeze(self, leaf_freezer=None): + frozen_class = self._frozen_class() + if leaf_freezer is None: + newcopy = frozen_class.convert(self) + else: + newcopy = self.copy(deep=True) + for pos in newcopy.treepositions("leaves"): + newcopy[pos] = leaf_freezer(newcopy[pos]) + newcopy = frozen_class.convert(newcopy) + hash(newcopy) # Make sure the leaves are hashable. + return newcopy + + # //////////////////////////////////////////////////////////// + # Parsing + # //////////////////////////////////////////////////////////// + + @classmethod + def fromstring( + cls, + s, + brackets="()", + read_node=None, + read_leaf=None, + node_pattern=None, + leaf_pattern=None, + remove_empty_top_bracketing=False, + ): + """ + Read a bracketed tree string and return the resulting tree. + Trees are represented as nested brackettings, such as:: + + (S (NP (NNP John)) (VP (V runs))) + + :type s: str + :param s: The string to read + + :type brackets: str (length=2) + :param brackets: The bracket characters used to mark the + beginning and end of trees and subtrees. + + :type read_node: function + :type read_leaf: function + :param read_node, read_leaf: If specified, these functions + are applied to the substrings of ``s`` corresponding to + nodes and leaves (respectively) to obtain the values for + those nodes and leaves. They should have the following + signature: + + read_node(str) -> value + + For example, these functions could be used to process nodes + and leaves whose values should be some type other than + string (such as ``FeatStruct``). + Note that by default, node strings and leaf strings are + delimited by whitespace and brackets; to override this + default, use the ``node_pattern`` and ``leaf_pattern`` + arguments. + + :type node_pattern: str + :type leaf_pattern: str + :param node_pattern, leaf_pattern: Regular expression patterns + used to find node and leaf substrings in ``s``. By + default, both nodes patterns are defined to match any + sequence of non-whitespace non-bracket characters. 
+ + :type remove_empty_top_bracketing: bool + :param remove_empty_top_bracketing: If the resulting tree has + an empty node label, and is length one, then return its + single child instead. This is useful for treebank trees, + which sometimes contain an extra level of bracketing. + + :return: A tree corresponding to the string representation ``s``. + If this class method is called using a subclass of Tree, + then it will return a tree of that type. + :rtype: Tree + """ + if not isinstance(brackets, str) or len(brackets) != 2: + raise TypeError("brackets must be a length-2 string") + if re.search(r"\s", brackets): + raise TypeError("whitespace brackets not allowed") + # Construct a regexp that will tokenize the string. + open_b, close_b = brackets + open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b)) + if node_pattern is None: + node_pattern = rf"[^\s{open_pattern}{close_pattern}]+" + if leaf_pattern is None: + leaf_pattern = rf"[^\s{open_pattern}{close_pattern}]+" + token_re = re.compile( + r"%s\s*(%s)?|%s|(%s)" + % (open_pattern, node_pattern, close_pattern, leaf_pattern) + ) + # Walk through each token, updating a stack of trees. + stack = [(None, [])] # list of (node, children) tuples + for match in token_re.finditer(s): + token = match.group() + # Beginning of a tree/subtree + if token[0] == open_b: + if len(stack) == 1 and len(stack[0][1]) > 0: + cls._parse_error(s, match, "end-of-string") + label = token[1:].lstrip() + if read_node is not None: + label = read_node(label) + stack.append((label, [])) + # End of a tree/subtree + elif token == close_b: + if len(stack) == 1: + if len(stack[0][1]) == 0: + cls._parse_error(s, match, open_b) + else: + cls._parse_error(s, match, "end-of-string") + label, children = stack.pop() + stack[-1][1].append(cls(label, children)) + # Leaf node + else: + if len(stack) == 1: + cls._parse_error(s, match, open_b) + if read_leaf is not None: + token = read_leaf(token) + stack[-1][1].append(token) + + # check that we got exactly one complete tree. + if len(stack) > 1: + cls._parse_error(s, "end-of-string", close_b) + elif len(stack[0][1]) == 0: + cls._parse_error(s, "end-of-string", open_b) + else: + assert stack[0][0] is None + assert len(stack[0][1]) == 1 + tree = stack[0][1][0] + + # If the tree has an extra level with node='', then get rid of + # it. E.g.: "((S (NP ...) (VP ...)))" + if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1: + tree = tree[0] + # return the tree. + return tree + + @classmethod + def _parse_error(cls, s, match, expecting): + """ + Display a friendly error message when parsing a tree string fails. + :param s: The string we're parsing. + :param match: regexp match of the problem token. + :param expecting: what we expected to see instead. + """ + # Construct a basic error message + if match == "end-of-string": + pos, token = len(s), "end-of-string" + else: + pos, token = match.start(), match.group() + msg = "%s.read(): expected %r but got %r\n%sat index %d." % ( + cls.__name__, + expecting, + token, + " " * 12, + pos, + ) + # Add a display showing the error token itsels: + s = s.replace("\n", " ").replace("\t", " ") + offset = pos + if len(s) > pos + 10: + s = s[: pos + 10] + "..." + if pos > 10: + s = "..." 
+ s[pos - 10 :] + offset = 13 + msg += '\n{}"{}"\n{}^'.format(" " * 16, s, " " * (17 + offset)) + raise ValueError(msg) + + @classmethod + def fromlist(cls, l): + """ + :type l: list + :param l: a tree represented as nested lists + + :return: A tree corresponding to the list representation ``l``. + :rtype: Tree + + Convert nested lists to a NLTK Tree + """ + if type(l) == list and len(l) > 0: + label = repr(l[0]) + if len(l) > 1: + return Tree(label, [cls.fromlist(child) for child in l[1:]]) + else: + return label + + # //////////////////////////////////////////////////////////// + # Visualization & String Representation + # //////////////////////////////////////////////////////////// + + def draw(self): + """ + Open a new window containing a graphical diagram of this tree. + """ + from nltk.draw.tree import draw_trees + + draw_trees(self) + + def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs): + """ + Pretty-print this tree as ASCII or Unicode art. + For explanation of the arguments, see the documentation for + `nltk.tree.prettyprinter.TreePrettyPrinter`. + """ + from nltk.tree.prettyprinter import TreePrettyPrinter + + print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream) + + def __repr__(self): + childstr = ", ".join(repr(c) for c in self) + return "{}({}, [{}])".format( + type(self).__name__, + repr(self._label), + childstr, + ) + + def _repr_svg_(self): + from svgling import draw_tree + + return draw_tree(self)._repr_svg_() + + def __str__(self): + return self.pformat() + + def pprint(self, **kwargs): + """ + Print a string representation of this Tree to 'stream' + """ + + if "stream" in kwargs: + stream = kwargs["stream"] + del kwargs["stream"] + else: + stream = None + print(self.pformat(**kwargs), file=stream) + + def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False): + """ + :return: A pretty-printed string representation of this tree. + :rtype: str + :param margin: The right margin at which to do line-wrapping. + :type margin: int + :param indent: The indentation level at which printing + begins. This number is used to decide how far to indent + subsequent lines. + :type indent: int + :param nodesep: A string that is used to separate the node + from the children. E.g., the default value ``':'`` gives + trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``. + """ + + # Try writing it on one line. + s = self._pformat_flat(nodesep, parens, quotes) + if len(s) + indent < margin: + return s + + # If it doesn't fit on one line, then write it on multi-lines. + if isinstance(self._label, str): + s = f"{parens[0]}{self._label}{nodesep}" + else: + s = f"{parens[0]}{repr(self._label)}{nodesep}" + for child in self: + if isinstance(child, Tree): + s += ( + "\n" + + " " * (indent + 2) + + child.pformat(margin, indent + 2, nodesep, parens, quotes) + ) + elif isinstance(child, tuple): + s += "\n" + " " * (indent + 2) + "/".join(child) + elif isinstance(child, str) and not quotes: + s += "\n" + " " * (indent + 2) + "%s" % child + else: + s += "\n" + " " * (indent + 2) + repr(child) + return s + parens[1] + + def pformat_latex_qtree(self): + r""" + Returns a representation of the tree compatible with the + LaTeX qtree package. This consists of the string ``\Tree`` + followed by the tree represented in bracketed notation. 
+ + For example, the following result was generated from a parse tree of + the sentence ``The announcement astounded us``:: + + \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ] + [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ] + + See https://www.ling.upenn.edu/advice/latex.html for the LaTeX + style file for the qtree package. + + :return: A latex qtree representation of this tree. + :rtype: str + """ + reserved_chars = re.compile(r"([#\$%&~_\{\}])") + + pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]")) + return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat) + + def _pformat_flat(self, nodesep, parens, quotes): + childstrs = [] + for child in self: + if isinstance(child, Tree): + childstrs.append(child._pformat_flat(nodesep, parens, quotes)) + elif isinstance(child, tuple): + childstrs.append("/".join(child)) + elif isinstance(child, str) and not quotes: + childstrs.append("%s" % child) + else: + childstrs.append(repr(child)) + if isinstance(self._label, str): + return "{}{}{} {}{}".format( + parens[0], + self._label, + nodesep, + " ".join(childstrs), + parens[1], + ) + else: + return "{}{}{} {}{}".format( + parens[0], + repr(self._label), + nodesep, + " ".join(childstrs), + parens[1], + ) + + +def _child_names(tree): + names = [] + for child in tree: + if isinstance(child, Tree): + names.append(Nonterminal(child._label)) + else: + names.append(child) + return names + + +###################################################################### +## Demonstration +###################################################################### + + +def demo(): + """ + A demonstration showing how Trees and Trees can be + used. This demonstration creates a Tree, and loads a + Tree from the Treebank corpus, + and shows the results of calling several of their methods. + """ + + from nltk import ProbabilisticTree, Tree + + # Demonstrate tree parsing. + s = "(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))" + t = Tree.fromstring(s) + print("Convert bracketed string into tree:") + print(t) + print(t.__repr__()) + + print("Display tree properties:") + print(t.label()) # tree's constituent type + print(t[0]) # tree's first child + print(t[1]) # tree's second child + print(t.height()) + print(t.leaves()) + print(t[1]) + print(t[1, 1]) + print(t[1, 1, 0]) + + # Demonstrate tree modification. + the_cat = t[0] + the_cat.insert(1, Tree.fromstring("(JJ big)")) + print("Tree modification:") + print(t) + t[1, 1, 1] = Tree.fromstring("(NN cake)") + print(t) + print() + + # Tree transforms + print("Collapse unary:") + t.collapse_unary() + print(t) + print("Chomsky normal form:") + t.chomsky_normal_form() + print(t) + print() + + # Demonstrate probabilistic trees. + pt = ProbabilisticTree("x", ["y", "z"], prob=0.5) + print("Probabilistic Tree:") + print(pt) + print() + + # Demonstrate parsing of treebank output format. + t = Tree.fromstring(t.pformat()) + print("Convert tree to bracketed string and back again:") + print(t) + print() + + # Demonstrate LaTeX output + print("LaTeX output:") + print(t.pformat_latex_qtree()) + print() + + # Demonstrate Productions + print("Production output:") + print(t.productions()) + print() + + # Demonstrate tree nodes containing objects other than strings + t.set_label(("test", 3)) + print(t) + + +__all__ = [ + "Tree", +]