diff --git a/llmeval-env/lib/python3.10/site-packages/click/__init__.py b/llmeval-env/lib/python3.10/site-packages/click/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1dab048917edc420af440c73bd1d689de6b3fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/click/__init__.py @@ -0,0 +1,73 @@ +""" +Click is a simple Python module inspired by the stdlib optparse to make +writing command line scripts fun. Unlike other modules, it's based +around a simple API that does not come with too much magic and is +composable. +""" +from .core import Argument as Argument +from .core import BaseCommand as BaseCommand +from .core import Command as Command +from .core import CommandCollection as CommandCollection +from .core import Context as Context +from .core import Group as Group +from .core import MultiCommand as MultiCommand +from .core import Option as Option +from .core import Parameter as Parameter +from .decorators import argument as argument +from .decorators import command as command +from .decorators import confirmation_option as confirmation_option +from .decorators import group as group +from .decorators import help_option as help_option +from .decorators import make_pass_decorator as make_pass_decorator +from .decorators import option as option +from .decorators import pass_context as pass_context +from .decorators import pass_obj as pass_obj +from .decorators import password_option as password_option +from .decorators import version_option as version_option +from .exceptions import Abort as Abort +from .exceptions import BadArgumentUsage as BadArgumentUsage +from .exceptions import BadOptionUsage as BadOptionUsage +from .exceptions import BadParameter as BadParameter +from .exceptions import ClickException as ClickException +from .exceptions import FileError as FileError +from .exceptions import MissingParameter as MissingParameter +from .exceptions import NoSuchOption as NoSuchOption +from .exceptions import UsageError as UsageError +from .formatting import HelpFormatter as HelpFormatter +from .formatting import wrap_text as wrap_text +from .globals import get_current_context as get_current_context +from .parser import OptionParser as OptionParser +from .termui import clear as clear +from .termui import confirm as confirm +from .termui import echo_via_pager as echo_via_pager +from .termui import edit as edit +from .termui import getchar as getchar +from .termui import launch as launch +from .termui import pause as pause +from .termui import progressbar as progressbar +from .termui import prompt as prompt +from .termui import secho as secho +from .termui import style as style +from .termui import unstyle as unstyle +from .types import BOOL as BOOL +from .types import Choice as Choice +from .types import DateTime as DateTime +from .types import File as File +from .types import FLOAT as FLOAT +from .types import FloatRange as FloatRange +from .types import INT as INT +from .types import IntRange as IntRange +from .types import ParamType as ParamType +from .types import Path as Path +from .types import STRING as STRING +from .types import Tuple as Tuple +from .types import UNPROCESSED as UNPROCESSED +from .types import UUID as UUID +from .utils import echo as echo +from .utils import format_filename as format_filename +from .utils import get_app_dir as get_app_dir +from .utils import get_binary_stream as get_binary_stream +from .utils import get_text_stream as get_text_stream +from .utils import open_file as open_file + +__version__ = "8.1.7" diff 
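For orientation, the ``__init__.py`` above only re-exports Click's public API. A minimal sketch (not part of the vendored diff) of a CLI built purely from these re-exported names::

    import click

    @click.command()
    @click.option("--count", default=1, help="Number of greetings.")
    @click.argument("name")
    def hello(count, name):
        """Greet NAME COUNT times."""
        for _ in range(count):
            click.echo(f"Hello, {name}!")

    if __name__ == "__main__":
        hello()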
--git a/llmeval-env/lib/python3.10/site-packages/click/_textwrap.py b/llmeval-env/lib/python3.10/site-packages/click/_textwrap.py new file mode 100644 index 0000000000000000000000000000000000000000..b47dcbd4264e86715adfae1c5124c288b67a983e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/click/_textwrap.py @@ -0,0 +1,49 @@ +import textwrap +import typing as t +from contextlib import contextmanager + + +class TextWrapper(textwrap.TextWrapper): + def _handle_long_word( + self, + reversed_chunks: t.List[str], + cur_line: t.List[str], + cur_len: int, + width: int, + ) -> None: + space_left = max(width - cur_len, 1) + + if self.break_long_words: + last = reversed_chunks[-1] + cut = last[:space_left] + res = last[space_left:] + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + @contextmanager + def extra_indent(self, indent: str) -> t.Iterator[None]: + old_initial_indent = self.initial_indent + old_subsequent_indent = self.subsequent_indent + self.initial_indent += indent + self.subsequent_indent += indent + + try: + yield + finally: + self.initial_indent = old_initial_indent + self.subsequent_indent = old_subsequent_indent + + def indent_only(self, text: str) -> str: + rv = [] + + for idx, line in enumerate(text.splitlines()): + indent = self.initial_indent + + if idx > 0: + indent = self.subsequent_indent + + rv.append(f"{indent}{line}") + + return "\n".join(rv) diff --git a/llmeval-env/lib/python3.10/site-packages/click/core.py b/llmeval-env/lib/python3.10/site-packages/click/core.py new file mode 100644 index 0000000000000000000000000000000000000000..cc65e896bf2d754d74b54a84ac501b80127f83ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/click/core.py @@ -0,0 +1,3042 @@ +import enum +import errno +import inspect +import os +import sys +import typing as t +from collections import abc +from contextlib import contextmanager +from contextlib import ExitStack +from functools import update_wrapper +from gettext import gettext as _ +from gettext import ngettext +from itertools import repeat +from types import TracebackType + +from . import types +from .exceptions import Abort +from .exceptions import BadParameter +from .exceptions import ClickException +from .exceptions import Exit +from .exceptions import MissingParameter +from .exceptions import UsageError +from .formatting import HelpFormatter +from .formatting import join_options +from .globals import pop_context +from .globals import push_context +from .parser import _flag_needs_value +from .parser import OptionParser +from .parser import split_opt +from .termui import confirm +from .termui import prompt +from .termui import style +from .utils import _detect_program_name +from .utils import _expand_args +from .utils import echo +from .utils import make_default_short_help +from .utils import make_str +from .utils import PacifyFlushWrapper + +if t.TYPE_CHECKING: + import typing_extensions as te + from .shell_completion import CompletionItem + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) +V = t.TypeVar("V") + + +def _complete_visible_commands( + ctx: "Context", incomplete: str +) -> t.Iterator[t.Tuple[str, "Command"]]: + """List all the subcommands of a group that start with the + incomplete value and aren't hidden. + + :param ctx: Invocation context for the group. + :param incomplete: Value being completed. May be empty. 
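The ``_textwrap.py`` helper above extends :class:`textwrap.TextWrapper` with a reversible ``extra_indent`` context manager and a no-rewrap ``indent_only``. A small sketch of how the two hooks behave; note ``click._textwrap`` is a private module, so this import path is an assumption that may change between releases::

    from click._textwrap import TextWrapper

    wrapper = TextWrapper(width=40, initial_indent="  ", subsequent_indent="  ")

    # extra_indent() temporarily widens both indents, restoring them on exit.
    with wrapper.extra_indent("  "):
        print("\n".join(wrapper.wrap("Options are wrapped to the terminal width. " * 3)))

    # indent_only() applies the configured indents without re-wrapping lines.
    print(wrapper.indent_only("Usage: tool [OPTIONS]\nTry 'tool --help' for help."))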
+ """ + multi = t.cast(MultiCommand, ctx.command) + + for name in multi.list_commands(ctx): + if name.startswith(incomplete): + command = multi.get_command(ctx, name) + + if command is not None and not command.hidden: + yield name, command + + +def _check_multicommand( + base_command: "MultiCommand", cmd_name: str, cmd: "Command", register: bool = False +) -> None: + if not base_command.chain or not isinstance(cmd, MultiCommand): + return + if register: + hint = ( + "It is not possible to add multi commands as children to" + " another multi command that is in chain mode." + ) + else: + hint = ( + "Found a multi command as subcommand to a multi command" + " that is in chain mode. This is not supported." + ) + raise RuntimeError( + f"{hint}. Command {base_command.name!r} is set to chain and" + f" {cmd_name!r} was added as a subcommand but it in itself is a" + f" multi command. ({cmd_name!r} is a {type(cmd).__name__}" + f" within a chained {type(base_command).__name__} named" + f" {base_command.name!r})." + ) + + +def batch(iterable: t.Iterable[V], batch_size: int) -> t.List[t.Tuple[V, ...]]: + return list(zip(*repeat(iter(iterable), batch_size))) + + +@contextmanager +def augment_usage_errors( + ctx: "Context", param: t.Optional["Parameter"] = None +) -> t.Iterator[None]: + """Context manager that attaches extra information to exceptions.""" + try: + yield + except BadParameter as e: + if e.ctx is None: + e.ctx = ctx + if param is not None and e.param is None: + e.param = param + raise + except UsageError as e: + if e.ctx is None: + e.ctx = ctx + raise + + +def iter_params_for_processing( + invocation_order: t.Sequence["Parameter"], + declaration_order: t.Sequence["Parameter"], +) -> t.List["Parameter"]: + """Given a sequence of parameters in the order as should be considered + for processing and an iterable of parameters that exist, this returns + a list in the correct order as they should be processed. + """ + + def sort_key(item: "Parameter") -> t.Tuple[bool, float]: + try: + idx: float = invocation_order.index(item) + except ValueError: + idx = float("inf") + + return not item.is_eager, idx + + return sorted(declaration_order, key=sort_key) + + +class ParameterSource(enum.Enum): + """This is an :class:`~enum.Enum` that indicates the source of a + parameter's value. + + Use :meth:`click.Context.get_parameter_source` to get the + source for a parameter by name. + + .. versionchanged:: 8.0 + Use :class:`~enum.Enum` and drop the ``validate`` method. + + .. versionchanged:: 8.0 + Added the ``PROMPT`` value. + """ + + COMMANDLINE = enum.auto() + """The value was provided by the command line args.""" + ENVIRONMENT = enum.auto() + """The value was provided with an environment variable.""" + DEFAULT = enum.auto() + """Used the default specified by the parameter.""" + DEFAULT_MAP = enum.auto() + """Used a default provided by :attr:`Context.default_map`.""" + PROMPT = enum.auto() + """Used a prompt to confirm a default or provide a value.""" + + +class Context: + """The context is a special internal object that holds state relevant + for the script execution at every single level. It's normally invisible + to commands unless they opt-in to getting access to it. + + The context is useful as it can pass internal objects around and can + control special execution features such as reading data from + environment variables. + + A context can be used as context manager in which case it will call + :meth:`close` on teardown. + + :param command: the command class for this context. 
+    :param parent: the parent context.
+    :param info_name: the info name for this invocation. Generally this
+                      is the most descriptive name for the script or
+                      command. For the toplevel script it is usually
+                      the name of the script, for commands below it it's
+                      the name of the command.
+    :param obj: an arbitrary object of user data.
+    :param auto_envvar_prefix: the prefix to use for automatic environment
+                               variables. If this is `None` then reading
+                               from environment variables is disabled. This
+                               does not affect manually set environment
+                               variables which are always read.
+    :param default_map: a dictionary (or dictionary-like object) with
+                        default values for parameters.
+    :param terminal_width: the width of the terminal. The default is to
+                           inherit from the parent context. If no context
+                           defines the terminal width then auto
+                           detection will be applied.
+    :param max_content_width: the maximum width for content rendered by
+                              Click (this currently only affects help
+                              pages). This defaults to 80 characters if
+                              not overridden. In other words: even if the
+                              terminal is larger than that, Click will not
+                              format things wider than 80 characters by
+                              default. In addition to that, formatters might
+                              add some safety margin on the right.
+    :param resilient_parsing: if this flag is enabled then Click will
+                              parse without any interactivity or callback
+                              invocation. Default values will also be
+                              ignored. This is useful for implementing
+                              things such as completion support.
+    :param allow_extra_args: if this is set to `True` then extra arguments
+                             at the end will not raise an error and will be
+                             kept on the context. The default is to inherit
+                             from the command.
+    :param allow_interspersed_args: if this is set to `False` then options
+                                    and arguments cannot be mixed. The
+                                    default is to inherit from the command.
+    :param ignore_unknown_options: instructs click to ignore options it does
+                                   not know and to keep them for later
+                                   processing.
+    :param help_option_names: optionally a list of strings that define how
+                              the default help parameter is named. The
+                              default is ``['--help']``.
+    :param token_normalize_func: an optional function that is used to
+                                 normalize tokens (options, choices,
+                                 etc.). This can for instance be used to
+                                 implement case-insensitive behavior.
+    :param color: controls if the terminal supports ANSI colors or not. The
+                  default is autodetection. This is only needed if ANSI
+                  codes are used in texts that Click prints, which is by
+                  default not the case. This for instance would affect
+                  help output.
+    :param show_default: Show the default value for commands. If this
+        value is not set, it defaults to the value from the parent
+        context. ``Command.show_default`` overrides this default for the
+        specific command.
+
+    .. versionchanged:: 8.1
+        The ``show_default`` parameter is overridden by
+        ``Command.show_default``, instead of the other way around.
+
+    .. versionchanged:: 8.0
+        The ``show_default`` parameter defaults to the value from the
+        parent context.
+
+    .. versionchanged:: 7.1
+        Added the ``show_default`` parameter.
+
+    .. versionchanged:: 4.0
+        Added the ``color``, ``ignore_unknown_options``, and
+        ``max_content_width`` parameters.
+
+    .. versionchanged:: 3.0
+        Added the ``allow_extra_args`` and ``allow_interspersed_args``
+        parameters.
+
+    .. versionchanged:: 2.0
+        Added the ``resilient_parsing``, ``help_option_names``, and
+        ``token_normalize_func`` parameters.
+    """
+
+    #: The formatter class to create with :meth:`make_formatter`.
+    #:
+    #: ..
versionadded:: 8.0 + formatter_class: t.Type["HelpFormatter"] = HelpFormatter + + def __init__( + self, + command: "Command", + parent: t.Optional["Context"] = None, + info_name: t.Optional[str] = None, + obj: t.Optional[t.Any] = None, + auto_envvar_prefix: t.Optional[str] = None, + default_map: t.Optional[t.MutableMapping[str, t.Any]] = None, + terminal_width: t.Optional[int] = None, + max_content_width: t.Optional[int] = None, + resilient_parsing: bool = False, + allow_extra_args: t.Optional[bool] = None, + allow_interspersed_args: t.Optional[bool] = None, + ignore_unknown_options: t.Optional[bool] = None, + help_option_names: t.Optional[t.List[str]] = None, + token_normalize_func: t.Optional[t.Callable[[str], str]] = None, + color: t.Optional[bool] = None, + show_default: t.Optional[bool] = None, + ) -> None: + #: the parent context or `None` if none exists. + self.parent = parent + #: the :class:`Command` for this context. + self.command = command + #: the descriptive information name + self.info_name = info_name + #: Map of parameter names to their parsed values. Parameters + #: with ``expose_value=False`` are not stored. + self.params: t.Dict[str, t.Any] = {} + #: the leftover arguments. + self.args: t.List[str] = [] + #: protected arguments. These are arguments that are prepended + #: to `args` when certain parsing scenarios are encountered but + #: must be never propagated to another arguments. This is used + #: to implement nested parsing. + self.protected_args: t.List[str] = [] + #: the collected prefixes of the command's options. + self._opt_prefixes: t.Set[str] = set(parent._opt_prefixes) if parent else set() + + if obj is None and parent is not None: + obj = parent.obj + + #: the user object stored. + self.obj: t.Any = obj + self._meta: t.Dict[str, t.Any] = getattr(parent, "meta", {}) + + #: A dictionary (-like object) with defaults for parameters. + if ( + default_map is None + and info_name is not None + and parent is not None + and parent.default_map is not None + ): + default_map = parent.default_map.get(info_name) + + self.default_map: t.Optional[t.MutableMapping[str, t.Any]] = default_map + + #: This flag indicates if a subcommand is going to be executed. A + #: group callback can use this information to figure out if it's + #: being executed directly or because the execution flow passes + #: onwards to a subcommand. By default it's None, but it can be + #: the name of the subcommand to execute. + #: + #: If chaining is enabled this will be set to ``'*'`` in case + #: any commands are executed. It is however not possible to + #: figure out which ones. If you require this knowledge you + #: should use a :func:`result_callback`. + self.invoked_subcommand: t.Optional[str] = None + + if terminal_width is None and parent is not None: + terminal_width = parent.terminal_width + + #: The width of the terminal (None is autodetection). + self.terminal_width: t.Optional[int] = terminal_width + + if max_content_width is None and parent is not None: + max_content_width = parent.max_content_width + + #: The maximum width of formatted content (None implies a sensible + #: default which is 80 for most things). + self.max_content_width: t.Optional[int] = max_content_width + + if allow_extra_args is None: + allow_extra_args = command.allow_extra_args + + #: Indicates if the context allows extra args or if it should + #: fail on parsing. + #: + #: .. 
versionadded:: 3.0
+        self.allow_extra_args = allow_extra_args
+
+        if allow_interspersed_args is None:
+            allow_interspersed_args = command.allow_interspersed_args
+
+        #: Indicates if the context allows mixing of arguments and
+        #: options or not.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_interspersed_args: bool = allow_interspersed_args
+
+        if ignore_unknown_options is None:
+            ignore_unknown_options = command.ignore_unknown_options
+
+        #: Instructs click to ignore options that a command does not
+        #: understand and to store them on the context for later
+        #: processing. This is primarily useful for situations where you
+        #: want to call into external programs. Generally this pattern is
+        #: strongly discouraged because it's not possible to losslessly
+        #: forward all arguments.
+        #:
+        #: .. versionadded:: 4.0
+        self.ignore_unknown_options: bool = ignore_unknown_options
+
+        if help_option_names is None:
+            if parent is not None:
+                help_option_names = parent.help_option_names
+            else:
+                help_option_names = ["--help"]
+
+        #: The names for the help options.
+        self.help_option_names: t.List[str] = help_option_names
+
+        if token_normalize_func is None and parent is not None:
+            token_normalize_func = parent.token_normalize_func
+
+        #: An optional normalization function for tokens. This applies to
+        #: options, choices, commands, etc.
+        self.token_normalize_func: t.Optional[
+            t.Callable[[str], str]
+        ] = token_normalize_func
+
+        #: Indicates if resilient parsing is enabled. In that case Click
+        #: will do its best to not cause any failures and default values
+        #: will be ignored. Useful for completion.
+        self.resilient_parsing: bool = resilient_parsing
+
+        # If there is no envvar prefix yet, but the parent has one and
+        # the command on this level has a name, we can expand the envvar
+        # prefix automatically.
+        if auto_envvar_prefix is None:
+            if (
+                parent is not None
+                and parent.auto_envvar_prefix is not None
+                and self.info_name is not None
+            ):
+                auto_envvar_prefix = (
+                    f"{parent.auto_envvar_prefix}_{self.info_name.upper()}"
+                )
+        else:
+            auto_envvar_prefix = auto_envvar_prefix.upper()
+
+        if auto_envvar_prefix is not None:
+            auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
+
+        self.auto_envvar_prefix: t.Optional[str] = auto_envvar_prefix
+
+        if color is None and parent is not None:
+            color = parent.color
+
+        #: Controls if styling output is wanted or not.
+        self.color: t.Optional[bool] = color
+
+        if show_default is None and parent is not None:
+            show_default = parent.show_default
+
+        #: Show option default values when formatting help text.
+        self.show_default: t.Optional[bool] = show_default
+
+        self._close_callbacks: t.List[t.Callable[[], t.Any]] = []
+        self._depth = 0
+        self._parameter_source: t.Dict[str, ParameterSource] = {}
+        self._exit_stack = ExitStack()
+
+    def to_info_dict(self) -> t.Dict[str, t.Any]:
+        """Gather information that could be useful for a tool generating
+        user-facing documentation. This traverses the entire CLI
+        structure.
+
+        .. code-block:: python
+
+            with Context(cli) as ctx:
+                info = ctx.to_info_dict()
+
+        ..
versionadded:: 8.0 + """ + return { + "command": self.command.to_info_dict(self), + "info_name": self.info_name, + "allow_extra_args": self.allow_extra_args, + "allow_interspersed_args": self.allow_interspersed_args, + "ignore_unknown_options": self.ignore_unknown_options, + "auto_envvar_prefix": self.auto_envvar_prefix, + } + + def __enter__(self) -> "Context": + self._depth += 1 + push_context(self) + return self + + def __exit__( + self, + exc_type: t.Optional[t.Type[BaseException]], + exc_value: t.Optional[BaseException], + tb: t.Optional[TracebackType], + ) -> None: + self._depth -= 1 + if self._depth == 0: + self.close() + pop_context() + + @contextmanager + def scope(self, cleanup: bool = True) -> t.Iterator["Context"]: + """This helper method can be used with the context object to promote + it to the current thread local (see :func:`get_current_context`). + The default behavior of this is to invoke the cleanup functions which + can be disabled by setting `cleanup` to `False`. The cleanup + functions are typically used for things such as closing file handles. + + If the cleanup is intended the context object can also be directly + used as a context manager. + + Example usage:: + + with ctx.scope(): + assert get_current_context() is ctx + + This is equivalent:: + + with ctx: + assert get_current_context() is ctx + + .. versionadded:: 5.0 + + :param cleanup: controls if the cleanup functions should be run or + not. The default is to run these functions. In + some situations the context only wants to be + temporarily pushed in which case this can be disabled. + Nested pushes automatically defer the cleanup. + """ + if not cleanup: + self._depth += 1 + try: + with self as rv: + yield rv + finally: + if not cleanup: + self._depth -= 1 + + @property + def meta(self) -> t.Dict[str, t.Any]: + """This is a dictionary which is shared with all the contexts + that are nested. It exists so that click utilities can store some + state here if they need to. It is however the responsibility of + that code to manage this dictionary well. + + The keys are supposed to be unique dotted strings. For instance + module paths are a good choice for it. What is stored in there is + irrelevant for the operation of click. However what is important is + that code that places data here adheres to the general semantics of + the system. + + Example usage:: + + LANG_KEY = f'{__name__}.lang' + + def set_language(value): + ctx = get_current_context() + ctx.meta[LANG_KEY] = value + + def get_language(): + return get_current_context().meta.get(LANG_KEY, 'en_US') + + .. versionadded:: 5.0 + """ + return self._meta + + def make_formatter(self) -> HelpFormatter: + """Creates the :class:`~click.HelpFormatter` for the help and + usage output. + + To quickly customize the formatter class used without overriding + this method, set the :attr:`formatter_class` attribute. + + .. versionchanged:: 8.0 + Added the :attr:`formatter_class` attribute. + """ + return self.formatter_class( + width=self.terminal_width, max_width=self.max_content_width + ) + + def with_resource(self, context_manager: t.ContextManager[V]) -> V: + """Register a resource as if it were used in a ``with`` + statement. The resource will be cleaned up when the context is + popped. + + Uses :meth:`contextlib.ExitStack.enter_context`. It calls the + resource's ``__enter__()`` method and returns the result. When + the context is popped, it closes the stack, which calls the + resource's ``__exit__()`` method. 
+ + To register a cleanup function for something that isn't a + context manager, use :meth:`call_on_close`. Or use something + from :mod:`contextlib` to turn it into a context manager first. + + .. code-block:: python + + @click.group() + @click.option("--name") + @click.pass_context + def cli(ctx): + ctx.obj = ctx.with_resource(connect_db(name)) + + :param context_manager: The context manager to enter. + :return: Whatever ``context_manager.__enter__()`` returns. + + .. versionadded:: 8.0 + """ + return self._exit_stack.enter_context(context_manager) + + def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: + """Register a function to be called when the context tears down. + + This can be used to close resources opened during the script + execution. Resources that support Python's context manager + protocol which would be used in a ``with`` statement should be + registered with :meth:`with_resource` instead. + + :param f: The function to execute on teardown. + """ + return self._exit_stack.callback(f) + + def close(self) -> None: + """Invoke all close callbacks registered with + :meth:`call_on_close`, and exit all context managers entered + with :meth:`with_resource`. + """ + self._exit_stack.close() + # In case the context is reused, create a new exit stack. + self._exit_stack = ExitStack() + + @property + def command_path(self) -> str: + """The computed command path. This is used for the ``usage`` + information on the help page. It's automatically created by + combining the info names of the chain of contexts to the root. + """ + rv = "" + if self.info_name is not None: + rv = self.info_name + if self.parent is not None: + parent_command_path = [self.parent.command_path] + + if isinstance(self.parent.command, Command): + for param in self.parent.command.get_params(self): + parent_command_path.extend(param.get_usage_pieces(self)) + + rv = f"{' '.join(parent_command_path)} {rv}" + return rv.lstrip() + + def find_root(self) -> "Context": + """Finds the outermost context.""" + node = self + while node.parent is not None: + node = node.parent + return node + + def find_object(self, object_type: t.Type[V]) -> t.Optional[V]: + """Finds the closest object of a given type.""" + node: t.Optional["Context"] = self + + while node is not None: + if isinstance(node.obj, object_type): + return node.obj + + node = node.parent + + return None + + def ensure_object(self, object_type: t.Type[V]) -> V: + """Like :meth:`find_object` but sets the innermost object to a + new instance of `object_type` if it does not exist. + """ + rv = self.find_object(object_type) + if rv is None: + self.obj = rv = object_type() + return rv + + @t.overload + def lookup_default( + self, name: str, call: "te.Literal[True]" = True + ) -> t.Optional[t.Any]: + ... + + @t.overload + def lookup_default( + self, name: str, call: "te.Literal[False]" = ... + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + ... + + def lookup_default(self, name: str, call: bool = True) -> t.Optional[t.Any]: + """Get the default for a parameter from :attr:`default_map`. + + :param name: Name of the parameter. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. 
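The ``with_resource``, ``call_on_close``, and ``ensure_object`` helpers defined in this span cover most context-lifecycle needs. A minimal sketch, with :mod:`sqlite3` standing in for any context-managed resource::

    import sqlite3
    import click

    @click.group()
    @click.pass_context
    def cli(ctx):
        # with_resource() enters the connection's context manager now and
        # schedules its __exit__ for when this Context is torn down.
        ctx.obj = ctx.with_resource(sqlite3.connect(":memory:"))
        # call_on_close() registers a plain teardown callback.
        ctx.call_on_close(lambda: click.echo("context closed"))

    @cli.command()
    @click.pass_obj
    def ping(db):
        click.echo(db.execute("SELECT 1").fetchone()[0])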
+ """ + if self.default_map is not None: + value = self.default_map.get(name) + + if call and callable(value): + return value() + + return value + + return None + + def fail(self, message: str) -> "te.NoReturn": + """Aborts the execution of the program with a specific error + message. + + :param message: the error message to fail with. + """ + raise UsageError(message, self) + + def abort(self) -> "te.NoReturn": + """Aborts the script.""" + raise Abort() + + def exit(self, code: int = 0) -> "te.NoReturn": + """Exits the application with a given exit code.""" + raise Exit(code) + + def get_usage(self) -> str: + """Helper method to get formatted usage string for the current + context and command. + """ + return self.command.get_usage(self) + + def get_help(self) -> str: + """Helper method to get formatted help page for the current + context and command. + """ + return self.command.get_help(self) + + def _make_sub_context(self, command: "Command") -> "Context": + """Create a new context of the same type as this context, but + for a new command. + + :meta private: + """ + return type(self)(command, info_name=command.name, parent=self) + + @t.overload + def invoke( + __self, # noqa: B902 + __callback: "t.Callable[..., V]", + *args: t.Any, + **kwargs: t.Any, + ) -> V: + ... + + @t.overload + def invoke( + __self, # noqa: B902 + __callback: "Command", + *args: t.Any, + **kwargs: t.Any, + ) -> t.Any: + ... + + def invoke( + __self, # noqa: B902 + __callback: t.Union["Command", "t.Callable[..., V]"], + *args: t.Any, + **kwargs: t.Any, + ) -> t.Union[t.Any, V]: + """Invokes a command callback in exactly the way it expects. There + are two ways to invoke this method: + + 1. the first argument can be a callback and all other arguments and + keyword arguments are forwarded directly to the function. + 2. the first argument is a click command object. In that case all + arguments are forwarded as well but proper click parameters + (options and click arguments) must be keyword arguments and Click + will fill in defaults. + + Note that before Click 3.2 keyword arguments were not properly filled + in against the intention of this code and no context was created. For + more information about this change and why it was done in a bugfix + release see :ref:`upgrade-to-3.2`. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if :meth:`forward` is called at multiple levels. + """ + if isinstance(__callback, Command): + other_cmd = __callback + + if other_cmd.callback is None: + raise TypeError( + "The given command does not have a callback that can be invoked." + ) + else: + __callback = t.cast("t.Callable[..., V]", other_cmd.callback) + + ctx = __self._make_sub_context(other_cmd) + + for param in other_cmd.params: + if param.name not in kwargs and param.expose_value: + kwargs[param.name] = param.type_cast_value( # type: ignore + ctx, param.get_default(ctx) + ) + + # Track all kwargs as params, so that forward() will pass + # them on in subsequent calls. + ctx.params.update(kwargs) + else: + ctx = __self + + with augment_usage_errors(__self): + with ctx: + return __callback(*args, **kwargs) + + def forward( + __self, __cmd: "Command", *args: t.Any, **kwargs: t.Any # noqa: B902 + ) -> t.Any: + """Similar to :meth:`invoke` but fills in default keyword + arguments from the current context if the other command expects + it. This cannot invoke callbacks directly, only other commands. + + .. 
versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if ``forward`` is called at multiple levels. + """ + # Can only forward to other commands, not direct callbacks. + if not isinstance(__cmd, Command): + raise TypeError("Callback is not a command.") + + for param in __self.params: + if param not in kwargs: + kwargs[param] = __self.params[param] + + return __self.invoke(__cmd, *args, **kwargs) + + def set_parameter_source(self, name: str, source: ParameterSource) -> None: + """Set the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + :param name: The name of the parameter. + :param source: A member of :class:`~click.core.ParameterSource`. + """ + self._parameter_source[name] = source + + def get_parameter_source(self, name: str) -> t.Optional[ParameterSource]: + """Get the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + This can be useful for determining when a user specified a value + on the command line that is the same as the default value. It + will be :attr:`~click.core.ParameterSource.DEFAULT` only if the + value was actually taken from the default. + + :param name: The name of the parameter. + :rtype: ParameterSource + + .. versionchanged:: 8.0 + Returns ``None`` if the parameter was not provided from any + source. + """ + return self._parameter_source.get(name) + + +class BaseCommand: + """The base command implements the minimal API contract of commands. + Most code will never use this as it does not implement a lot of useful + functionality but it can act as the direct subclass of alternative + parsing methods that do not depend on the Click parser. + + For instance, this can be used to bridge Click and other systems like + argparse or docopt. + + Because base commands do not implement a lot of the API that other + parts of Click take for granted, they are not supported for all + operations. For instance, they cannot be used with the decorators + usually and they have no built-in callback system. + + .. versionchanged:: 2.0 + Added the `context_settings` parameter. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + """ + + #: The context class to create with :meth:`make_context`. + #: + #: .. versionadded:: 8.0 + context_class: t.Type[Context] = Context + #: the default for the :attr:`Context.allow_extra_args` flag. + allow_extra_args = False + #: the default for the :attr:`Context.allow_interspersed_args` flag. + allow_interspersed_args = True + #: the default for the :attr:`Context.ignore_unknown_options` flag. + ignore_unknown_options = False + + def __init__( + self, + name: t.Optional[str], + context_settings: t.Optional[t.MutableMapping[str, t.Any]] = None, + ) -> None: + #: the name the command thinks it has. Upon registering a command + #: on a :class:`Group` the group will default the command name + #: with this information. You should instead use the + #: :class:`Context`\'s :attr:`~Context.info_name` attribute. + self.name = name + + if context_settings is None: + context_settings = {} + + #: an optional dictionary with defaults passed to the context. + self.context_settings: t.MutableMapping[str, t.Any] = context_settings + + def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. 
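A short sketch of ``get_parameter_source`` in practice; the ``APP_RETRIES`` environment variable name is illustrative only::

    import click
    from click.core import ParameterSource

    @click.command()
    @click.option("--retries", default=3, envvar="APP_RETRIES")
    @click.pass_context
    def deploy(ctx, retries):
        source = ctx.get_parameter_source("retries")
        # Distinguishes an explicit `--retries 3` from the default 3.
        if source is ParameterSource.DEFAULT:
            click.echo(f"retries defaulted to {retries}")
        else:
            click.echo(f"retries={retries} via {source.name}")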
This traverses the entire structure + below this command. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + :param ctx: A :class:`Context` representing this command. + + .. versionadded:: 8.0 + """ + return {"name": self.name} + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def get_usage(self, ctx: Context) -> str: + raise NotImplementedError("Base commands cannot get usage") + + def get_help(self, ctx: Context) -> str: + raise NotImplementedError("Base commands cannot get help") + + def make_context( + self, + info_name: t.Optional[str], + args: t.List[str], + parent: t.Optional[Context] = None, + **extra: t.Any, + ) -> Context: + """This function when given an info name and arguments will kick + off the parsing and create a new :class:`Context`. It does not + invoke the actual command callback though. + + To quickly customize the context class used without overriding + this method, set the :attr:`context_class` attribute. + + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it's usually + the name of the script, for commands below it's + the name of the command. + :param args: the arguments to parse as list of strings. + :param parent: the parent context if available. + :param extra: extra keyword arguments forwarded to the context + constructor. + + .. versionchanged:: 8.0 + Added the :attr:`context_class` attribute. + """ + for key, value in self.context_settings.items(): + if key not in extra: + extra[key] = value + + ctx = self.context_class( + self, info_name=info_name, parent=parent, **extra # type: ignore + ) + + with ctx.scope(cleanup=False): + self.parse_args(ctx, args) + return ctx + + def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: + """Given a context and a list of arguments this creates the parser + and parses the arguments, then modifies the context as necessary. + This is automatically invoked by :meth:`make_context`. + """ + raise NotImplementedError("Base commands do not know how to parse arguments.") + + def invoke(self, ctx: Context) -> t.Any: + """Given a context, this invokes the command. The default + implementation is raising a not implemented error. + """ + raise NotImplementedError("Base commands are not invocable by default") + + def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: + """Return a list of completions for the incomplete value. Looks + at the names of chained multi-commands. + + Any command could be part of a chained multi-command, so sibling + commands are valid at any point during command completion. Other + command classes will return more completions. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. 
versionadded:: 8.0
+        """
+        from click.shell_completion import CompletionItem
+
+        results: t.List["CompletionItem"] = []
+
+        while ctx.parent is not None:
+            ctx = ctx.parent
+
+            if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
+                results.extend(
+                    CompletionItem(name, help=command.get_short_help_str())
+                    for name, command in _complete_visible_commands(ctx, incomplete)
+                    if name not in ctx.protected_args
+                )
+
+        return results
+
+    @t.overload
+    def main(
+        self,
+        args: t.Optional[t.Sequence[str]] = None,
+        prog_name: t.Optional[str] = None,
+        complete_var: t.Optional[str] = None,
+        standalone_mode: "te.Literal[True]" = True,
+        **extra: t.Any,
+    ) -> "te.NoReturn":
+        ...
+
+    @t.overload
+    def main(
+        self,
+        args: t.Optional[t.Sequence[str]] = None,
+        prog_name: t.Optional[str] = None,
+        complete_var: t.Optional[str] = None,
+        standalone_mode: bool = ...,
+        **extra: t.Any,
+    ) -> t.Any:
+        ...
+
+    def main(
+        self,
+        args: t.Optional[t.Sequence[str]] = None,
+        prog_name: t.Optional[str] = None,
+        complete_var: t.Optional[str] = None,
+        standalone_mode: bool = True,
+        windows_expand_args: bool = True,
+        **extra: t.Any,
+    ) -> t.Any:
+        """This is the way to invoke a script with all the bells and
+        whistles as a command line application. This will always terminate
+        the application after a call. If this is not wanted, ``SystemExit``
+        needs to be caught.
+
+        This method is also available by directly calling the instance of
+        a :class:`Command`.
+
+        :param args: the arguments that should be used for parsing. If not
+                     provided, ``sys.argv[1:]`` is used.
+        :param prog_name: the program name that should be used. By default
+                          the program name is constructed by taking the file
+                          name from ``sys.argv[0]``.
+        :param complete_var: the environment variable that controls the
+                             bash completion support. The default is
+                             ``"_<prog_name>_COMPLETE"`` with prog_name in
+                             uppercase.
+        :param standalone_mode: the default behavior is to invoke the script
+                                in standalone mode. Click will then
+                                handle exceptions and convert them into
+                                error messages and the function will never
+                                return but shut down the interpreter. If
+                                this is set to `False` they will be
+                                propagated to the caller and the return
+                                value of this function is the return value
+                                of :meth:`invoke`.
+        :param windows_expand_args: Expand glob patterns, user dir, and
+                                    env vars in command line args on Windows.
+        :param extra: extra keyword arguments are forwarded to the context
+                      constructor. See :class:`Context` for more information.
+
+        .. versionchanged:: 8.0.1
+            Added the ``windows_expand_args`` parameter to allow
+            disabling command line arg expansion on Windows.
+
+        .. versionchanged:: 8.0
+            When taking arguments from ``sys.argv`` on Windows, glob
+            patterns, user dir, and env vars are expanded.
+
+        .. versionchanged:: 3.0
+            Added the ``standalone_mode`` parameter.
+        """
+        if args is None:
+            args = sys.argv[1:]
+
+            if os.name == "nt" and windows_expand_args:
+                args = _expand_args(args)
+        else:
+            args = list(args)
+
+        if prog_name is None:
+            prog_name = _detect_program_name()
+
+        # Process shell completion requests and exit early.
+        self._main_shell_completion(extra, prog_name, complete_var)
+
+        try:
+            try:
+                with self.make_context(prog_name, args, **extra) as ctx:
+                    rv = self.invoke(ctx)
+                    if not standalone_mode:
+                        return rv
+                    # it's not safe to `ctx.exit(rv)` here!
+ # note that `rv` may actually contain data like "1" which + # has obvious effects + # more subtle case: `rv=[None, None]` can come out of + # chained commands which all returned `None` -- so it's not + # even always obvious that `rv` indicates success/failure + # by its truthiness/falsiness + ctx.exit() + except (EOFError, KeyboardInterrupt) as e: + echo(file=sys.stderr) + raise Abort() from e + except ClickException as e: + if not standalone_mode: + raise + e.show() + sys.exit(e.exit_code) + except OSError as e: + if e.errno == errno.EPIPE: + sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout)) + sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr)) + sys.exit(1) + else: + raise + except Exit as e: + if standalone_mode: + sys.exit(e.exit_code) + else: + # in non-standalone mode, return the exit code + # note that this is only reached if `self.invoke` above raises + # an Exit explicitly -- thus bypassing the check there which + # would return its result + # the results of non-standalone execution may therefore be + # somewhat ambiguous: if there are codepaths which lead to + # `ctx.exit(1)` and to `return 1`, the caller won't be able to + # tell the difference between the two + return e.exit_code + except Abort: + if not standalone_mode: + raise + echo(_("Aborted!"), file=sys.stderr) + sys.exit(1) + + def _main_shell_completion( + self, + ctx_args: t.MutableMapping[str, t.Any], + prog_name: str, + complete_var: t.Optional[str] = None, + ) -> None: + """Check if the shell is asking for tab completion, process + that, then exit early. Called from :meth:`main` before the + program is invoked. + + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. Defaults to + ``_{PROG_NAME}_COMPLETE``. + + .. versionchanged:: 8.2.0 + Dots (``.``) in ``prog_name`` are replaced with underscores (``_``). + """ + if complete_var is None: + complete_name = prog_name.replace("-", "_").replace(".", "_") + complete_var = f"_{complete_name}_COMPLETE".upper() + + instruction = os.environ.get(complete_var) + + if not instruction: + return + + from .shell_completion import shell_complete + + rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) + sys.exit(rv) + + def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Alias for :meth:`main`.""" + return self.main(*args, **kwargs) + + +class Command(BaseCommand): + """Commands are the basic building block of command line interfaces in + Click. A basic command handles command line parsing and might dispatch + more parsing to commands nested below it. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + :param callback: the callback to invoke. This is optional. + :param params: the parameters to register with this command. This can + be either :class:`Option` or :class:`Argument` objects. + :param help: the help string to use for this command. + :param epilog: like the help string but it's printed at the end of the + help page after everything else. + :param short_help: the short help to use for this command. This is + shown on the command listing of the parent command. + :param add_help_option: by default each command registers a ``--help`` + option. This can be disabled by this parameter. + :param no_args_is_help: this controls what happens if no arguments are + provided. 
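The ``standalone_mode`` flag documented above turns ``main`` from a process entry point into an ordinary function call. A minimal sketch::

    import click

    @click.command()
    @click.option("--name", default="world")
    def hello(name):
        click.echo(f"Hello {name}")
        return len(name)

    # With standalone_mode=False, main() propagates exceptions and returns
    # invoke()'s return value instead of calling sys.exit().
    rv = hello.main(["--name", "click"], standalone_mode=False)
    assert rv == 5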
This option is disabled by default. + If enabled this will add ``--help`` as argument + if no arguments are passed + :param hidden: hide this command from help outputs. + + :param deprecated: issues a message indicating that + the command is deprecated. + + .. versionchanged:: 8.1 + ``help``, ``epilog``, and ``short_help`` are stored unprocessed, + all formatting is done when outputting help text, not at init, + and is done even if not using the ``@command`` decorator. + + .. versionchanged:: 8.0 + Added a ``repr`` showing the command name. + + .. versionchanged:: 7.1 + Added the ``no_args_is_help`` parameter. + + .. versionchanged:: 2.0 + Added the ``context_settings`` parameter. + """ + + def __init__( + self, + name: t.Optional[str], + context_settings: t.Optional[t.MutableMapping[str, t.Any]] = None, + callback: t.Optional[t.Callable[..., t.Any]] = None, + params: t.Optional[t.List["Parameter"]] = None, + help: t.Optional[str] = None, + epilog: t.Optional[str] = None, + short_help: t.Optional[str] = None, + options_metavar: t.Optional[str] = "[OPTIONS]", + add_help_option: bool = True, + no_args_is_help: bool = False, + hidden: bool = False, + deprecated: bool = False, + ) -> None: + super().__init__(name, context_settings) + #: the callback to execute when the command fires. This might be + #: `None` in which case nothing happens. + self.callback = callback + #: the list of parameters for this command in the order they + #: should show up in the help page and execute. Eager parameters + #: will automatically be handled before non eager ones. + self.params: t.List["Parameter"] = params or [] + self.help = help + self.epilog = epilog + self.options_metavar = options_metavar + self.short_help = short_help + self.add_help_option = add_help_option + self.no_args_is_help = no_args_is_help + self.hidden = hidden + self.deprecated = deprecated + + def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict(ctx) + info_dict.update( + params=[param.to_info_dict() for param in self.get_params(ctx)], + help=self.help, + epilog=self.epilog, + short_help=self.short_help, + hidden=self.hidden, + deprecated=self.deprecated, + ) + return info_dict + + def get_usage(self, ctx: Context) -> str: + """Formats the usage line into a string and returns it. + + Calls :meth:`format_usage` internally. + """ + formatter = ctx.make_formatter() + self.format_usage(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_params(self, ctx: Context) -> t.List["Parameter"]: + rv = self.params + help_option = self.get_help_option(ctx) + + if help_option is not None: + rv = [*rv, help_option] + + return rv + + def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the usage line into the formatter. + + This is a low-level method called by :meth:`get_usage`. + """ + pieces = self.collect_usage_pieces(ctx) + formatter.write_usage(ctx.command_path, " ".join(pieces)) + + def collect_usage_pieces(self, ctx: Context) -> t.List[str]: + """Returns all the pieces that go into the usage line and returns + it as a list of strings. 
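As the constructor above shows, a :class:`Command` can also be assembled without decorators. A minimal sketch; the ``greet``/``_greet`` names are illustrative::

    import click

    def _greet(name):
        click.echo(f"Hi {name}")

    # The constructor takes the callback and parameter objects directly.
    greet = click.Command(
        "greet",
        callback=_greet,
        params=[click.Option(["--name"], default="there")],
        help="Say hi.",
    )
    # greet.main(["--name", "Ada"]) would parse and invoke it.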
+ """ + rv = [self.options_metavar] if self.options_metavar else [] + + for param in self.get_params(ctx): + rv.extend(param.get_usage_pieces(ctx)) + + return rv + + def get_help_option_names(self, ctx: Context) -> t.List[str]: + """Returns the names for the help option.""" + all_names = set(ctx.help_option_names) + for param in self.params: + all_names.difference_update(param.opts) + all_names.difference_update(param.secondary_opts) + return list(all_names) + + def get_help_option(self, ctx: Context) -> t.Optional["Option"]: + """Returns the help option object.""" + help_options = self.get_help_option_names(ctx) + + if not help_options or not self.add_help_option: + return None + + def show_help(ctx: Context, param: "Parameter", value: str) -> None: + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + return Option( + help_options, + is_flag=True, + is_eager=True, + expose_value=False, + callback=show_help, + help=_("Show this message and exit."), + ) + + def make_parser(self, ctx: Context) -> OptionParser: + """Creates the underlying option parser for this command.""" + parser = OptionParser(ctx) + for param in self.get_params(ctx): + param.add_to_parser(parser, ctx) + return parser + + def get_help(self, ctx: Context) -> str: + """Formats the help into a string and returns it. + + Calls :meth:`format_help` internally. + """ + formatter = ctx.make_formatter() + self.format_help(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_short_help_str(self, limit: int = 45) -> str: + """Gets short help for the command or makes it by shortening the + long help string. + """ + if self.short_help: + text = inspect.cleandoc(self.short_help) + elif self.help: + text = make_default_short_help(self.help, limit) + else: + text = "" + + if self.deprecated: + text = _("(Deprecated) {text}").format(text=text) + + return text.strip() + + def format_help(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help into the formatter if it exists. + + This is a low-level method called by :meth:`get_help`. 
+ + This calls the following methods: + + - :meth:`format_usage` + - :meth:`format_help_text` + - :meth:`format_options` + - :meth:`format_epilog` + """ + self.format_usage(ctx, formatter) + self.format_help_text(ctx, formatter) + self.format_options(ctx, formatter) + self.format_epilog(ctx, formatter) + + def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help text to the formatter if it exists.""" + if self.help is not None: + # truncate the help text to the first form feed + text = inspect.cleandoc(self.help).partition("\f")[0] + else: + text = "" + + if self.deprecated: + text = _("(Deprecated) {text}").format(text=text) + + if text: + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(text) + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes all the options into the formatter if they exist.""" + opts = [] + for param in self.get_params(ctx): + rv = param.get_help_record(ctx) + if rv is not None: + opts.append(rv) + + if opts: + with formatter.section(_("Options")): + formatter.write_dl(opts) + + def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the epilog into the formatter if it exists.""" + if self.epilog: + epilog = inspect.cleandoc(self.epilog) + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(epilog) + + def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + parser = self.make_parser(ctx) + opts, args, param_order = parser.parse_args(args=args) + + for param in iter_params_for_processing(param_order, self.get_params(ctx)): + value, args = param.handle_parse_result(ctx, opts, args) + + if args and not ctx.allow_extra_args and not ctx.resilient_parsing: + ctx.fail( + ngettext( + "Got unexpected extra argument ({args})", + "Got unexpected extra arguments ({args})", + len(args), + ).format(args=" ".join(map(str, args))) + ) + + ctx.args = args + ctx._opt_prefixes.update(parser._opt_prefixes) + return args + + def invoke(self, ctx: Context) -> t.Any: + """Given a context, this invokes the attached callback (if it exists) + in the right way. + """ + if self.deprecated: + message = _( + "DeprecationWarning: The command {name!r} is deprecated." + ).format(name=self.name) + echo(style(message, fg="red"), err=True) + + if self.callback is not None: + return ctx.invoke(self.callback, **ctx.params) + + def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: + """Return a list of completions for the incomplete value. Looks + at the names of options and chained multi-commands. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. 
versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results: t.List["CompletionItem"] = [] + + if incomplete and not incomplete[0].isalnum(): + for param in self.get_params(ctx): + if ( + not isinstance(param, Option) + or param.hidden + or ( + not param.multiple + and ctx.get_parameter_source(param.name) # type: ignore + is ParameterSource.COMMANDLINE + ) + ): + continue + + results.extend( + CompletionItem(name, help=param.help) + for name in [*param.opts, *param.secondary_opts] + if name.startswith(incomplete) + ) + + results.extend(super().shell_complete(ctx, incomplete)) + return results + + +class MultiCommand(Command): + """A multi command is the basic implementation of a command that + dispatches to subcommands. The most common version is the + :class:`Group`. + + :param invoke_without_command: this controls how the multi command itself + is invoked. By default it's only invoked + if a subcommand is provided. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is enabled by default if + `invoke_without_command` is disabled or disabled + if it's enabled. If enabled this will add + ``--help`` as argument if no arguments are + passed. + :param subcommand_metavar: the string that is used in the documentation + to indicate the subcommand place. + :param chain: if this is set to `True` chaining of multiple subcommands + is enabled. This restricts the form of commands in that + they cannot have optional arguments but it allows + multiple commands to be chained together. + :param result_callback: The result callback to attach to this multi + command. This can be set or changed later with the + :meth:`result_callback` decorator. + :param attrs: Other command arguments described in :class:`Command`. + """ + + allow_extra_args = True + allow_interspersed_args = False + + def __init__( + self, + name: t.Optional[str] = None, + invoke_without_command: bool = False, + no_args_is_help: t.Optional[bool] = None, + subcommand_metavar: t.Optional[str] = None, + chain: bool = False, + result_callback: t.Optional[t.Callable[..., t.Any]] = None, + **attrs: t.Any, + ) -> None: + super().__init__(name, **attrs) + + if no_args_is_help is None: + no_args_is_help = not invoke_without_command + + self.no_args_is_help = no_args_is_help + self.invoke_without_command = invoke_without_command + + if subcommand_metavar is None: + if chain: + subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." + else: + subcommand_metavar = "COMMAND [ARGS]..." + + self.subcommand_metavar = subcommand_metavar + self.chain = chain + # The result callback that is stored. This can be set or + # overridden with the :func:`result_callback` decorator. + self._result_callback = result_callback + + if self.chain: + for param in self.params: + if isinstance(param, Argument) and not param.required: + raise RuntimeError( + "Multi commands in chain mode cannot have" + " optional arguments." 
+                    )
+
+    def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
+        info_dict = super().to_info_dict(ctx)
+        commands = {}
+
+        for name in self.list_commands(ctx):
+            command = self.get_command(ctx, name)
+
+            if command is None:
+                continue
+
+            sub_ctx = ctx._make_sub_context(command)
+
+            with sub_ctx.scope(cleanup=False):
+                commands[name] = command.to_info_dict(sub_ctx)
+
+        info_dict.update(commands=commands, chain=self.chain)
+        return info_dict
+
+    def collect_usage_pieces(self, ctx: Context) -> t.List[str]:
+        rv = super().collect_usage_pieces(ctx)
+        rv.append(self.subcommand_metavar)
+        return rv
+
+    def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
+        super().format_options(ctx, formatter)
+        self.format_commands(ctx, formatter)
+
+    def result_callback(self, replace: bool = False) -> t.Callable[[F], F]:
+        """Adds a result callback to the command. By default, if a
+        result callback is already registered, this will chain them, but
+        this can be disabled with the `replace` parameter. The result
+        callback is invoked with the return value of the subcommand
+        (or the list of return values from all subcommands if chaining
+        is enabled) as well as the parameters as they would be passed
+        to the main callback.
+
+        Example::
+
+            @click.group()
+            @click.option('-i', '--input', default=23)
+            def cli(input):
+                return 42
+
+            @cli.result_callback()
+            def process_result(result, input):
+                return result + input
+
+        :param replace: if set to `True` an already existing result
+                        callback will be removed.
+
+        .. versionchanged:: 8.0
+            Renamed from ``resultcallback``.
+
+        .. versionadded:: 3.0
+        """
+
+        def decorator(f: F) -> F:
+            old_callback = self._result_callback
+
+            if old_callback is None or replace:
+                self._result_callback = f
+                return f
+
+            def function(__value, *args, **kwargs):  # type: ignore
+                inner = old_callback(__value, *args, **kwargs)
+                return f(inner, *args, **kwargs)
+
+            self._result_callback = rv = update_wrapper(t.cast(F, function), f)
+            return rv
+
+        return decorator
+
+    def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
+        """Extra format method for multi commands that adds all the commands
+        after the options.
+        """
+        commands = []
+        for subcommand in self.list_commands(ctx):
+            cmd = self.get_command(ctx, subcommand)
+            # What is this, the tool lied about a command.
Ignore it + if cmd is None: + continue + if cmd.hidden: + continue + + commands.append((subcommand, cmd)) + + # allow for 3 times the default spacing + if len(commands): + limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) + + rows = [] + for subcommand, cmd in commands: + help = cmd.get_short_help_str(limit) + rows.append((subcommand, help)) + + if rows: + with formatter.section(_("Commands")): + formatter.write_dl(rows) + + def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + rest = super().parse_args(ctx, args) + + if self.chain: + ctx.protected_args = rest + ctx.args = [] + elif rest: + ctx.protected_args, ctx.args = rest[:1], rest[1:] + + return ctx.args + + def invoke(self, ctx: Context) -> t.Any: + def _process_result(value: t.Any) -> t.Any: + if self._result_callback is not None: + value = ctx.invoke(self._result_callback, value, **ctx.params) + return value + + if not ctx.protected_args: + if self.invoke_without_command: + # No subcommand was invoked, so the result callback is + # invoked with the group return value for regular + # groups, or an empty list for chained groups. + with ctx: + rv = super().invoke(ctx) + return _process_result([] if self.chain else rv) + ctx.fail(_("Missing command.")) + + # Fetch args back out + args = [*ctx.protected_args, *ctx.args] + ctx.args = [] + ctx.protected_args = [] + + # If we're not in chain mode, we only allow the invocation of a + # single command but we also inform the current context about the + # name of the command to invoke. + if not self.chain: + # Make sure the context is entered so we do not clean up + # resources until the result processor has worked. + with ctx: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + ctx.invoked_subcommand = cmd_name + super().invoke(ctx) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) + with sub_ctx: + return _process_result(sub_ctx.command.invoke(sub_ctx)) + + # In chain mode we create the contexts step by step, but after the + # base command has been invoked. Because at that point we do not + # know the subcommands yet, the invoked subcommand attribute is + # set to ``*`` to inform the command that subcommands are executed + # but nothing else. + with ctx: + ctx.invoked_subcommand = "*" if args else None + super().invoke(ctx) + + # Otherwise we make every single context and invoke them in a + # chain. In that case the return value to the result processor + # is the list of all invoked subcommand's results. + contexts = [] + while args: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + sub_ctx = cmd.make_context( + cmd_name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + ) + contexts.append(sub_ctx) + args, sub_ctx.args = sub_ctx.args, [] + + rv = [] + for sub_ctx in contexts: + with sub_ctx: + rv.append(sub_ctx.command.invoke(sub_ctx)) + return _process_result(rv) + + def resolve_command( + self, ctx: Context, args: t.List[str] + ) -> t.Tuple[t.Optional[str], t.Optional[Command], t.List[str]]: + cmd_name = make_str(args[0]) + original_cmd_name = cmd_name + + # Get the command + cmd = self.get_command(ctx, cmd_name) + + # If we can't find the command but there is a normalization + # function available, we try with that one. 
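+        # Editorial sketch (not part of click's source): token
+        # normalization is what makes case-insensitive CLIs possible,
+        # e.g.
+        #
+        #     @click.group(context_settings={"token_normalize_func": str.lower})
+        #     def cli():
+        #         pass
+        #
+        # With this, ``cli SYNC`` resolves the subcommand ``sync``.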
+ if cmd is None and ctx.token_normalize_func is not None: + cmd_name = ctx.token_normalize_func(cmd_name) + cmd = self.get_command(ctx, cmd_name) + + # If we don't find the command we want to show an error message + # to the user that it was not provided. However, there is + # something else we should do: if the first argument looks like + # an option we want to kick off parsing again for arguments to + # resolve things like --help which now should go to the main + # place. + if cmd is None and not ctx.resilient_parsing: + if split_opt(cmd_name)[0]: + self.parse_args(ctx, ctx.args) + ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name)) + return cmd_name if cmd else None, cmd, args[1:] + + def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: + """Given a context and a command name, this returns a + :class:`Command` object if it exists or returns `None`. + """ + raise NotImplementedError + + def list_commands(self, ctx: Context) -> t.List[str]: + """Returns a list of subcommand names in the order they should + appear. + """ + return [] + + def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: + """Return a list of completions for the incomplete value. Looks + at the names of options, subcommands, and chained + multi-commands. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results = [ + CompletionItem(name, help=command.get_short_help_str()) + for name, command in _complete_visible_commands(ctx, incomplete) + ] + results.extend(super().shell_complete(ctx, incomplete)) + return results + + +class Group(MultiCommand): + """A group allows a command to have subcommands attached. This is + the most common way to implement nesting in Click. + + :param name: The name of the group command. + :param commands: A dict mapping names to :class:`Command` objects. + Can also be a list of :class:`Command`, which will use + :attr:`Command.name` to create the dict. + :param attrs: Other command arguments described in + :class:`MultiCommand`, :class:`Command`, and + :class:`BaseCommand`. + + .. versionchanged:: 8.0 + The ``commands`` argument can be a list of command objects. + """ + + #: If set, this is used by the group's :meth:`command` decorator + #: as the default :class:`Command` class. This is useful to make all + #: subcommands use a custom command class. + #: + #: .. versionadded:: 8.0 + command_class: t.Optional[t.Type[Command]] = None + + #: If set, this is used by the group's :meth:`group` decorator + #: as the default :class:`Group` class. This is useful to make all + #: subgroups use a custom group class. + #: + #: If set to the special value :class:`type` (literally + #: ``group_class = type``), this group's class will be used as the + #: default class. This makes a custom group class continue to make + #: custom groups. + #: + #: .. 
versionadded:: 8.0 + group_class: t.Optional[t.Union[t.Type["Group"], t.Type[type]]] = None + # Literal[type] isn't valid, so use Type[type] + + def __init__( + self, + name: t.Optional[str] = None, + commands: t.Optional[ + t.Union[t.MutableMapping[str, Command], t.Sequence[Command]] + ] = None, + **attrs: t.Any, + ) -> None: + super().__init__(name, **attrs) + + if commands is None: + commands = {} + elif isinstance(commands, abc.Sequence): + commands = {c.name: c for c in commands if c.name is not None} + + #: The registered subcommands by their exported names. + self.commands: t.MutableMapping[str, Command] = commands + + def add_command(self, cmd: Command, name: t.Optional[str] = None) -> None: + """Registers another :class:`Command` with this group. If the name + is not provided, the name of the command is used. + """ + name = name or cmd.name + if name is None: + raise TypeError("Command has no name.") + _check_multicommand(self, name, cmd, register=True) + self.commands[name] = cmd + + @t.overload + def command(self, __func: t.Callable[..., t.Any]) -> Command: + ... + + @t.overload + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Command]: + ... + + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], Command], Command]: + """A shortcut decorator for declaring and attaching a command to + the group. This takes the same arguments as :func:`command` and + immediately registers the created command with this group by + calling :meth:`add_command`. + + To customize the command class used, set the + :attr:`command_class` attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`command_class` attribute. + """ + from .decorators import command + + func: t.Optional[t.Callable[..., t.Any]] = None + + if args and callable(args[0]): + assert ( + len(args) == 1 and not kwargs + ), "Use 'command(**kwargs)(callable)' to provide arguments." + (func,) = args + args = () + + if self.command_class and kwargs.get("cls") is None: + kwargs["cls"] = self.command_class + + def decorator(f: t.Callable[..., t.Any]) -> Command: + cmd: Command = command(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + @t.overload + def group(self, __func: t.Callable[..., t.Any]) -> "Group": + ... + + @t.overload + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], "Group"]: + ... + + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Union[t.Callable[[t.Callable[..., t.Any]], "Group"], "Group"]: + """A shortcut decorator for declaring and attaching a group to + the group. This takes the same arguments as :func:`group` and + immediately registers the created group with this group by + calling :meth:`add_command`. + + To customize the group class used, set the :attr:`group_class` + attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`group_class` attribute. + """ + from .decorators import group + + func: t.Optional[t.Callable[..., t.Any]] = None + + if args and callable(args[0]): + assert ( + len(args) == 1 and not kwargs + ), "Use 'group(**kwargs)(callable)' to provide arguments." 
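+        # Reached when used as a bare decorator without parentheses,
+        # e.g. ``@parent.group`` applied directly to a function:
+        # ``args`` is then exactly ``(func,)``.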
+ (func,) = args + args = () + + if self.group_class is not None and kwargs.get("cls") is None: + if self.group_class is type: + kwargs["cls"] = type(self) + else: + kwargs["cls"] = self.group_class + + def decorator(f: t.Callable[..., t.Any]) -> "Group": + cmd: Group = group(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: + return self.commands.get(cmd_name) + + def list_commands(self, ctx: Context) -> t.List[str]: + return sorted(self.commands) + + +class CommandCollection(MultiCommand): + """A command collection is a multi command that merges multiple multi + commands together into one. This is a straightforward implementation + that accepts a list of different multi commands as sources and + provides all the commands for each of them. + + See :class:`MultiCommand` and :class:`Command` for the description of + ``name`` and ``attrs``. + """ + + def __init__( + self, + name: t.Optional[str] = None, + sources: t.Optional[t.List[MultiCommand]] = None, + **attrs: t.Any, + ) -> None: + super().__init__(name, **attrs) + #: The list of registered multi commands. + self.sources: t.List[MultiCommand] = sources or [] + + def add_source(self, multi_cmd: MultiCommand) -> None: + """Adds a new multi command to the chain dispatcher.""" + self.sources.append(multi_cmd) + + def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]: + for source in self.sources: + rv = source.get_command(ctx, cmd_name) + + if rv is not None: + if self.chain: + _check_multicommand(self, cmd_name, rv) + + return rv + + return None + + def list_commands(self, ctx: Context) -> t.List[str]: + rv: t.Set[str] = set() + + for source in self.sources: + rv.update(source.list_commands(ctx)) + + return sorted(rv) + + +def _check_iter(value: t.Any) -> t.Iterator[t.Any]: + """Check if the value is iterable but not a string. Raises a type + error, or return an iterator over the value. + """ + if isinstance(value, str): + raise TypeError + + return iter(value) + + +class Parameter: + r"""A parameter to a command comes in two versions: they are either + :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently + not supported by design as some of the internals for parsing are + intentionally not finalized. + + Some settings are supported by both options and arguments. + + :param param_decls: the parameter declarations for this option or + argument. This is a list of flags or argument + names. + :param type: the type that should be used. Either a :class:`ParamType` + or a Python type. The latter is converted into the former + automatically if supported. + :param required: controls if this is optional or not. + :param default: the default value if omitted. This can also be a callable, + in which case it's invoked when the default is needed + without any arguments. + :param callback: A function to further process or validate the value + after type conversion. It is called as ``f(ctx, param, value)`` + and must return the value. It is called for all sources, + including prompts. + :param nargs: the number of arguments to match. If not ``1`` the return + value is a tuple instead of single value. The default for + nargs is ``1`` (except if the type is a tuple, then it's + the arity of the tuple). If ``nargs=-1``, all remaining + parameters are collected. + :param metavar: how the value is represented in the help page. 
+    :param expose_value: if this is `True` then the value is passed onwards
+                         to the command callback and stored on the context,
+                         otherwise it's skipped.
+    :param is_eager: eager values are processed before non-eager ones. This
+                     should not be set for arguments or it will invert the
+                     order of processing.
+    :param envvar: a string or list of strings that are environment variables
+                   that should be checked.
+    :param shell_complete: A function that returns custom shell
+        completions. Used instead of the param's type completion if
+        given. Takes ``ctx, param, incomplete`` and must return a list
+        of :class:`~click.shell_completion.CompletionItem` or a list of
+        strings.
+
+    .. versionchanged:: 8.0
+        ``process_value`` validates required parameters and bounded
+        ``nargs``, and invokes the parameter callback before returning
+        the value. This allows the callback to validate prompts.
+        ``full_process_value`` is removed.
+
+    .. versionchanged:: 8.0
+        ``autocompletion`` is renamed to ``shell_complete`` and has new
+        semantics described above. The old name is deprecated and will
+        be removed in 8.1, until then it will be wrapped to match the
+        new requirements.
+
+    .. versionchanged:: 8.0
+        For ``multiple=True, nargs>1``, the default must be a list of
+        tuples.
+
+    .. versionchanged:: 8.0
+        Setting a default is no longer required for ``nargs>1``, it will
+        default to ``None``. ``multiple=True`` or ``nargs=-1`` will
+        default to ``()``.
+
+    .. versionchanged:: 7.1
+        Empty environment variables are ignored rather than taking the
+        empty string value. This makes it possible for scripts to clear
+        variables if they can't unset them.
+
+    .. versionchanged:: 2.0
+        Changed signature for parameter callback to also be passed the
+        parameter. The old callback format will still work, but it will
+        raise a warning to give you a chance to migrate the code more
+        easily.
+    """
+
+    param_type_name = "parameter"
+
+    def __init__(
+        self,
+        param_decls: t.Optional[t.Sequence[str]] = None,
+        type: t.Optional[t.Union[types.ParamType, t.Any]] = None,
+        required: bool = False,
+        default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]] = None,
+        callback: t.Optional[t.Callable[[Context, "Parameter", t.Any], t.Any]] = None,
+        nargs: t.Optional[int] = None,
+        multiple: bool = False,
+        metavar: t.Optional[str] = None,
+        expose_value: bool = True,
+        is_eager: bool = False,
+        envvar: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        shell_complete: t.Optional[
+            t.Callable[
+                [Context, "Parameter", str],
+                t.Union[t.List["CompletionItem"], t.List[str]],
+            ]
+        ] = None,
+    ) -> None:
+        self.name: t.Optional[str]
+        self.opts: t.List[str]
+        self.secondary_opts: t.List[str]
+        self.name, self.opts, self.secondary_opts = self._parse_decls(
+            param_decls or (), expose_value
+        )
+        self.type: types.ParamType = types.convert_type(type, default)
+
+        # Default nargs to what the type tells us if we have that
+        # information available.
+        if nargs is None:
+            if self.type.is_composite:
+                nargs = self.type.arity
+            else:
+                nargs = 1
+
+        self.required = required
+        self.callback = callback
+        self.nargs = nargs
+        self.multiple = multiple
+        self.expose_value = expose_value
+        self.default = default
+        self.is_eager = is_eager
+        self.metavar = metavar
+        self.envvar = envvar
+        self._custom_shell_complete = shell_complete
+
+        if __debug__:
+            if self.type.is_composite and nargs != self.type.arity:
+                raise ValueError(
+                    f"'nargs' must be {self.type.arity} (or None) for"
+                    f" type {self.type!r}, but it was {nargs}."
+ ) + + # Skip no default or callable default. + check_default = default if not callable(default) else None + + if check_default is not None: + if multiple: + try: + # Only check the first value against nargs. + check_default = next(_check_iter(check_default), None) + except TypeError: + raise ValueError( + "'default' must be a list when 'multiple' is true." + ) from None + + # Can be None for multiple with empty default. + if nargs != 1 and check_default is not None: + try: + _check_iter(check_default) + except TypeError: + if multiple: + message = ( + "'default' must be a list of lists when 'multiple' is" + " true and 'nargs' != 1." + ) + else: + message = "'default' must be a list when 'nargs' != 1." + + raise ValueError(message) from None + + if nargs > 1 and len(check_default) != nargs: + subject = "item length" if multiple else "length" + raise ValueError( + f"'default' {subject} must match nargs={nargs}." + ) + + def to_info_dict(self) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionadded:: 8.0 + """ + return { + "name": self.name, + "param_type_name": self.param_type_name, + "opts": self.opts, + "secondary_opts": self.secondary_opts, + "type": self.type.to_info_dict(), + "required": self.required, + "nargs": self.nargs, + "multiple": self.multiple, + "default": self.default, + "envvar": self.envvar, + } + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def _parse_decls( + self, decls: t.Sequence[str], expose_value: bool + ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: + raise NotImplementedError() + + @property + def human_readable_name(self) -> str: + """Returns the human readable name of this parameter. This is the + same as the name for options, but the metavar for arguments. + """ + return self.name # type: ignore + + def make_metavar(self) -> str: + if self.metavar is not None: + return self.metavar + + metavar = self.type.get_metavar(self) + + if metavar is None: + metavar = self.type.name.upper() + + if self.nargs != 1: + metavar += "..." + + return metavar + + @t.overload + def get_default( + self, ctx: Context, call: "te.Literal[True]" = True + ) -> t.Optional[t.Any]: + ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + """Get the default for the parameter. Tries + :meth:`Context.lookup_default` first, then the local default. + + :param ctx: Current context. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0.2 + Type casting is no longer performed when getting a default. + + .. versionchanged:: 8.0.1 + Type casting can fail in resilient parsing mode. Invalid + defaults will not prevent showing help text. + + .. versionchanged:: 8.0 + Looks at ``ctx.default_map`` first. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. 
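+
+        Illustrative sketch (editorial addition, not from the upstream
+        docs) of a callable default that this method resolves::
+
+            @click.option(
+                "--user", default=lambda: os.environ.get("USER", "unknown")
+            )
+
+        With ``call=True`` the lambda is invoked and its return value
+        used; with ``call=False`` the lambda itself is returned.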
+ """ + value = ctx.lookup_default(self.name, call=False) # type: ignore + + if value is None: + value = self.default + + if call and callable(value): + value = value() + + return value + + def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: + raise NotImplementedError() + + def consume_value( + self, ctx: Context, opts: t.Mapping[str, t.Any] + ) -> t.Tuple[t.Any, ParameterSource]: + value = opts.get(self.name) # type: ignore + source = ParameterSource.COMMANDLINE + + if value is None: + value = self.value_from_envvar(ctx) + source = ParameterSource.ENVIRONMENT + + if value is None: + value = ctx.lookup_default(self.name) # type: ignore + source = ParameterSource.DEFAULT_MAP + + if value is None: + value = self.get_default(ctx) + source = ParameterSource.DEFAULT + + return value, source + + def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: + """Convert and validate a value against the option's + :attr:`type`, :attr:`multiple`, and :attr:`nargs`. + """ + if value is None: + return () if self.multiple or self.nargs == -1 else None + + def check_iter(value: t.Any) -> t.Iterator[t.Any]: + try: + return _check_iter(value) + except TypeError: + # This should only happen when passing in args manually, + # the parser should construct an iterable when parsing + # the command line. + raise BadParameter( + _("Value must be an iterable."), ctx=ctx, param=self + ) from None + + if self.nargs == 1 or self.type.is_composite: + + def convert(value: t.Any) -> t.Any: + return self.type(value, param=self, ctx=ctx) + + elif self.nargs == -1: + + def convert(value: t.Any) -> t.Any: # t.Tuple[t.Any, ...] + return tuple(self.type(x, self, ctx) for x in check_iter(value)) + + else: # nargs > 1 + + def convert(value: t.Any) -> t.Any: # t.Tuple[t.Any, ...] 
+ value = tuple(check_iter(value)) + + if len(value) != self.nargs: + raise BadParameter( + ngettext( + "Takes {nargs} values but 1 was given.", + "Takes {nargs} values but {len} were given.", + len(value), + ).format(nargs=self.nargs, len=len(value)), + ctx=ctx, + param=self, + ) + + return tuple(self.type(x, self, ctx) for x in value) + + if self.multiple: + return tuple(convert(x) for x in check_iter(value)) + + return convert(value) + + def value_is_missing(self, value: t.Any) -> bool: + if value is None: + return True + + if (self.nargs != 1 or self.multiple) and value == (): + return True + + return False + + def process_value(self, ctx: Context, value: t.Any) -> t.Any: + value = self.type_cast_value(ctx, value) + + if self.required and self.value_is_missing(value): + raise MissingParameter(ctx=ctx, param=self) + + if self.callback is not None: + value = self.callback(ctx, self, value) + + return value + + def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: + if self.envvar is None: + return None + + if isinstance(self.envvar, str): + rv = os.environ.get(self.envvar) + + if rv: + return rv + else: + for envvar in self.envvar: + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: + rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) + + if rv is not None and self.nargs != 1: + rv = self.type.split_envvar_value(rv) + + return rv + + def handle_parse_result( + self, ctx: Context, opts: t.Mapping[str, t.Any], args: t.List[str] + ) -> t.Tuple[t.Any, t.List[str]]: + with augment_usage_errors(ctx, param=self): + value, source = self.consume_value(ctx, opts) + ctx.set_parameter_source(self.name, source) # type: ignore + + try: + value = self.process_value(ctx, value) + except Exception: + if not ctx.resilient_parsing: + raise + + value = None + + if self.expose_value: + ctx.params[self.name] = value # type: ignore + + return value, args + + def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: + pass + + def get_usage_pieces(self, ctx: Context) -> t.List[str]: + return [] + + def get_error_hint(self, ctx: Context) -> str: + """Get a stringified version of the param for use in error messages to + indicate which param caused the error. + """ + hint_list = self.opts or [self.human_readable_name] + return " / ".join(f"'{x}'" for x in hint_list) + + def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: + """Return a list of completions for the incomplete value. If a + ``shell_complete`` function was given during init, it is used. + Otherwise, the :attr:`type` + :meth:`~click.types.ParamType.shell_complete` function is used. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + if self._custom_shell_complete is not None: + results = self._custom_shell_complete(ctx, self, incomplete) + + if results and isinstance(results[0], str): + from click.shell_completion import CompletionItem + + results = [CompletionItem(c) for c in results] + + return t.cast(t.List["CompletionItem"], results) + + return self.type.shell_complete(ctx, self, incomplete) + + +class Option(Parameter): + """Options are usually optional values on the command line and + have some extra features that arguments don't have. + + All other parameters are passed onwards to the parameter constructor. + + :param show_default: Show the default value for this option in its + help text. 
Values are not shown by default, unless + :attr:`Context.show_default` is ``True``. If this value is a + string, it shows that string in parentheses instead of the + actual value. This is particularly useful for dynamic options. + For single option boolean flags, the default remains hidden if + its value is ``False``. + :param show_envvar: Controls if an environment variable should be + shown on the help page. Normally, environment variables are not + shown. + :param prompt: If set to ``True`` or a non empty string then the + user will be prompted for input. If set to ``True`` the prompt + will be the option name capitalized. + :param confirmation_prompt: Prompt a second time to confirm the + value if it was prompted for. Can be set to a string instead of + ``True`` to customize the message. + :param prompt_required: If set to ``False``, the user will be + prompted for input only when the option was specified as a flag + without a value. + :param hide_input: If this is ``True`` then the input on the prompt + will be hidden from the user. This is useful for password input. + :param is_flag: forces this option to act as a flag. The default is + auto detection. + :param flag_value: which value should be used for this flag if it's + enabled. This is set to a boolean automatically if + the option string contains a slash to mark two options. + :param multiple: if this is set to `True` then the argument is accepted + multiple times and recorded. This is similar to ``nargs`` + in how it works but supports arbitrary number of + arguments. + :param count: this flag makes an option increment an integer. + :param allow_from_autoenv: if this is enabled then the value of this + parameter will be pulled from an environment + variable in case a prefix is defined on the + context. + :param help: the help string. + :param hidden: hide this option from help outputs. + :param attrs: Other command arguments described in :class:`Parameter`. + + .. versionchanged:: 8.1.0 + Help text indentation is cleaned here instead of only in the + ``@option`` decorator. + + .. versionchanged:: 8.1.0 + The ``show_default`` parameter overrides + ``Context.show_default``. + + .. versionchanged:: 8.1.0 + The default of a single option boolean flag is not shown if the + default value is ``False``. + + .. versionchanged:: 8.0.1 + ``type`` is detected from ``flag_value`` if given. 
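+
+    Illustrative sketch (editorial addition): a boolean flag pair whose
+    default is shown in ``--help``::
+
+        @click.command()
+        @click.option("--shout/--no-shout", default=False, show_default=True)
+        def greet(shout):
+            click.echo("HELLO!" if shout else "hello")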
+ """ + + param_type_name = "option" + + def __init__( + self, + param_decls: t.Optional[t.Sequence[str]] = None, + show_default: t.Union[bool, str, None] = None, + prompt: t.Union[bool, str] = False, + confirmation_prompt: t.Union[bool, str] = False, + prompt_required: bool = True, + hide_input: bool = False, + is_flag: t.Optional[bool] = None, + flag_value: t.Optional[t.Any] = None, + multiple: bool = False, + count: bool = False, + allow_from_autoenv: bool = True, + type: t.Optional[t.Union[types.ParamType, t.Any]] = None, + help: t.Optional[str] = None, + hidden: bool = False, + show_choices: bool = True, + show_envvar: bool = False, + **attrs: t.Any, + ) -> None: + if help: + help = inspect.cleandoc(help) + + default_is_missing = "default" not in attrs + super().__init__(param_decls, type=type, multiple=multiple, **attrs) + + if prompt is True: + if self.name is None: + raise TypeError("'name' is required with 'prompt=True'.") + + prompt_text: t.Optional[str] = self.name.replace("_", " ").capitalize() + elif prompt is False: + prompt_text = None + else: + prompt_text = prompt + + self.prompt = prompt_text + self.confirmation_prompt = confirmation_prompt + self.prompt_required = prompt_required + self.hide_input = hide_input + self.hidden = hidden + + # If prompt is enabled but not required, then the option can be + # used as a flag to indicate using prompt or flag_value. + self._flag_needs_value = self.prompt is not None and not self.prompt_required + + if is_flag is None: + if flag_value is not None: + # Implicitly a flag because flag_value was set. + is_flag = True + elif self._flag_needs_value: + # Not a flag, but when used as a flag it shows a prompt. + is_flag = False + else: + # Implicitly a flag because flag options were given. + is_flag = bool(self.secondary_opts) + elif is_flag is False and not self._flag_needs_value: + # Not a flag, and prompt is not enabled, can be used as a + # flag if flag_value is set. + self._flag_needs_value = flag_value is not None + + self.default: t.Union[t.Any, t.Callable[[], t.Any]] + + if is_flag and default_is_missing and not self.required: + if multiple: + self.default = () + else: + self.default = False + + if flag_value is None: + flag_value = not self.default + + self.type: types.ParamType + if is_flag and type is None: + # Re-guess the type from the flag value instead of the + # default. + self.type = types.convert_type(None, flag_value) + + self.is_flag: bool = is_flag + self.is_bool_flag: bool = is_flag and isinstance(self.type, types.BoolParamType) + self.flag_value: t.Any = flag_value + + # Counting + self.count = count + if count: + if type is None: + self.type = types.IntRange(min=0) + if default_is_missing: + self.default = 0 + + self.allow_from_autoenv = allow_from_autoenv + self.help = help + self.show_default = show_default + self.show_choices = show_choices + self.show_envvar = show_envvar + + if __debug__: + if self.nargs == -1: + raise TypeError("nargs=-1 is not supported for options.") + + if self.prompt and self.is_flag and not self.is_bool_flag: + raise TypeError("'prompt' is not valid for non-boolean flag.") + + if not self.is_bool_flag and self.secondary_opts: + raise TypeError("Secondary flag is not valid for non-boolean flag.") + + if self.is_bool_flag and self.hide_input and self.prompt is not None: + raise TypeError( + "'prompt' with 'hide_input' is not valid for boolean flag." 
+ ) + + if self.count: + if self.multiple: + raise TypeError("'count' is not valid with 'multiple'.") + + if self.is_flag: + raise TypeError("'count' is not valid with 'is_flag'.") + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + help=self.help, + prompt=self.prompt, + is_flag=self.is_flag, + flag_value=self.flag_value, + count=self.count, + hidden=self.hidden, + ) + return info_dict + + def _parse_decls( + self, decls: t.Sequence[str], expose_value: bool + ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: + opts = [] + secondary_opts = [] + name = None + possible_names = [] + + for decl in decls: + if decl.isidentifier(): + if name is not None: + raise TypeError(f"Name '{name}' defined twice") + name = decl + else: + split_char = ";" if decl[:1] == "/" else "/" + if split_char in decl: + first, second = decl.split(split_char, 1) + first = first.rstrip() + if first: + possible_names.append(split_opt(first)) + opts.append(first) + second = second.lstrip() + if second: + secondary_opts.append(second.lstrip()) + if first == second: + raise ValueError( + f"Boolean option {decl!r} cannot use the" + " same flag for true/false." + ) + else: + possible_names.append(split_opt(decl)) + opts.append(decl) + + if name is None and possible_names: + possible_names.sort(key=lambda x: -len(x[0])) # group long options first + name = possible_names[0][1].replace("-", "_").lower() + if not name.isidentifier(): + name = None + + if name is None: + if not expose_value: + return None, opts, secondary_opts + raise TypeError("Could not determine name for option") + + if not opts and not secondary_opts: + raise TypeError( + f"No options defined but a name was passed ({name})." + " Did you mean to declare an argument instead? Did" + f" you mean to pass '--{name}'?" 
+ ) + + return name, opts, secondary_opts + + def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: + if self.multiple: + action = "append" + elif self.count: + action = "count" + else: + action = "store" + + if self.is_flag: + action = f"{action}_const" + + if self.is_bool_flag and self.secondary_opts: + parser.add_option( + obj=self, opts=self.opts, dest=self.name, action=action, const=True + ) + parser.add_option( + obj=self, + opts=self.secondary_opts, + dest=self.name, + action=action, + const=False, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + const=self.flag_value, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + nargs=self.nargs, + ) + + def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]: + if self.hidden: + return None + + any_prefix_is_slash = False + + def _write_opts(opts: t.Sequence[str]) -> str: + nonlocal any_prefix_is_slash + + rv, any_slashes = join_options(opts) + + if any_slashes: + any_prefix_is_slash = True + + if not self.is_flag and not self.count: + rv += f" {self.make_metavar()}" + + return rv + + rv = [_write_opts(self.opts)] + + if self.secondary_opts: + rv.append(_write_opts(self.secondary_opts)) + + help = self.help or "" + extra = [] + + if self.show_envvar: + envvar = self.envvar + + if envvar is None: + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + + if envvar is not None: + var_str = ( + envvar + if isinstance(envvar, str) + else ", ".join(str(d) for d in envvar) + ) + extra.append(_("env var: {var}").format(var=var_str)) + + # Temporarily enable resilient parsing to avoid type casting + # failing for the default. Might be possible to extend this to + # help formatting in general. + resilient = ctx.resilient_parsing + ctx.resilient_parsing = True + + try: + default_value = self.get_default(ctx, call=False) + finally: + ctx.resilient_parsing = resilient + + show_default = False + show_default_is_str = False + + if self.show_default is not None: + if isinstance(self.show_default, str): + show_default_is_str = show_default = True + else: + show_default = self.show_default + elif ctx.show_default is not None: + show_default = ctx.show_default + + if show_default_is_str or (show_default and (default_value is not None)): + if show_default_is_str: + default_string = f"({self.show_default})" + elif isinstance(default_value, (list, tuple)): + default_string = ", ".join(str(d) for d in default_value) + elif inspect.isfunction(default_value): + default_string = _("(dynamic)") + elif self.is_bool_flag and self.secondary_opts: + # For boolean flags that have distinct True/False opts, + # use the opt without prefix instead of the value. 
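+                # e.g. ``--shout/--no-shout`` with ``default=False``
+                # renders as ``[default: no-shout]``.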
+ default_string = split_opt( + (self.opts if self.default else self.secondary_opts)[0] + )[1] + elif self.is_bool_flag and not self.secondary_opts and not default_value: + default_string = "" + else: + default_string = str(default_value) + + if default_string: + extra.append(_("default: {default}").format(default=default_string)) + + if ( + isinstance(self.type, types._NumberRangeBase) + # skip count with default range type + and not (self.count and self.type.min == 0 and self.type.max is None) + ): + range_str = self.type._describe_range() + + if range_str: + extra.append(range_str) + + if self.required: + extra.append(_("required")) + + if extra: + extra_str = "; ".join(extra) + help = f"{help} [{extra_str}]" if help else f"[{extra_str}]" + + return ("; " if any_prefix_is_slash else " / ").join(rv), help + + @t.overload + def get_default( + self, ctx: Context, call: "te.Literal[True]" = True + ) -> t.Optional[t.Any]: + ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]: + # If we're a non boolean flag our default is more complex because + # we need to look at all flags in the same group to figure out + # if we're the default one in which case we return the flag + # value as default. + if self.is_flag and not self.is_bool_flag: + for param in ctx.command.params: + if param.name == self.name and param.default: + return t.cast(Option, param).flag_value + + return None + + return super().get_default(ctx, call=call) + + def prompt_for_value(self, ctx: Context) -> t.Any: + """This is an alternative flow that can be activated in the full + value processing if a value does not exist. It will prompt the + user until a valid value exists and then returns the processed + value as result. + """ + assert self.prompt is not None + + # Calculate the default before prompting anything to be stable. + default = self.get_default(ctx) + + # If this is a prompt for a flag we need to handle this + # differently. + if self.is_bool_flag: + return confirm(self.prompt, default) + + return prompt( + self.prompt, + default=default, + type=self.type, + hide_input=self.hide_input, + show_choices=self.show_choices, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x), + ) + + def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]: + rv = super().resolve_envvar_value(ctx) + + if rv is not None: + return rv + + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]: + rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx) + + if rv is None: + return None + + value_depth = (self.nargs != 1) + bool(self.multiple) + + if value_depth > 0: + rv = self.type.split_envvar_value(rv) + + if self.multiple and self.nargs != 1: + rv = batch(rv, self.nargs) + + return rv + + def consume_value( + self, ctx: Context, opts: t.Mapping[str, "Parameter"] + ) -> t.Tuple[t.Any, ParameterSource]: + value, source = super().consume_value(ctx, opts) + + # The parser will emit a sentinel value if the option can be + # given as a flag without a value. This is different from None + # to distinguish from the flag not being given at all. 
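+        # Illustrative case: with
+        # ``@click.option("--name", prompt=True, prompt_required=False)``
+        # a bare ``--name`` on the command line produces the sentinel,
+        # which triggers the prompt handling below.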
+ if value is _flag_needs_value: + if self.prompt is not None and not ctx.resilient_parsing: + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + else: + value = self.flag_value + source = ParameterSource.COMMANDLINE + + elif ( + self.multiple + and value is not None + and any(v is _flag_needs_value for v in value) + ): + value = [self.flag_value if v is _flag_needs_value else v for v in value] + source = ParameterSource.COMMANDLINE + + # The value wasn't set, or used the param's default, prompt if + # prompting is enabled. + elif ( + source in {None, ParameterSource.DEFAULT} + and self.prompt is not None + and (self.required or self.prompt_required) + and not ctx.resilient_parsing + ): + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + + return value, source + + +class Argument(Parameter): + """Arguments are positional parameters to a command. They generally + provide fewer features than options but can have infinite ``nargs`` + and are required by default. + + All parameters are passed onwards to the constructor of :class:`Parameter`. + """ + + param_type_name = "argument" + + def __init__( + self, + param_decls: t.Sequence[str], + required: t.Optional[bool] = None, + **attrs: t.Any, + ) -> None: + if required is None: + if attrs.get("default") is not None: + required = False + else: + required = attrs.get("nargs", 1) > 0 + + if "multiple" in attrs: + raise TypeError("__init__() got an unexpected keyword argument 'multiple'.") + + super().__init__(param_decls, required=required, **attrs) + + if __debug__: + if self.default is not None and self.nargs == -1: + raise TypeError("'default' is not supported for nargs=-1.") + + @property + def human_readable_name(self) -> str: + if self.metavar is not None: + return self.metavar + return self.name.upper() # type: ignore + + def make_metavar(self) -> str: + if self.metavar is not None: + return self.metavar + var = self.type.get_metavar(self) + if not var: + var = self.name.upper() # type: ignore + if not self.required: + var = f"[{var}]" + if self.nargs != 1: + var += "..." + return var + + def _parse_decls( + self, decls: t.Sequence[str], expose_value: bool + ) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]: + if not decls: + if not expose_value: + return None, [], [] + raise TypeError("Could not determine name for argument") + if len(decls) == 1: + name = arg = decls[0] + name = name.replace("-", "_").lower() + else: + raise TypeError( + "Arguments take exactly one parameter declaration, got" + f" {len(decls)}." + ) + return name, [arg], [] + + def get_usage_pieces(self, ctx: Context) -> t.List[str]: + return [self.make_metavar()] + + def get_error_hint(self, ctx: Context) -> str: + return f"'{self.make_metavar()}'" + + def add_to_parser(self, parser: OptionParser, ctx: Context) -> None: + parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) diff --git a/llmeval-env/lib/python3.10/site-packages/click/parser.py b/llmeval-env/lib/python3.10/site-packages/click/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..5fa7adfac842bfa5689fd1a41ae4017be1ebff6f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/click/parser.py @@ -0,0 +1,529 @@ +""" +This module started out as largely a copy paste from the stdlib's +optparse module with the features removed that we do not need from +optparse because we implement them in Click on a higher level (for +instance type handling, help formatting and a lot more). 
+ +The plan is to remove more and more from here over time. + +The reason this is a different module and not optparse from the stdlib +is that there are differences in 2.x and 3.x about the error messages +generated and optparse in the stdlib uses gettext for no good reason +and might cause us issues. + +Click uses parts of optparse written by Gregory P. Ward and maintained +by the Python Software Foundation. This is limited to code in parser.py. + +Copyright 2001-2006 Gregory P. Ward. All rights reserved. +Copyright 2002-2006 Python Software Foundation. All rights reserved. +""" +# This code uses parts of optparse written by Gregory P. Ward and +# maintained by the Python Software Foundation. +# Copyright 2001-2006 Gregory P. Ward +# Copyright 2002-2006 Python Software Foundation +import typing as t +from collections import deque +from gettext import gettext as _ +from gettext import ngettext + +from .exceptions import BadArgumentUsage +from .exceptions import BadOptionUsage +from .exceptions import NoSuchOption +from .exceptions import UsageError + +if t.TYPE_CHECKING: + import typing_extensions as te + from .core import Argument as CoreArgument + from .core import Context + from .core import Option as CoreOption + from .core import Parameter as CoreParameter + +V = t.TypeVar("V") + +# Sentinel value that indicates an option was passed as a flag without a +# value but is not a flag option. Option.consume_value uses this to +# prompt or use the flag_value. +_flag_needs_value = object() + + +def _unpack_args( + args: t.Sequence[str], nargs_spec: t.Sequence[int] +) -> t.Tuple[t.Sequence[t.Union[str, t.Sequence[t.Optional[str]], None]], t.List[str]]: + """Given an iterable of arguments and an iterable of nargs specifications, + it returns a tuple with all the unpacked arguments at the first index + and all remaining arguments as the second. + + The nargs specification is the number of arguments that should be consumed + or `-1` to indicate that this position should eat up all the remainders. + + Missing items are filled with `None`. + """ + args = deque(args) + nargs_spec = deque(nargs_spec) + rv: t.List[t.Union[str, t.Tuple[t.Optional[str], ...], None]] = [] + spos: t.Optional[int] = None + + def _fetch(c: "te.Deque[V]") -> t.Optional[V]: + try: + if spos is None: + return c.popleft() + else: + return c.pop() + except IndexError: + return None + + while nargs_spec: + nargs = _fetch(nargs_spec) + + if nargs is None: + continue + + if nargs == 1: + rv.append(_fetch(args)) + elif nargs > 1: + x = [_fetch(args) for _ in range(nargs)] + + # If we're reversed, we're pulling in the arguments in reverse, + # so we need to turn them around. + if spos is not None: + x.reverse() + + rv.append(tuple(x)) + elif nargs < 0: + if spos is not None: + raise TypeError("Cannot have two nargs < 0") + + spos = len(rv) + rv.append(None) + + # spos is the position of the wildcard (star). If it's not `None`, + # we fill it with the remainder. 
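+    # e.g. _unpack_args(["a", "b", "c", "d"], [1, -1, 1])
+    # returns (("a", ("b", "c"), "d"), []).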
+ if spos is not None: + rv[spos] = tuple(args) + args = [] + rv[spos + 1 :] = reversed(rv[spos + 1 :]) + + return tuple(rv), list(args) + + +def split_opt(opt: str) -> t.Tuple[str, str]: + first = opt[:1] + if first.isalnum(): + return "", opt + if opt[1:2] == first: + return opt[:2], opt[2:] + return first, opt[1:] + + +def normalize_opt(opt: str, ctx: t.Optional["Context"]) -> str: + if ctx is None or ctx.token_normalize_func is None: + return opt + prefix, opt = split_opt(opt) + return f"{prefix}{ctx.token_normalize_func(opt)}" + + +def split_arg_string(string: str) -> t.List[str]: + """Split an argument string as with :func:`shlex.split`, but don't + fail if the string is incomplete. Ignores a missing closing quote or + incomplete escape sequence and uses the partial token as-is. + + .. code-block:: python + + split_arg_string("example 'my file") + ["example", "my file"] + + split_arg_string("example my\\") + ["example", "my"] + + :param string: String to split. + """ + import shlex + + lex = shlex.shlex(string, posix=True) + lex.whitespace_split = True + lex.commenters = "" + out = [] + + try: + for token in lex: + out.append(token) + except ValueError: + # Raised when end-of-string is reached in an invalid state. Use + # the partial token as-is. The quote or escape character is in + # lex.state, not lex.token. + out.append(lex.token) + + return out + + +class Option: + def __init__( + self, + obj: "CoreOption", + opts: t.Sequence[str], + dest: t.Optional[str], + action: t.Optional[str] = None, + nargs: int = 1, + const: t.Optional[t.Any] = None, + ): + self._short_opts = [] + self._long_opts = [] + self.prefixes: t.Set[str] = set() + + for opt in opts: + prefix, value = split_opt(opt) + if not prefix: + raise ValueError(f"Invalid start character for option ({opt})") + self.prefixes.add(prefix[0]) + if len(prefix) == 1 and len(value) == 1: + self._short_opts.append(opt) + else: + self._long_opts.append(opt) + self.prefixes.add(prefix) + + if action is None: + action = "store" + + self.dest = dest + self.action = action + self.nargs = nargs + self.const = const + self.obj = obj + + @property + def takes_value(self) -> bool: + return self.action in ("store", "append") + + def process(self, value: t.Any, state: "ParsingState") -> None: + if self.action == "store": + state.opts[self.dest] = value # type: ignore + elif self.action == "store_const": + state.opts[self.dest] = self.const # type: ignore + elif self.action == "append": + state.opts.setdefault(self.dest, []).append(value) # type: ignore + elif self.action == "append_const": + state.opts.setdefault(self.dest, []).append(self.const) # type: ignore + elif self.action == "count": + state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore + else: + raise ValueError(f"unknown action '{self.action}'") + state.order.append(self.obj) + + +class Argument: + def __init__(self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1): + self.dest = dest + self.nargs = nargs + self.obj = obj + + def process( + self, + value: t.Union[t.Optional[str], t.Sequence[t.Optional[str]]], + state: "ParsingState", + ) -> None: + if self.nargs > 1: + assert value is not None + holes = sum(1 for x in value if x is None) + if holes == len(value): + value = None + elif holes != 0: + raise BadArgumentUsage( + _("Argument {name!r} takes {nargs} values.").format( + name=self.dest, nargs=self.nargs + ) + ) + + if self.nargs == -1 and self.obj.envvar is not None and value == (): + # Replace empty tuple with None so that a value from the + # 
environment may be tried.
+            value = None
+
+        state.opts[self.dest] = value  # type: ignore
+        state.order.append(self.obj)
+
+
+class ParsingState:
+    def __init__(self, rargs: t.List[str]) -> None:
+        self.opts: t.Dict[str, t.Any] = {}
+        self.largs: t.List[str] = []
+        self.rargs = rargs
+        self.order: t.List["CoreParameter"] = []
+
+
+class OptionParser:
+    """The option parser is an internal class that is ultimately used to
+    parse options and arguments. It's modelled after optparse and brings
+    a similar but vastly simplified API. It should generally not be used
+    directly as the high level Click classes wrap it for you.
+
+    It's not nearly as extensible as optparse or argparse as it does not
+    implement features that are implemented on a higher level (such as
+    types or defaults).
+
+    :param ctx: optionally the :class:`~click.Context` that this parser
+                should be associated with.
+    """
+
+    def __init__(self, ctx: t.Optional["Context"] = None) -> None:
+        #: The :class:`~click.Context` for this parser. This might be
+        #: `None` for some advanced use cases.
+        self.ctx = ctx
+        #: This controls how the parser deals with interspersed arguments.
+        #: If this is set to `False`, the parser will stop on the first
+        #: non-option. Click uses this to implement nested subcommands
+        #: safely.
+        self.allow_interspersed_args: bool = True
+        #: This tells the parser how to deal with unknown options. By
+        #: default it will error out (which is sensible), but there is a
+        #: second mode where it will ignore it and continue processing
+        #: after shifting all the unknown options into the resulting args.
+        self.ignore_unknown_options: bool = False
+
+        if ctx is not None:
+            self.allow_interspersed_args = ctx.allow_interspersed_args
+            self.ignore_unknown_options = ctx.ignore_unknown_options
+
+        self._short_opt: t.Dict[str, Option] = {}
+        self._long_opt: t.Dict[str, Option] = {}
+        self._opt_prefixes = {"-", "--"}
+        self._args: t.List[Argument] = []
+
+    def add_option(
+        self,
+        obj: "CoreOption",
+        opts: t.Sequence[str],
+        dest: t.Optional[str],
+        action: t.Optional[str] = None,
+        nargs: int = 1,
+        const: t.Optional[t.Any] = None,
+    ) -> None:
+        """Adds a new option named `dest` to the parser. The destination
+        is not inferred (unlike with optparse) and needs to be explicitly
+        provided. Action can be any of ``store``, ``store_const``,
+        ``append``, ``append_const`` or ``count``.
+
+        The `obj` can be used to identify the option in the order list
+        that is returned from the parser.
+        """
+        opts = [normalize_opt(opt, self.ctx) for opt in opts]
+        option = Option(obj, opts, dest, action=action, nargs=nargs, const=const)
+        self._opt_prefixes.update(option.prefixes)
+        for opt in option._short_opts:
+            self._short_opt[opt] = option
+        for opt in option._long_opts:
+            self._long_opt[opt] = option
+
+    def add_argument(
+        self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1
+    ) -> None:
+        """Adds a positional argument named `dest` to the parser.
+
+        The `obj` can be used to identify the option in the order list
+        that is returned from the parser.
+        """
+        self._args.append(Argument(obj, dest=dest, nargs=nargs))
+
+    def parse_args(
+        self, args: t.List[str]
+    ) -> t.Tuple[t.Dict[str, t.Any], t.List[str], t.List["CoreParameter"]]:
+        """Parses positional arguments and returns ``(values, args, order)``
+        for the parsed options and arguments as well as the leftover
+        arguments if there are any. The order is a list of objects as they
+        appear on the command line.
If arguments appear multiple times they + will be memorized multiple times as well. + """ + state = ParsingState(args) + try: + self._process_args_for_options(state) + self._process_args_for_args(state) + except UsageError: + if self.ctx is None or not self.ctx.resilient_parsing: + raise + return state.opts, state.largs, state.order + + def _process_args_for_args(self, state: ParsingState) -> None: + pargs, args = _unpack_args( + state.largs + state.rargs, [x.nargs for x in self._args] + ) + + for idx, arg in enumerate(self._args): + arg.process(pargs[idx], state) + + state.largs = args + state.rargs = [] + + def _process_args_for_options(self, state: ParsingState) -> None: + while state.rargs: + arg = state.rargs.pop(0) + arglen = len(arg) + # Double dashes always handled explicitly regardless of what + # prefixes are valid. + if arg == "--": + return + elif arg[:1] in self._opt_prefixes and arglen > 1: + self._process_opts(arg, state) + elif self.allow_interspersed_args: + state.largs.append(arg) + else: + state.rargs.insert(0, arg) + return + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt( + self, opt: str, explicit_value: t.Optional[str], state: ParsingState + ) -> None: + if opt not in self._long_opt: + from difflib import get_close_matches + + possibilities = get_close_matches(opt, self._long_opt) + raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) + + option = self._long_opt[opt] + if option.takes_value: + # At this point it's safe to modify rargs by injecting the + # explicit value, because no exception is raised in this + # branch. This means that the inserted value will be fully + # consumed. + if explicit_value is not None: + state.rargs.insert(0, explicit_value) + + value = self._get_value_from_state(opt, option, state) + + elif explicit_value is not None: + raise BadOptionUsage( + opt, _("Option {name!r} does not take a value.").format(name=opt) + ) + + else: + value = None + + option.process(value, state) + + def _match_short_opt(self, arg: str, state: ParsingState) -> None: + stop = False + i = 1 + prefix = arg[0] + unknown_options = [] + + for ch in arg[1:]: + opt = normalize_opt(f"{prefix}{ch}", self.ctx) + option = self._short_opt.get(opt) + i += 1 + + if not option: + if self.ignore_unknown_options: + unknown_options.append(ch) + continue + raise NoSuchOption(opt, ctx=self.ctx) + if option.takes_value: + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. 
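+                # e.g. ``-ofile`` where ``-o`` takes a value: "file" is
+                # pushed back onto rargs and consumed as the value.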
+ if i < len(arg): + state.rargs.insert(0, arg[i:]) + stop = True + + value = self._get_value_from_state(opt, option, state) + + else: + value = None + + option.process(value, state) + + if stop: + break + + # If we got any unknown options we recombine the string of the + # remaining options and re-attach the prefix, then report that + # to the state as new larg. This way there is basic combinatorics + # that can be achieved while still ignoring unknown arguments. + if self.ignore_unknown_options and unknown_options: + state.largs.append(f"{prefix}{''.join(unknown_options)}") + + def _get_value_from_state( + self, option_name: str, option: Option, state: ParsingState + ) -> t.Any: + nargs = option.nargs + + if len(state.rargs) < nargs: + if option.obj._flag_needs_value: + # Option allows omitting the value. + value = _flag_needs_value + else: + raise BadOptionUsage( + option_name, + ngettext( + "Option {name!r} requires an argument.", + "Option {name!r} requires {nargs} arguments.", + nargs, + ).format(name=option_name, nargs=nargs), + ) + elif nargs == 1: + next_rarg = state.rargs[0] + + if ( + option.obj._flag_needs_value + and isinstance(next_rarg, str) + and next_rarg[:1] in self._opt_prefixes + and len(next_rarg) > 1 + ): + # The next arg looks like the start of an option, don't + # use it as the value if omitting the value is allowed. + value = _flag_needs_value + else: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + return value + + def _process_opts(self, arg: str, state: ParsingState) -> None: + explicit_value = None + # Long option handling happens in two parts. The first part is + # supporting explicitly attached values. In any case, we will try + # to long match the option first. + if "=" in arg: + long_opt, explicit_value = arg.split("=", 1) + else: + long_opt = arg + norm_long_opt = normalize_opt(long_opt, self.ctx) + + # At this point we will match the (assumed) long option through + # the long option matching code. Note that this allows options + # like "-foo" to be matched as long options. + try: + self._match_long_opt(norm_long_opt, explicit_value, state) + except NoSuchOption: + # At this point the long option matching failed, and we need + # to try with short options. However there is a special rule + # which says, that if we have a two character options prefix + # (applies to "--foo" for instance), we do not dispatch to the + # short option code and will instead raise the no option + # error. 
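+            # e.g. ``-foo`` may still be retried as the short options
+            # ``-f``, ``-o``, ``-o``; ``--foo`` (two-character prefix)
+            # never falls back to short option matching.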
+ if arg[:2] not in self._opt_prefixes: + self._match_short_opt(arg, state) + return + + if not self.ignore_unknown_options: + raise + + state.largs.append(arg) diff --git a/llmeval-env/lib/python3.10/site-packages/click/py.typed b/llmeval-env/lib/python3.10/site-packages/click/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/click/types.py b/llmeval-env/lib/python3.10/site-packages/click/types.py new file mode 100644 index 0000000000000000000000000000000000000000..2b1d1797f2e115e9bc976bcaf7d8e1884a91e91c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/click/types.py @@ -0,0 +1,1089 @@ +import os +import stat +import sys +import typing as t +from datetime import datetime +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import _get_argv_encoding +from ._compat import open_stream +from .exceptions import BadParameter +from .utils import format_filename +from .utils import LazyFile +from .utils import safecall + +if t.TYPE_CHECKING: + import typing_extensions as te + from .core import Context + from .core import Parameter + from .shell_completion import CompletionItem + + +class ParamType: + """Represents the type of a parameter. Validates and converts values + from the command line or Python into the correct type. + + To implement a custom type, subclass and implement at least the + following: + + - The :attr:`name` class attribute must be set. + - Calling an instance of the type with ``None`` must return + ``None``. This is already implemented by default. + - :meth:`convert` must convert string values to the correct type. + - :meth:`convert` must accept values that are already the correct + type. + - It must be able to convert a value if the ``ctx`` and ``param`` + arguments are ``None``. This can occur when converting prompt + input. + """ + + is_composite: t.ClassVar[bool] = False + arity: t.ClassVar[int] = 1 + + #: the descriptive name of this type + name: str + + #: if a list of this type is expected and the value is pulled from a + #: string environment variable, this is what splits it up. `None` + #: means any whitespace. For all parameters the general rule is that + #: whitespace splits them up. The exception are paths and files which + #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on + #: Windows). + envvar_list_splitter: t.ClassVar[t.Optional[str]] = None + + def to_info_dict(self) -> t.Dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionadded:: 8.0 + """ + # The class name without the "ParamType" suffix. + param_type = type(self).__name__.partition("ParamType")[0] + param_type = param_type.partition("ParameterType")[0] + + # Custom subclasses might not remember to set a name. 
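+        # For example (editor's illustration, hypothetical subclass): given
+        #
+        #     class CommaListParamType(ParamType):
+        #         name = "comma-list"
+        #
+        # this method yields {"param_type": "CommaList", "name": "comma-list"}.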
+ if hasattr(self, "name"): + name = self.name + else: + name = param_type + + return {"param_type": param_type, "name": name} + + def __call__( + self, + value: t.Any, + param: t.Optional["Parameter"] = None, + ctx: t.Optional["Context"] = None, + ) -> t.Any: + if value is not None: + return self.convert(value, param, ctx) + + def get_metavar(self, param: "Parameter") -> t.Optional[str]: + """Returns the metavar default for this param if it provides one.""" + + def get_missing_message(self, param: "Parameter") -> t.Optional[str]: + """Optionally might return extra information about a missing + parameter. + + .. versionadded:: 2.0 + """ + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + """Convert the value to the correct type. This is not called if + the value is ``None`` (the missing value). + + This must accept string values from the command line, as well as + values that are already the correct type. It may also convert + other compatible types. + + The ``param`` and ``ctx`` arguments may be ``None`` in certain + situations, such as when converting prompt input. + + If the value cannot be converted, call :meth:`fail` with a + descriptive message. + + :param value: The value to convert. + :param param: The parameter that is using this type to convert + its value. May be ``None``. + :param ctx: The current context that arrived at this value. May + be ``None``. + """ + return value + + def split_envvar_value(self, rv: str) -> t.Sequence[str]: + """Given a value from an environment variable this splits it up + into small chunks depending on the defined envvar list splitter. + + If the splitter is set to `None`, which means that whitespace splits, + then leading and trailing whitespace is ignored. Otherwise, leading + and trailing splitters usually lead to empty items being included. + """ + return (rv or "").split(self.envvar_list_splitter) + + def fail( + self, + message: str, + param: t.Optional["Parameter"] = None, + ctx: t.Optional["Context"] = None, + ) -> "t.NoReturn": + """Helper method to fail with an invalid value message.""" + raise BadParameter(message, ctx=ctx, param=param) + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Return a list of + :class:`~click.shell_completion.CompletionItem` objects for the + incomplete value. Most types do not provide completions, but + some do, and this allows custom types to provide custom + completions as well. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. 
versionadded:: 8.0 + """ + return [] + + +class CompositeParamType(ParamType): + is_composite = True + + @property + def arity(self) -> int: # type: ignore + raise NotImplementedError() + + +class FuncParamType(ParamType): + def __init__(self, func: t.Callable[[t.Any], t.Any]) -> None: + self.name: str = func.__name__ + self.func = func + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["func"] = self.func + return info_dict + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + try: + return self.func(value) + except ValueError: + try: + value = str(value) + except UnicodeError: + value = value.decode("utf-8", "replace") + + self.fail(value, param, ctx) + + +class UnprocessedParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + return value + + def __repr__(self) -> str: + return "UNPROCESSED" + + +class StringParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + if isinstance(value, bytes): + enc = _get_argv_encoding() + try: + value = value.decode(enc) + except UnicodeError: + fs_enc = sys.getfilesystemencoding() + if fs_enc != enc: + try: + value = value.decode(fs_enc) + except UnicodeError: + value = value.decode("utf-8", "replace") + else: + value = value.decode("utf-8", "replace") + return value + return str(value) + + def __repr__(self) -> str: + return "STRING" + + +class Choice(ParamType): + """The choice type allows a value to be checked against a fixed set + of supported values. All of these values have to be strings. + + You should only pass a list or tuple of choices. Other iterables + (like generators) may lead to surprising results. + + The resulting value will always be one of the originally passed choices + regardless of ``case_sensitive`` or any ``ctx.token_normalize_func`` + being specified. + + See :ref:`choice-opts` for an example. + + :param case_sensitive: Set to false to make choices case + insensitive. Defaults to true. + """ + + name = "choice" + + def __init__(self, choices: t.Sequence[str], case_sensitive: bool = True) -> None: + self.choices = choices + self.case_sensitive = case_sensitive + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["choices"] = self.choices + info_dict["case_sensitive"] = self.case_sensitive + return info_dict + + def get_metavar(self, param: "Parameter") -> str: + choices_str = "|".join(self.choices) + + # Use curly braces to indicate a required argument. + if param.required and param.param_type_name == "argument": + return f"{{{choices_str}}}" + + # Use square braces to indicate an option or optional argument. 
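+        # For example (editor's illustration): Choice(["json", "text"])
+        # renders as {json|text} for a required argument and as [json|text]
+        # otherwise.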
+ return f"[{choices_str}]" + + def get_missing_message(self, param: "Parameter") -> str: + return _("Choose from:\n\t{choices}").format(choices=",\n\t".join(self.choices)) + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + # Match through normalization and case sensitivity + # first do token_normalize_func, then lowercase + # preserve original `value` to produce an accurate message in + # `self.fail` + normed_value = value + normed_choices = {choice: choice for choice in self.choices} + + if ctx is not None and ctx.token_normalize_func is not None: + normed_value = ctx.token_normalize_func(value) + normed_choices = { + ctx.token_normalize_func(normed_choice): original + for normed_choice, original in normed_choices.items() + } + + if not self.case_sensitive: + normed_value = normed_value.casefold() + normed_choices = { + normed_choice.casefold(): original + for normed_choice, original in normed_choices.items() + } + + if normed_value in normed_choices: + return normed_choices[normed_value] + + choices_str = ", ".join(map(repr, self.choices)) + self.fail( + ngettext( + "{value!r} is not {choice}.", + "{value!r} is not one of {choices}.", + len(self.choices), + ).format(value=value, choice=choices_str, choices=choices_str), + param, + ctx, + ) + + def __repr__(self) -> str: + return f"Choice({list(self.choices)})" + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Complete choices that start with the incomplete value. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + str_choices = map(str, self.choices) + + if self.case_sensitive: + matched = (c for c in str_choices if c.startswith(incomplete)) + else: + incomplete = incomplete.lower() + matched = (c for c in str_choices if c.lower().startswith(incomplete)) + + return [CompletionItem(c) for c in matched] + + +class DateTime(ParamType): + """The DateTime type converts date strings into `datetime` objects. + + The format strings which are checked are configurable, but default to some + common (non-timezone aware) ISO 8601 formats. + + When specifying *DateTime* formats, you should only pass a list or a tuple. + Other iterables, like generators, may lead to surprising results. + + The format strings are processed using ``datetime.strptime``, and this + consequently defines the format strings which are allowed. + + Parsing is tried using each format, in order, and the first format which + parses successfully is used. + + :param formats: A list or tuple of date format strings, in the order in + which they should be tried. Defaults to + ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, + ``'%Y-%m-%d %H:%M:%S'``. 
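+
+    A usage sketch (editor's addition; hypothetical command)::
+
+        @click.command()
+        @click.option("--since", type=DateTime(formats=["%Y-%m-%d"]))
+        def report(since):
+            ...  # ``since`` is a ``datetime.datetime`` on success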
+ """ + + name = "datetime" + + def __init__(self, formats: t.Optional[t.Sequence[str]] = None): + self.formats: t.Sequence[str] = formats or [ + "%Y-%m-%d", + "%Y-%m-%dT%H:%M:%S", + "%Y-%m-%d %H:%M:%S", + ] + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["formats"] = self.formats + return info_dict + + def get_metavar(self, param: "Parameter") -> str: + return f"[{'|'.join(self.formats)}]" + + def _try_to_convert_date(self, value: t.Any, format: str) -> t.Optional[datetime]: + try: + return datetime.strptime(value, format) + except ValueError: + return None + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + if isinstance(value, datetime): + return value + + for format in self.formats: + converted = self._try_to_convert_date(value, format) + + if converted is not None: + return converted + + formats_str = ", ".join(map(repr, self.formats)) + self.fail( + ngettext( + "{value!r} does not match the format {format}.", + "{value!r} does not match the formats {formats}.", + len(self.formats), + ).format(value=value, format=formats_str, formats=formats_str), + param, + ctx, + ) + + def __repr__(self) -> str: + return "DateTime" + + +class _NumberParamTypeBase(ParamType): + _number_class: t.ClassVar[t.Type[t.Any]] + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + try: + return self._number_class(value) + except ValueError: + self.fail( + _("{value!r} is not a valid {number_type}.").format( + value=value, number_type=self.name + ), + param, + ctx, + ) + + +class _NumberRangeBase(_NumberParamTypeBase): + def __init__( + self, + min: t.Optional[float] = None, + max: t.Optional[float] = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + self.min = min + self.max = max + self.min_open = min_open + self.max_open = max_open + self.clamp = clamp + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + min=self.min, + max=self.max, + min_open=self.min_open, + max_open=self.max_open, + clamp=self.clamp, + ) + return info_dict + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + import operator + + rv = super().convert(value, param, ctx) + lt_min: bool = self.min is not None and ( + operator.le if self.min_open else operator.lt + )(rv, self.min) + gt_max: bool = self.max is not None and ( + operator.ge if self.max_open else operator.gt + )(rv, self.max) + + if self.clamp: + if lt_min: + return self._clamp(self.min, 1, self.min_open) # type: ignore + + if gt_max: + return self._clamp(self.max, -1, self.max_open) # type: ignore + + if lt_min or gt_max: + self.fail( + _("{value} is not in the range {range}.").format( + value=rv, range=self._describe_range() + ), + param, + ctx, + ) + + return rv + + def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float: + """Find the valid value to clamp to bound in the given + direction. + + :param bound: The boundary value. + :param dir: 1 or -1 indicating the direction to move. + :param open: If true, the range does not include the bound. 
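+
+        For example (editor's note): clamping 0 into the open
+        :class:`IntRange` ``0 < x <= 10`` moves it to 1, the nearest
+        valid integer.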
+ """ + raise NotImplementedError + + def _describe_range(self) -> str: + """Describe the range for use in help text.""" + if self.min is None: + op = "<" if self.max_open else "<=" + return f"x{op}{self.max}" + + if self.max is None: + op = ">" if self.min_open else ">=" + return f"x{op}{self.min}" + + lop = "<" if self.min_open else "<=" + rop = "<" if self.max_open else "<=" + return f"{self.min}{lop}x{rop}{self.max}" + + def __repr__(self) -> str: + clamp = " clamped" if self.clamp else "" + return f"<{type(self).__name__} {self._describe_range()}{clamp}>" + + +class IntParamType(_NumberParamTypeBase): + name = "integer" + _number_class = int + + def __repr__(self) -> str: + return "INT" + + +class IntRange(_NumberRangeBase, IntParamType): + """Restrict an :data:`click.INT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "integer range" + + def _clamp( # type: ignore + self, bound: int, dir: "te.Literal[1, -1]", open: bool + ) -> int: + if not open: + return bound + + return bound + dir + + +class FloatParamType(_NumberParamTypeBase): + name = "float" + _number_class = float + + def __repr__(self) -> str: + return "FLOAT" + + +class FloatRange(_NumberRangeBase, FloatParamType): + """Restrict a :data:`click.FLOAT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. This is not supported if either + boundary is marked ``open``. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "float range" + + def __init__( + self, + min: t.Optional[float] = None, + max: t.Optional[float] = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + super().__init__( + min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp + ) + + if (min_open or max_open) and clamp: + raise TypeError("Clamping is not supported for open bounds.") + + def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float: + if not open: + return bound + + # Could use Python 3.9's math.nextafter here, but clamping an + # open float range doesn't seem to be particularly useful. It's + # left up to the user to write a callback to do it if needed. 
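+        # (Editor's sketch of such a callback: on Python >= 3.9,
+        # math.nextafter(bound, bound + dir) steps one representable float
+        # past the open bound in the right direction.)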
+ raise RuntimeError("Clamping is not supported for open bounds.") + + +class BoolParamType(ParamType): + name = "boolean" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + if value in {False, True}: + return bool(value) + + norm = value.strip().lower() + + if norm in {"1", "true", "t", "yes", "y", "on"}: + return True + + if norm in {"0", "false", "f", "no", "n", "off"}: + return False + + self.fail( + _("{value!r} is not a valid boolean.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "BOOL" + + +class UUIDParameterType(ParamType): + name = "uuid" + + def convert( + self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"] + ) -> t.Any: + import uuid + + if isinstance(value, uuid.UUID): + return value + + value = value.strip() + + try: + return uuid.UUID(value) + except ValueError: + self.fail( + _("{value!r} is not a valid UUID.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "UUID" + + +class File(ParamType): + """Declares a parameter to be a file for reading or writing. The file + is automatically closed once the context tears down (after the command + finished working). + + Files can be opened for reading or writing. The special value ``-`` + indicates stdin or stdout depending on the mode. + + By default, the file is opened for reading text data, but it can also be + opened in binary mode or for writing. The encoding parameter can be used + to force a specific encoding. + + The `lazy` flag controls if the file should be opened immediately or upon + first IO. The default is to be non-lazy for standard input and output + streams as well as files opened for reading, `lazy` otherwise. When opening a + file lazily for reading, it is still opened temporarily for validation, but + will not be held open until first IO. lazy is mainly useful when opening + for writing to avoid creating the file until it is needed. + + Starting with Click 2.0, files can also be opened atomically in which + case all writes go into a separate file in the same folder and upon + completion the file will be moved over to the original location. This + is useful if a file regularly read by other users is modified. + + See :ref:`file-args` for more information. 
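+
+    A usage sketch (editor's addition; hypothetical command)::
+
+        @click.command()
+        @click.argument("src", type=click.File("r"))
+        def cat(src):
+            click.echo(src.read())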
+ """ + + name = "filename" + envvar_list_splitter: t.ClassVar[str] = os.path.pathsep + + def __init__( + self, + mode: str = "r", + encoding: t.Optional[str] = None, + errors: t.Optional[str] = "strict", + lazy: t.Optional[bool] = None, + atomic: bool = False, + ) -> None: + self.mode = mode + self.encoding = encoding + self.errors = errors + self.lazy = lazy + self.atomic = atomic + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update(mode=self.mode, encoding=self.encoding) + return info_dict + + def resolve_lazy_flag(self, value: "t.Union[str, os.PathLike[str]]") -> bool: + if self.lazy is not None: + return self.lazy + if os.fspath(value) == "-": + return False + elif "w" in self.mode: + return True + return False + + def convert( + self, + value: t.Union[str, "os.PathLike[str]", t.IO[t.Any]], + param: t.Optional["Parameter"], + ctx: t.Optional["Context"], + ) -> t.IO[t.Any]: + if _is_file_like(value): + return value + + value = t.cast("t.Union[str, os.PathLike[str]]", value) + + try: + lazy = self.resolve_lazy_flag(value) + + if lazy: + lf = LazyFile( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + if ctx is not None: + ctx.call_on_close(lf.close_intelligently) + + return t.cast(t.IO[t.Any], lf) + + f, should_close = open_stream( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + # If a context is provided, we automatically close the file + # at the end of the context execution (or flush out). If a + # context does not exist, it's the caller's responsibility to + # properly close the file. This for instance happens when the + # type is used with prompts. + if ctx is not None: + if should_close: + ctx.call_on_close(safecall(f.close)) + else: + ctx.call_on_close(safecall(f.flush)) + + return f + except OSError as e: # noqa: B014 + self.fail(f"'{format_filename(value)}': {e.strerror}", param, ctx) + + def shell_complete( + self, ctx: "Context", param: "Parameter", incomplete: str + ) -> t.List["CompletionItem"]: + """Return a special completion marker that tells the completion + system to use the shell to provide file path completions. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + return [CompletionItem(incomplete, type="file")] + + +def _is_file_like(value: t.Any) -> "te.TypeGuard[t.IO[t.Any]]": + return hasattr(value, "read") or hasattr(value, "write") + + +class Path(ParamType): + """The ``Path`` type is similar to the :class:`File` type, but + returns the filename instead of an open file. Various checks can be + enabled to validate the type of file and permissions. + + :param exists: The file or directory needs to exist for the value to + be valid. If this is not set to ``True``, and the file does not + exist, then all further checks are silently skipped. + :param file_okay: Allow a file as a value. + :param dir_okay: Allow a directory as a value. + :param readable: if true, a readable check is performed. + :param writable: if true, a writable check is performed. + :param executable: if true, an executable check is performed. + :param resolve_path: Make the value absolute and resolve any + symlinks. A ``~`` is not expanded, as this is supposed to be + done by the shell only. 
+ :param allow_dash: Allow a single dash as a value, which indicates + a standard stream (but does not open it). Use + :func:`~click.open_file` to handle opening this value. + :param path_type: Convert the incoming path value to this type. If + ``None``, keep Python's default, which is ``str``. Useful to + convert to :class:`pathlib.Path`. + + .. versionchanged:: 8.1 + Added the ``executable`` parameter. + + .. versionchanged:: 8.0 + Allow passing ``path_type=pathlib.Path``. + + .. versionchanged:: 6.0 + Added the ``allow_dash`` parameter. + """ + + envvar_list_splitter: t.ClassVar[str] = os.path.pathsep + + def __init__( + self, + exists: bool = False, + file_okay: bool = True, + dir_okay: bool = True, + writable: bool = False, + readable: bool = True, + resolve_path: bool = False, + allow_dash: bool = False, + path_type: t.Optional[t.Type[t.Any]] = None, + executable: bool = False, + ): + self.exists = exists + self.file_okay = file_okay + self.dir_okay = dir_okay + self.readable = readable + self.writable = writable + self.executable = executable + self.resolve_path = resolve_path + self.allow_dash = allow_dash + self.type = path_type + + if self.file_okay and not self.dir_okay: + self.name: str = _("file") + elif self.dir_okay and not self.file_okay: + self.name = _("directory") + else: + self.name = _("path") + + def to_info_dict(self) -> t.Dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + exists=self.exists, + file_okay=self.file_okay, + dir_okay=self.dir_okay, + writable=self.writable, + readable=self.readable, + allow_dash=self.allow_dash, + ) + return info_dict + + def coerce_path_result( + self, value: "t.Union[str, os.PathLike[str]]" + ) -> "t.Union[str, bytes, os.PathLike[str]]": + if self.type is not None and not isinstance(value, self.type): + if self.type is str: + return os.fsdecode(value) + elif self.type is bytes: + return os.fsencode(value) + else: + return t.cast("os.PathLike[str]", self.type(value)) + + return value + + def convert( + self, + value: "t.Union[str, os.PathLike[str]]", + param: t.Optional["Parameter"], + ctx: t.Optional["Context"], + ) -> "t.Union[str, bytes, os.PathLike[str]]": + rv = value + + is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") + + if not is_dash: + if self.resolve_path: + # os.path.realpath doesn't resolve symlinks on Windows + # until Python 3.8. Use pathlib for now. 
+                import pathlib
+
+                rv = os.fsdecode(pathlib.Path(rv).resolve())
+
+            try:
+                st = os.stat(rv)
+            except OSError:
+                if not self.exists:
+                    return self.coerce_path_result(rv)
+                self.fail(
+                    _("{name} {filename!r} does not exist.").format(
+                        name=self.name.title(), filename=format_filename(value)
+                    ),
+                    param,
+                    ctx,
+                )
+
+            if not self.file_okay and stat.S_ISREG(st.st_mode):
+                self.fail(
+                    _("{name} {filename!r} is a file.").format(
+                        name=self.name.title(), filename=format_filename(value)
+                    ),
+                    param,
+                    ctx,
+                )
+            if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+                self.fail(
+                    _("{name} {filename!r} is a directory.").format(
+                        name=self.name.title(), filename=format_filename(value)
+                    ),
+                    param,
+                    ctx,
+                )
+
+            if self.readable and not os.access(rv, os.R_OK):
+                self.fail(
+                    _("{name} {filename!r} is not readable.").format(
+                        name=self.name.title(), filename=format_filename(value)
+                    ),
+                    param,
+                    ctx,
+                )
+
+            if self.writable and not os.access(rv, os.W_OK):
+                self.fail(
+                    _("{name} {filename!r} is not writable.").format(
+                        name=self.name.title(), filename=format_filename(value)
+                    ),
+                    param,
+                    ctx,
+                )
+
+            if self.executable and not os.access(rv, os.X_OK):
+                self.fail(
+                    _("{name} {filename!r} is not executable.").format(
+                        name=self.name.title(), filename=format_filename(value)
+                    ),
+                    param,
+                    ctx,
+                )
+
+        return self.coerce_path_result(rv)
+
+    def shell_complete(
+        self, ctx: "Context", param: "Parameter", incomplete: str
+    ) -> t.List["CompletionItem"]:
+        """Return a special completion marker that tells the completion
+        system to use the shell to provide path completions for only
+        directories or any paths.
+
+        :param ctx: Invocation context for this command.
+        :param param: The parameter that is requesting completion.
+        :param incomplete: Value being completed. May be empty.
+
+        .. versionadded:: 8.0
+        """
+        from click.shell_completion import CompletionItem
+
+        type = "dir" if self.dir_okay and not self.file_okay else "file"
+        return [CompletionItem(incomplete, type=type)]
+
+
+class Tuple(CompositeParamType):
+    """The default behavior of Click is to apply a type on a value directly.
+    This works well in most cases, except for when `nargs` is set to a fixed
+    count and different types should be used for different items. In this
+    case the :class:`Tuple` type can be used. This type can only be used
+    if `nargs` is set to a fixed number.
+
+    For more information see :ref:`tuple-type`.
+
+    This can be selected by using a Python tuple literal as a type.
+
+    :param types: a list of types that should be used for the tuple items.
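+
+    A usage sketch (editor's addition; hypothetical option)::
+
+        @click.option("--item", nargs=2, type=click.Tuple([str, int]))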
+    """
+
+    def __init__(self, types: t.Sequence[t.Union[t.Type[t.Any], ParamType]]) -> None:
+        self.types: t.Sequence[ParamType] = [convert_type(ty) for ty in types]
+
+    def to_info_dict(self) -> t.Dict[str, t.Any]:
+        info_dict = super().to_info_dict()
+        info_dict["types"] = [ty.to_info_dict() for ty in self.types]
+        return info_dict
+
+    @property
+    def name(self) -> str:  # type: ignore
+        return f"<{' '.join(ty.name for ty in self.types)}>"
+
+    @property
+    def arity(self) -> int:  # type: ignore
+        return len(self.types)
+
+    def convert(
+        self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
+    ) -> t.Any:
+        len_type = len(self.types)
+        len_value = len(value)
+
+        if len_value != len_type:
+            self.fail(
+                ngettext(
+                    "{len_type} values are required, but {len_value} was given.",
+                    "{len_type} values are required, but {len_value} were given.",
+                    len_value,
+                ).format(len_type=len_type, len_value=len_value),
+                param=param,
+                ctx=ctx,
+            )
+
+        return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
+
+
+def convert_type(ty: t.Optional[t.Any], default: t.Optional[t.Any] = None) -> ParamType:
+    """Find the most appropriate :class:`ParamType` for the given Python
+    type. If the type isn't provided, it can be inferred from a default
+    value.
+    """
+    guessed_type = False
+
+    if ty is None and default is not None:
+        if isinstance(default, (tuple, list)):
+            # If the default is empty, ty will remain None and will
+            # return STRING.
+            if default:
+                item = default[0]
+
+                # A tuple of tuples needs to detect the inner types.
+                # Can't call convert recursively because that would
+                # incorrectly unwind the tuple to a single type.
+                if isinstance(item, (tuple, list)):
+                    ty = tuple(map(type, item))
+                else:
+                    ty = type(item)
+        else:
+            ty = type(default)
+
+        guessed_type = True
+
+    if isinstance(ty, tuple):
+        return Tuple(ty)
+
+    if isinstance(ty, ParamType):
+        return ty
+
+    if ty is str or ty is None:
+        return STRING
+
+    if ty is int:
+        return INT
+
+    if ty is float:
+        return FLOAT
+
+    if ty is bool:
+        return BOOL
+
+    if guessed_type:
+        return STRING
+
+    if __debug__:
+        try:
+            if issubclass(ty, ParamType):
+                raise AssertionError(
+                    f"Attempted to use an uninstantiated parameter type ({ty})."
+                )
+        except TypeError:
+            # ty is an instance (correct), so issubclass fails.
+            pass
+
+    return FuncParamType(ty)
+
+
+#: A dummy parameter type that just does nothing. From a user's
+#: perspective this appears to just be the same as `STRING` but
+#: internally no string conversion takes place if the input was bytes.
+#: This is usually useful when working with file paths as they can
+#: appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful, which is
+#: why it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3d4308b5e4be658f94a175631eadc62c84008b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__init__.py @@ -0,0 +1,27 @@ +# Natural Language Toolkit: graphical representations package +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Import Tkinter-based modules if Tkinter is installed +try: + import tkinter +except ImportError: + import warnings + + warnings.warn("nltk.draw package not loaded (please install Tkinter library).") +else: + from nltk.draw.cfg import ProductionList, CFGEditor, CFGDemo + from nltk.draw.tree import ( + TreeSegmentWidget, + tree_to_treesegment, + TreeWidget, + TreeView, + draw_trees, + ) + from nltk.draw.table import Table + +from nltk.draw.dispersion import dispersion_plot diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4b571388eacb874ffd574290a613f8d5ba1e8ad Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/cfg.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/cfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fa2c425ed565db9a97abcec8649428495b1fbb4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/cfg.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/dispersion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/dispersion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..515c043b231f937e764ccfba1753cd92649fbcab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/dispersion.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/table.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d9371d324e9d60558bae82d56922847540f9e31 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/table.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/tree.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2ffdd290405ff51481008ef7308357783b6a799 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/tree.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c984ec0c95fc5815422f05b9e4da4b0bf2220f9 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nltk/draw/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/cfg.py b/llmeval-env/lib/python3.10/site-packages/nltk/draw/cfg.py new file mode 100644 index 0000000000000000000000000000000000000000..650162abf095d439cf7ca2ba3f0f36c81f0ed041 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/draw/cfg.py @@ -0,0 +1,859 @@ +# Natural Language Toolkit: CFG visualization +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Visualization tools for CFGs. +""" + +# Idea for a nice demo: +# - 3 panes: grammar, treelet, working area +# - grammar is a list of productions +# - when you select a production, the treelet that it licenses appears +# in the treelet area +# - the working area has the text on the bottom, and S at top. When +# you select a production, it shows (ghosted) the locations where +# that production's treelet could be attached to either the text +# or the tree rooted at S. +# - the user can drag the treelet onto one of those (or click on them?) +# - the user can delete pieces of the tree from the working area +# (right click?) +# - connecting top to bottom? drag one NP onto another? +# +# +-------------------------------------------------------------+ +# | S -> NP VP | S | +# |[NP -> Det N ]| / \ | +# | ... | NP VP | +# | N -> 'dog' | | +# | N -> 'cat' | | +# | ... | | +# +--------------+ | +# | NP | Det N | +# | / \ | | | | +# | Det N | the cat saw the dog | +# | | | +# +--------------+----------------------------------------------+ +# +# Operations: +# - connect a new treelet -- drag or click shadow +# - delete a treelet -- right click +# - if only connected to top, delete everything below +# - if only connected to bottom, delete everything above +# - connect top & bottom -- drag a leaf to a root or a root to a leaf +# - disconnect top & bottom -- right click +# - if connected to top & bottom, then disconnect + +import re +from tkinter import ( + Button, + Canvas, + Entry, + Frame, + IntVar, + Label, + Scrollbar, + Text, + Tk, + Toplevel, +) + +from nltk.draw.tree import TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import ( + CanvasFrame, + ColorizedList, + ShowText, + SymbolWidget, + TextWidget, +) +from nltk.grammar import CFG, Nonterminal, _read_cfg_production, nonterminals +from nltk.tree import Tree + +###################################################################### +# Production List +###################################################################### + + +class ProductionList(ColorizedList): + ARROW = SymbolWidget.SYMBOLS["rightarrow"] + + def _init_colortags(self, textwidget, options): + textwidget.tag_config("terminal", foreground="#006000") + textwidget.tag_config("arrow", font="symbol", underline="0") + textwidget.tag_config( + "nonterminal", foreground="blue", font=("helvetica", -12, "bold") + ) + + def _item_repr(self, item): + contents = [] + contents.append(("%s\t" % item.lhs(), "nonterminal")) + contents.append((self.ARROW, "arrow")) + for elt in item.rhs(): + if isinstance(elt, Nonterminal): + contents.append((" %s" % elt.symbol(), "nonterminal")) + else: + contents.append((" %r" % elt, "terminal")) + return contents + + +###################################################################### +# CFG Editor +###################################################################### + +_CFGEditor_HELP = """ + +The CFG Editor can be used to create or modify context free 
grammars.
+A context free grammar consists of a start symbol and a list of
+productions. The start symbol is specified by the text entry field in
+the upper right-hand corner of the editor, and the list of productions
+is specified in the main text editing box.
+
+Every non-blank line specifies a single production. Each production
+has the form "LHS -> RHS", where LHS is a single nonterminal, and RHS
+is a list of nonterminals and terminals.
+
+A nonterminal must be a single word, such as S or NP or NP_subj.
+Currently, nonterminals must consist of alphanumeric characters and
+underscores (_). Nonterminals are colored blue. If you place the
+mouse over any nonterminal, then all occurrences of that nonterminal
+will be highlighted.
+
+Terminals must be surrounded by single quotes (') or double
+quotes (\"). For example, "dog" and "New York" are terminals.
+Currently, the string within the quotes must consist of alphanumeric
+characters, underscores, and spaces.
+
+To enter a new production, go to a blank line, and type a nonterminal,
+followed by an arrow (->), followed by a sequence of terminals and
+nonterminals. Note that "->" (dash + greater-than) is automatically
+converted to an arrow symbol. When you move your cursor to a
+different line, your production will automatically be colorized. If
+there are any errors, they will be highlighted in red.
+
+Note that the order of the productions is significant for some
+algorithms. To re-order the productions, use cut and paste to move
+them.
+
+Use the buttons at the bottom of the window when you are done editing
+the CFG:
+    - Ok: apply the new CFG, and exit the editor.
+    - Apply: apply the new CFG, and do not exit the editor.
+    - Reset: revert to the original CFG, and do not exit the editor.
+    - Cancel: revert to the original CFG, and exit the editor.
+
+"""
+
+
+class CFGEditor:
+    """
+    A dialog window for creating and editing context free grammars.
+    ``CFGEditor`` imposes the following restrictions:
+
+    - All nonterminals must be strings consisting of word
+      characters.
+    - All terminals must be strings consisting of word characters
+      and space characters.
+    """
+
+    # Regular expressions used by _analyze_line. Precompile them, so
+    # we can process the text faster.
+    ARROW = SymbolWidget.SYMBOLS["rightarrow"]
+    _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|(" + ARROW + "))")
+    _ARROW_RE = re.compile(r"\s*(->|(" + ARROW + r"))\s*")
+    _PRODUCTION_RE = re.compile(
+        r"(^\s*\w+\s*)"  # LHS
+        + "(->|("
+        + ARROW
+        + r"))\s*"  # arrow
+        + r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$"  # RHS
+    )
+    _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|(" + ARROW + ")")
+    _BOLD = ("helvetica", -12, "bold")
+
+    def __init__(self, parent, cfg=None, set_cfg_callback=None):
+        self._parent = parent
+        if cfg is not None:
+            self._cfg = cfg
+        else:
+            self._cfg = CFG(Nonterminal("S"), [])
+        self._set_cfg_callback = set_cfg_callback
+
+        self._highlight_matching_nonterminals = 1
+
+        # Create the top-level window.
+        self._top = Toplevel(parent)
+        self._init_bindings()
+
+        self._init_startframe()
+        self._startframe.pack(side="top", fill="x", expand=0)
+        self._init_prodframe()
+        self._prodframe.pack(side="top", fill="both", expand=1)
+        self._init_buttons()
+        self._buttonframe.pack(side="bottom", fill="x", expand=0)
+
+        self._textwidget.focus()
+
+    def _init_startframe(self):
+        frame = self._startframe = Frame(self._top)
+        self._start = Entry(frame)
+        self._start.pack(side="right")
+        Label(frame, text="Start Symbol:").pack(side="right")
+        Label(frame, text="Productions:").pack(side="left")
+        self._start.insert(0, self._cfg.start().symbol())
+
+    def _init_buttons(self):
+        frame = self._buttonframe = Frame(self._top)
+        Button(frame, text="Ok", command=self._ok, underline=0, takefocus=0).pack(
+            side="left"
+        )
+        Button(frame, text="Apply", command=self._apply, underline=0, takefocus=0).pack(
+            side="left"
+        )
+        Button(frame, text="Reset", command=self._reset, underline=0, takefocus=0).pack(
+            side="left"
+        )
+        Button(
+            frame, text="Cancel", command=self._cancel, underline=0, takefocus=0
+        ).pack(side="left")
+        Button(frame, text="Help", command=self._help, underline=0, takefocus=0).pack(
+            side="right"
+        )
+
+    def _init_bindings(self):
+        self._top.title("CFG Editor")
+        self._top.bind("<Control-q>", self._cancel)
+        self._top.bind("<Control-x>", self._cancel)
+        self._top.bind("<Alt-q>", self._cancel)
+        # self._top.bind('<Control-c>', self._cancel)
+        self._top.bind("<Alt-x>", self._cancel)
+        self._top.bind("<Escape>", self._cancel)
+        # self._top.bind('<Control-c>', self._cancel)
+        self._top.bind("<Alt-c>", self._cancel)
+
+        self._top.bind("<Control-o>", self._ok)
+        self._top.bind("<Alt-o>", self._ok)
+        self._top.bind("<Control-a>", self._apply)
+        self._top.bind("<Alt-a>", self._apply)
+        self._top.bind("<Control-r>", self._reset)
+        self._top.bind("<Alt-r>", self._reset)
+        self._top.bind("<Control-h>", self._help)
+        self._top.bind("<Alt-h>", self._help)
+        self._top.bind("<F1>", self._help)
+
+    def _init_prodframe(self):
+        self._prodframe = Frame(self._top)
+
+        # Create the basic Text widget & scrollbar.
+        self._textwidget = Text(
+            self._prodframe, background="#e0e0e0", exportselection=1
+        )
+        self._textscroll = Scrollbar(self._prodframe, takefocus=0, orient="vertical")
+        self._textwidget.config(yscrollcommand=self._textscroll.set)
+        self._textscroll.config(command=self._textwidget.yview)
+        self._textscroll.pack(side="right", fill="y")
+        self._textwidget.pack(expand=1, fill="both", side="left")
+
+        # Initialize the colorization tags. Each nonterminal gets its
+        # own tag, so they aren't listed here.
+        self._textwidget.tag_config("terminal", foreground="#006000")
+        self._textwidget.tag_config("arrow", font="symbol")
+        self._textwidget.tag_config("error", background="red")
+
+        # Keep track of what line they're on. We use that to remember
+        # to re-analyze a line whenever they leave it.
+        self._linenum = 0
+
+        # Expand "->" to an arrow.
+        self._top.bind(">", self._replace_arrows)
+
+        # Re-colorize lines when appropriate.
+        self._top.bind("<<Paste>>", self._analyze)
+        self._top.bind("<KeyPress>", self._check_analyze)
+        self._top.bind("<ButtonPress>", self._check_analyze)
+
+        # Tab cycles focus; returning "break" below stops the Text
+        # widget's default binding from also inserting a tab character.
+        def cycle(e, textwidget=self._textwidget):
+            textwidget.tk_focusNext().focus()
+            return "break"
+
+        self._textwidget.bind("<Tab>", cycle)
+
+        prod_tuples = [(p.lhs(), [p.rhs()]) for p in self._cfg.productions()]
+        for i in range(len(prod_tuples) - 1, 0, -1):
+            if prod_tuples[i][0] == prod_tuples[i - 1][0]:
+                if () in prod_tuples[i][1]:
+                    continue
+                if () in prod_tuples[i - 1][1]:
+                    continue
+                prod_tuples[i - 1][1].extend(prod_tuples[i][1])
+                del prod_tuples[i]
+
+        for lhs, rhss in prod_tuples:
+            s = "%s ->" % lhs
+            for rhs in rhss:
+                for elt in rhs:
+                    if isinstance(elt, Nonterminal):
+                        s += " %s" % elt
+                    else:
+                        s += " %r" % elt
+                s += " |"
+            s = s[:-2] + "\n"
+            self._textwidget.insert("end", s)
+
+        self._analyze()
+
+        # # Add the productions to the text widget, and colorize them.
+        # prod_by_lhs = {}
+        # for prod in self._cfg.productions():
+        #     if len(prod.rhs()) > 0:
+        #         prod_by_lhs.setdefault(prod.lhs(),[]).append(prod)
+        # for (lhs, prods) in prod_by_lhs.items():
+        #     self._textwidget.insert('end', '%s ->' % lhs)
+        #     self._textwidget.insert('end', self._rhs(prods[0]))
+        #     for prod in prods[1:]:
+        #         print '\t|'+self._rhs(prod),
+        #         self._textwidget.insert('end', '\t|'+self._rhs(prod))
+        #     print
+        #     self._textwidget.insert('end', '\n')
+        # for prod in self._cfg.productions():
+        #     if len(prod.rhs()) == 0:
+        #         self._textwidget.insert('end', '%s' % prod)
+        # self._analyze()
+
+        # def _rhs(self, prod):
+        #     s = ''
+        #     for elt in prod.rhs():
+        #         if isinstance(elt, Nonterminal): s += ' %s' % elt.symbol()
+        #         else: s += ' %r' % elt
+        #     return s
+
+    def _clear_tags(self, linenum):
+        """
+        Remove all tags (except ``arrow`` and ``sel``) from the given
+        line of the text widget used for editing the productions.
+        """
+        start = "%d.0" % linenum
+        end = "%d.end" % linenum
+        for tag in self._textwidget.tag_names():
+            if tag not in ("arrow", "sel"):
+                self._textwidget.tag_remove(tag, start, end)
+
+    def _check_analyze(self, *e):
+        """
+        Check if we've moved to a new line. If we have, then remove
+        all colorization from the line we moved to, and re-colorize
+        the line that we moved from.
+        """
+        linenum = int(self._textwidget.index("insert").split(".")[0])
+        if linenum != self._linenum:
+            self._clear_tags(linenum)
+            self._analyze_line(self._linenum)
+            self._linenum = linenum
+
+    def _replace_arrows(self, *e):
+        """
+        Replace any ``'->'`` text strings with arrows (char \\256, in
+        symbol font). This searches the whole buffer, but is fast
+        enough to be done anytime they press '>'.
+        """
+        arrow = "1.0"
+        while True:
+            arrow = self._textwidget.search("->", arrow, "end+1char")
+            if arrow == "":
+                break
+            self._textwidget.delete(arrow, arrow + "+2char")
+            self._textwidget.insert(arrow, self.ARROW, "arrow")
+            self._textwidget.insert(arrow, "\t")
+
+        arrow = "1.0"
+        while True:
+            arrow = self._textwidget.search(self.ARROW, arrow + "+1char", "end+1char")
+            if arrow == "":
+                break
+            self._textwidget.tag_add("arrow", arrow, arrow + "+1char")
+
+    def _analyze_token(self, match, linenum):
+        """
+        Given a line number and a regexp match for a token on that
+        line, colorize the token. Note that the regexp match gives us
+        the token's text, start index (on the line), and end index (on
+        the line).
+        """
+        # What type of token is it?
+        if match.group()[0] in "'\"":
+            tag = "terminal"
+        elif match.group() in ("->", self.ARROW):
+            tag = "arrow"
+        else:
+            # If it's a nonterminal, then set up new bindings, so we
+            # can highlight all instances of that nonterminal when we
+            # put the mouse over it.
+            tag = "nonterminal_" + match.group()
+            if tag not in self._textwidget.tag_names():
+                self._init_nonterminal_tag(tag)
+
+        start = "%d.%d" % (linenum, match.start())
+        end = "%d.%d" % (linenum, match.end())
+        self._textwidget.tag_add(tag, start, end)
+
+    def _init_nonterminal_tag(self, tag, foreground="blue"):
+        self._textwidget.tag_config(tag, foreground=foreground, font=CFGEditor._BOLD)
+        if not self._highlight_matching_nonterminals:
+            return
+
+        def enter(e, textwidget=self._textwidget, tag=tag):
+            textwidget.tag_config(tag, background="#80ff80")
+
+        def leave(e, textwidget=self._textwidget, tag=tag):
+            textwidget.tag_config(tag, background="")
+
+        self._textwidget.tag_bind(tag, "<Enter>", enter)
+        self._textwidget.tag_bind(tag, "<Leave>", leave)
+
+    def _analyze_line(self, linenum):
+        """
+        Colorize a given line.
+        """
+        # Get rid of any tags that were previously on the line.
+        self._clear_tags(linenum)
+
+        # Get the line's text string.
+        line = self._textwidget.get(repr(linenum) + ".0", repr(linenum) + ".end")
+
+        # If it's a valid production, then colorize each token.
+        if CFGEditor._PRODUCTION_RE.match(line):
+            # It's valid; use _TOKEN_RE to tokenize the production,
+            # and call analyze_token on each token.
+            def analyze_token(match, self=self, linenum=linenum):
+                self._analyze_token(match, linenum)
+                return ""
+
+            CFGEditor._TOKEN_RE.sub(analyze_token, line)
+        elif line.strip() != "":
+            # It's invalid; show the user where the error is.
+            self._mark_error(linenum, line)
+
+    def _mark_error(self, linenum, line):
+        """
+        Mark the location of an error in a line.
+        """
+        arrowmatch = CFGEditor._ARROW_RE.search(line)
+        if not arrowmatch:
+            # If there's no arrow at all, highlight the whole line.
+            start = "%d.0" % linenum
+            end = "%d.end" % linenum
+        elif not CFGEditor._LHS_RE.match(line):
+            # Otherwise, if the LHS is bad, highlight it.
+            start = "%d.0" % linenum
+            end = "%d.%d" % (linenum, arrowmatch.start())
+        else:
+            # Otherwise, highlight the RHS.
+            start = "%d.%d" % (linenum, arrowmatch.end())
+            end = "%d.end" % linenum
+
+        # If we're highlighting 0 chars, highlight the whole line.
+        if self._textwidget.compare(start, "==", end):
+            start = "%d.0" % linenum
+            end = "%d.end" % linenum
+        self._textwidget.tag_add("error", start, end)
+
+    def _analyze(self, *e):
+        """
+        Replace ``->`` with arrows, and colorize the entire buffer.
+        """
+        self._replace_arrows()
+        numlines = int(self._textwidget.index("end").split(".")[0])
+        for linenum in range(1, numlines + 1):  # line numbers start at 1.
+            self._analyze_line(linenum)
+
+    def _parse_productions(self):
+        """
+        Parse the current contents of the textwidget buffer, to create
+        a list of productions.
+        """
+        productions = []
+
+        # Get the text, normalize it, and split it into lines.
+ text = self._textwidget.get("1.0", "end") + text = re.sub(self.ARROW, "->", text) + text = re.sub("\t", " ", text) + lines = text.split("\n") + + # Convert each line to a CFG production + for line in lines: + line = line.strip() + if line == "": + continue + productions += _read_cfg_production(line) + # if line.strip() == '': continue + # if not CFGEditor._PRODUCTION_RE.match(line): + # raise ValueError('Bad production string %r' % line) + # + # (lhs_str, rhs_str) = line.split('->') + # lhs = Nonterminal(lhs_str.strip()) + # rhs = [] + # def parse_token(match, rhs=rhs): + # token = match.group() + # if token[0] in "'\"": rhs.append(token[1:-1]) + # else: rhs.append(Nonterminal(token)) + # return '' + # CFGEditor._TOKEN_RE.sub(parse_token, rhs_str) + # + # productions.append(Production(lhs, *rhs)) + + return productions + + def _destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def _ok(self, *e): + self._apply() + self._destroy() + + def _apply(self, *e): + productions = self._parse_productions() + start = Nonterminal(self._start.get()) + cfg = CFG(start, productions) + if self._set_cfg_callback is not None: + self._set_cfg_callback(cfg) + + def _reset(self, *e): + self._textwidget.delete("1.0", "end") + for production in self._cfg.productions(): + self._textwidget.insert("end", "%s\n" % production) + self._analyze() + if self._set_cfg_callback is not None: + self._set_cfg_callback(self._cfg) + + def _cancel(self, *e): + try: + self._reset() + except: + pass + self._destroy() + + def _help(self, *e): + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._parent, + "Help: Chart Parser Demo", + (_CFGEditor_HELP).strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._parent, + "Help: Chart Parser Demo", + (_CFGEditor_HELP).strip(), + width=75, + ) + + +###################################################################### +# New Demo (built tree based on cfg) +###################################################################### + + +class CFGDemo: + def __init__(self, grammar, text): + self._grammar = grammar + self._text = text + + # Set up the main window. 
+        self._top = Tk()
+        self._top.title("Context Free Grammar Demo")
+
+        # Base font size
+        self._size = IntVar(self._top)
+        self._size.set(12)  # = medium
+
+        # Set up the key bindings
+        self._init_bindings(self._top)
+
+        # Create the basic frames
+        frame1 = Frame(self._top)
+        frame1.pack(side="left", fill="y", expand=0)
+        self._init_menubar(self._top)
+        self._init_buttons(self._top)
+        self._init_grammar(frame1)
+        self._init_treelet(frame1)
+        self._init_workspace(self._top)
+
+    # //////////////////////////////////////////////////
+    # Initialization
+    # //////////////////////////////////////////////////
+
+    def _init_bindings(self, top):
+        top.bind("<Control-q>", self.destroy)
+
+    def _init_menubar(self, parent):
+        pass
+
+    def _init_buttons(self, parent):
+        pass
+
+    def _init_grammar(self, parent):
+        self._prodlist = ProductionList(parent, self._grammar, width=20)
+        self._prodlist.pack(side="top", fill="both", expand=1)
+        self._prodlist.focus()
+        self._prodlist.add_callback("select", self._selectprod_cb)
+        self._prodlist.add_callback("move", self._selectprod_cb)
+
+    def _init_treelet(self, parent):
+        self._treelet_canvas = Canvas(parent, background="white")
+        self._treelet_canvas.pack(side="bottom", fill="x")
+        self._treelet = None
+
+    def _init_workspace(self, parent):
+        self._workspace = CanvasFrame(parent, background="white")
+        self._workspace.pack(side="right", fill="both", expand=1)
+        self._tree = None
+        self.reset_workspace()
+
+    # //////////////////////////////////////////////////
+    # Workspace
+    # //////////////////////////////////////////////////
+
+    def reset_workspace(self):
+        c = self._workspace.canvas()
+        fontsize = int(self._size.get())
+        node_font = ("helvetica", -(fontsize + 4), "bold")
+        leaf_font = ("helvetica", -(fontsize + 2))
+
+        # Remove the old tree
+        if self._tree is not None:
+            self._workspace.remove_widget(self._tree)
+
+        # The root of the tree.
+        start = self._grammar.start().symbol()
+        rootnode = TextWidget(c, start, font=node_font, draggable=1)
+
+        # The leaves of the tree.
+        leaves = []
+        for word in self._text:
+            leaves.append(TextWidget(c, word, font=leaf_font, draggable=1))
+
+        # Put it all together into one tree
+        self._tree = TreeSegmentWidget(c, rootnode, leaves, color="white")
+
+        # Add it to the workspace.
+        self._workspace.add_widget(self._tree)
+
+        # Move the leaves to the bottom of the workspace.
+        for leaf in leaves:
+            leaf.move(0, 100)
+
+        # self._nodes = {start:1}
+        # self._leaves = dict([(l,1) for l in leaves])
+
+    def workspace_markprod(self, production):
+        pass
+
+    def _markproduction(self, prod, tree=None):
+        if tree is None:
+            tree = self._tree
+        for i in range(len(tree.subtrees()) - len(prod.rhs())):
+            if tree["color", i] == "white":
+                pass  # FIXME: Is this necessary at all?
+
+            for j, node in enumerate(prod.rhs()):
+                widget = tree.subtrees()[i + j]
+                if (
+                    isinstance(node, Nonterminal)
+                    and isinstance(widget, TreeSegmentWidget)
+                    and node.symbol() == widget.label().text()
+                ):
+                    pass  # matching nonterminal
+                elif (
+                    isinstance(node, str)
+                    and isinstance(widget, TextWidget)
+                    and node == widget.text()
+                ):
+                    pass  # matching terminal
+                else:
+                    break
+            else:
+                # Everything matched!
+                print("MATCH AT", i)
+
+    # //////////////////////////////////////////////////
+    # Grammar
+    # //////////////////////////////////////////////////
+
+    def _selectprod_cb(self, production):
+        canvas = self._treelet_canvas
+
+        self._prodlist.highlight(production)
+        if self._treelet is not None:
+            self._treelet.destroy()
+
+        # Convert the production to a tree. Nonterminal RHS items become
+        # empty subtrees so they are drawn as nodes.
+ rhs = production.rhs() + for (i, elt) in enumerate(rhs): + if isinstance(elt, Nonterminal): + elt = Tree(elt) + tree = Tree(production.lhs().symbol(), *rhs) + + # Draw the tree in the treelet area. + fontsize = int(self._size.get()) + node_font = ("helvetica", -(fontsize + 4), "bold") + leaf_font = ("helvetica", -(fontsize + 2)) + self._treelet = tree_to_treesegment( + canvas, tree, node_font=node_font, leaf_font=leaf_font + ) + self._treelet["draggable"] = 1 + + # Center the treelet. + (x1, y1, x2, y2) = self._treelet.bbox() + w, h = int(canvas["width"]), int(canvas["height"]) + self._treelet.move((w - x1 - x2) / 2, (h - y1 - y2) / 2) + + # Mark the places where we can add it to the workspace. + self._markproduction(production) + + def destroy(self, *args): + self._top.destroy() + + def mainloop(self, *args, **kwargs): + self._top.mainloop(*args, **kwargs) + + +def demo2(): + from nltk import CFG, Nonterminal, Production + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + Production(PP, []), + Production(PP, ["up", "over", NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + grammar = CFG(S, productions) + + text = "I saw a man in the park".split() + d = CFGDemo(grammar, text) + d.mainloop() + + +###################################################################### +# Old Demo +###################################################################### + + +def demo(): + from nltk import CFG, Nonterminal + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + + grammar = CFG.fromstring( + """ + S -> NP VP + PP -> P NP + NP -> Det N + NP -> NP PP + VP -> V NP + VP -> VP PP + Det -> 'a' + Det -> 'the' + Det -> 'my' + NP -> 'I' + N -> 'dog' + N -> 'man' + N -> 'park' + N -> 'statue' + V -> 'saw' + P -> 'in' + P -> 'up' + P -> 'over' + P -> 'with' + """ + ) + + def cb(grammar): + print(grammar) + + top = Tk() + editor = CFGEditor(top, grammar, cb) + Label(top, text="\nTesting CFG Editor\n").pack() + Button(top, text="Quit", command=top.destroy).pack() + top.mainloop() + + +def demo3(): + from nltk import Production + + (S, VP, NP, PP, P, N, Name, V, Det) = nonterminals( + "S, VP, NP, PP, P, N, Name, V, Det" + ) + + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + Production(PP, []), + Production(PP, ["up", "over", NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + + t = Tk() + + def destroy(e, t=t): + t.destroy() + + t.bind("q", destroy) + p = ProductionList(t, 
productions)
+    p.pack(expand=1, fill="both")
+    p.add_callback("select", p.markonly)
+    p.add_callback("move", p.markonly)
+    p.focus()
+    p.mark(productions[2])
+    p.mark(productions[8])
+
+
+if __name__ == "__main__":
+    demo()
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/dispersion.py b/llmeval-env/lib/python3.10/site-packages/nltk/draw/dispersion.py
new file mode 100644
index 0000000000000000000000000000000000000000..0991194dc42e1c258b6e62c3e8dfb71d44bb3ce6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/draw/dispersion.py
@@ -0,0 +1,63 @@
+# Natural Language Toolkit: Dispersion Plots
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Steven Bird
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+A utility for displaying lexical dispersion.
+"""
+
+
+def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
+    """
+    Generate a lexical dispersion plot.
+
+    :param text: The source text
+    :type text: list(str) or iter(str)
+    :param words: The target words
+    :type words: list of str
+    :param ignore_case: flag to set if case should be ignored when searching text
+    :type ignore_case: bool
+    :param title: the title of the plot
+    :type title: str
+    :return: a matplotlib Axes object that may still be modified before plotting
+    :rtype: Axes
+    """
+
+    try:
+        import matplotlib.pyplot as plt
+    except ImportError as e:
+        raise ImportError(
+            "The plot function requires matplotlib to be installed. "
+            "See https://matplotlib.org/"
+        ) from e
+
+    word2y = {
+        word.casefold() if ignore_case else word: y
+        for y, word in enumerate(reversed(words))
+    }
+    xs, ys = [], []
+    for x, token in enumerate(text):
+        token = token.casefold() if ignore_case else token
+        y = word2y.get(token)
+        if y is not None:
+            xs.append(x)
+            ys.append(y)
+
+    _, ax = plt.subplots()
+    ax.plot(xs, ys, "|")
+    # y=0 corresponds to the *last* target word (see word2y above), so the
+    # tick labels must be reversed to match.
+    ax.set_yticks(list(range(len(words))), list(reversed(words)), color="C0")
+    ax.set_ylim(-1, len(words))
+    ax.set_title(title)
+    ax.set_xlabel("Word Offset")
+    return ax
+
+
+if __name__ == "__main__":
+    import matplotlib.pyplot as plt
+
+    from nltk.corpus import gutenberg
+
+    words = ["Elinor", "Marianne", "Edward", "Willoughby"]
+    dispersion_plot(gutenberg.words("austen-sense.txt"), words)
+    plt.show()
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/table.py b/llmeval-env/lib/python3.10/site-packages/nltk/draw/table.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d3526d5f1bf223684a1293dd5ff32ef6cbbbf55
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/draw/table.py
@@ -0,0 +1,1177 @@
+# Natural Language Toolkit: Table widget
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Tkinter widgets for displaying multi-column listboxes and tables.
+"""
+
+import operator
+from tkinter import Frame, Label, Listbox, Scrollbar, Tk
+
+######################################################################
+# Multi-Column Listbox
+######################################################################
+
+
+class MultiListbox(Frame):
+    """
+    A multi-column listbox, where the current selection applies to an
+    entire row.  Based on the MultiListbox Tkinter widget
+    recipe from the Python Cookbook (https://code.activestate.com/recipes/52266/)
+
+    For the most part, ``MultiListbox`` methods delegate to its
+    contained listboxes.  For any methods that do not have docstrings,
+    see ``Tkinter.Listbox`` for a description of what that method does.
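+
+    A minimal usage sketch (hypothetical column names and values):
+
+        >>> root = Tk()  # doctest: +SKIP
+        >>> mlb = MultiListbox(root, ["Name", "Size"])  # doctest: +SKIP
+        >>> mlb.insert("end", ("foo.txt", "4 KB"))  # doctest: +SKIP
+        >>> mlb.pack(expand=True, fill="both")  # doctest: +SKIP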
+ """ + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + #: Default configuration values for the frame. + FRAME_CONFIG = dict(background="#888", takefocus=True, highlightthickness=1) + + #: Default configurations for the column labels. + LABEL_CONFIG = dict( + borderwidth=1, + relief="raised", + font="helvetica -16 bold", + background="#444", + foreground="white", + ) + + #: Default configuration for the column listboxes. + LISTBOX_CONFIG = dict( + borderwidth=1, + selectborderwidth=0, + highlightthickness=0, + exportselection=False, + selectbackground="#888", + activestyle="none", + takefocus=False, + ) + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, master, columns, column_weights=None, cnf={}, **kw): + """ + Construct a new multi-column listbox widget. + + :param master: The widget that should contain the new + multi-column listbox. + + :param columns: Specifies what columns should be included in + the new multi-column listbox. If ``columns`` is an integer, + then it is the number of columns to include. If it is + a list, then its length indicates the number of columns + to include; and each element of the list will be used as + a label for the corresponding column. + + :param cnf, kw: Configuration parameters for this widget. + Use ``label_*`` to configure all labels; and ``listbox_*`` + to configure all listboxes. E.g.: + >>> root = Tk() # doctest: +SKIP + >>> MultiListbox(root, ["Subject", "Sender", "Date"], label_foreground='red').pack() # doctest: +SKIP + """ + # If columns was specified as an int, convert it to a list. + if isinstance(columns, int): + columns = list(range(columns)) + include_labels = False + else: + include_labels = True + + if len(columns) == 0: + raise ValueError("Expected at least one column") + + # Instance variables + self._column_names = tuple(columns) + self._listboxes = [] + self._labels = [] + + # Pick a default value for column_weights, if none was specified. + if column_weights is None: + column_weights = [1] * len(columns) + elif len(column_weights) != len(columns): + raise ValueError("Expected one column_weight for each column") + self._column_weights = column_weights + + # Configure our widgets. 
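+        # Lay the widget out on a grid: row 0 holds the (optional)
+        # column labels and row 1 holds the listboxes.  Row 1 gets all
+        # of the vertical stretch, and each column stretches in
+        # proportion to its entry in ``column_weights``.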
+        Frame.__init__(self, master, **self.FRAME_CONFIG)
+        self.grid_rowconfigure(1, weight=1)
+        for i, label in enumerate(self._column_names):
+            self.grid_columnconfigure(i, weight=column_weights[i])
+
+            # Create a label for the column
+            if include_labels:
+                l = Label(self, text=label, **self.LABEL_CONFIG)
+                self._labels.append(l)
+                l.grid(column=i, row=0, sticky="news", padx=0, pady=0)
+                l.column_index = i
+
+            # Create a listbox for the column
+            lb = Listbox(self, **self.LISTBOX_CONFIG)
+            self._listboxes.append(lb)
+            lb.grid(column=i, row=1, sticky="news", padx=0, pady=0)
+            lb.column_index = i
+
+            # Clicking or dragging selects:
+            lb.bind("<Button-1>", self._select)
+            lb.bind("<B1-Motion>", self._select)
+            # Scroll wheel scrolls:
+            lb.bind("<Button-4>", lambda e: self._scroll(-1))
+            lb.bind("<Button-5>", lambda e: self._scroll(+1))
+            lb.bind("<MouseWheel>", lambda e: self._scroll(e.delta))
+            # Button 2 can be used to scan:
+            lb.bind("<Button-2>", lambda e: self.scan_mark(e.x, e.y))
+            lb.bind("<B2-Motion>", lambda e: self.scan_dragto(e.x, e.y))
+            # Dragging outside the window has no effect (disable
+            # the default listbox behavior, which scrolls):
+            lb.bind("<B1-Leave>", lambda e: "break")
+            # Columns can be resized by dragging the edges of their
+            # labels (binding the listbox here would shadow _select):
+            if include_labels:
+                l.bind("<Button-1>", self._resize_column)
+
+        # Columns can be resized by dragging them.  (This binding is
+        # used if they click on the grid between columns:)
+        self.bind("<Button-1>", self._resize_column)
+
+        # Set up key bindings for the widget:
+        self.bind("<Up>", lambda e: self.select(delta=-1))
+        self.bind("<Down>", lambda e: self.select(delta=1))
+        self.bind("<Prior>", lambda e: self.select(delta=-self._pagesize()))
+        self.bind("<Next>", lambda e: self.select(delta=self._pagesize()))
+
+        # Configuration customizations
+        self.configure(cnf, **kw)
+
+    # /////////////////////////////////////////////////////////////////
+    # Column Resizing
+    # /////////////////////////////////////////////////////////////////
+
+    def _resize_column(self, event):
+        """
+        Callback used to resize a column of the table.  Return ``True``
+        if the column is actually getting resized (if the user clicked
+        on the far left or far right 5 pixels of a label); and
+        ``False`` otherwise.
+        """
+        # If we're already waiting for a button release, then ignore
+        # the new button press.
+        if event.widget.bind("<ButtonRelease>"):
+            return False
+
+        # Decide which column (if any) to resize.
+        self._resize_column_index = None
+        if event.widget is self:
+            for i, lb in enumerate(self._listboxes):
+                if abs(event.x - (lb.winfo_x() + lb.winfo_width())) < 10:
+                    self._resize_column_index = i
+        elif event.x > (event.widget.winfo_width() - 5):
+            self._resize_column_index = event.widget.column_index
+        elif event.x < 5 and event.widget.column_index != 0:
+            self._resize_column_index = event.widget.column_index - 1
+
+        # Bind callbacks that are used to resize it.
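+        # The bindings are transient: <Motion> resizes while the button
+        # is held down, and the matching <ButtonRelease-N> removes both
+        # bindings again (see _resize_column_buttonrelease_cb below).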
+        if self._resize_column_index is not None:
+            event.widget.bind("<Motion>", self._resize_column_motion_cb)
+            event.widget.bind(
+                "<ButtonRelease-%d>" % event.num, self._resize_column_buttonrelease_cb
+            )
+            return True
+        else:
+            return False
+
+    def _resize_column_motion_cb(self, event):
+        lb = self._listboxes[self._resize_column_index]
+        charwidth = lb.winfo_width() / lb["width"]
+
+        x1 = event.x + event.widget.winfo_x()
+        x2 = lb.winfo_x() + lb.winfo_width()
+
+        lb["width"] = max(3, lb["width"] + (x1 - x2) // charwidth)
+
+    def _resize_column_buttonrelease_cb(self, event):
+        event.widget.unbind("<ButtonRelease-%d>" % event.num)
+        event.widget.unbind("<Motion>")
+
+    # /////////////////////////////////////////////////////////////////
+    # Properties
+    # /////////////////////////////////////////////////////////////////
+
+    @property
+    def column_names(self):
+        """
+        A tuple containing the names of the columns used by this
+        multi-column listbox.
+        """
+        return self._column_names
+
+    @property
+    def column_labels(self):
+        """
+        A tuple containing the ``Tkinter.Label`` widgets used to
+        display the label of each column.  If this multi-column
+        listbox was created without labels, then this will be an empty
+        tuple.  These widgets will all be augmented with a
+        ``column_index`` attribute, which can be used to determine
+        which column they correspond to.  This can be convenient,
+        e.g., when defining callbacks for bound events.
+        """
+        return tuple(self._labels)
+
+    @property
+    def listboxes(self):
+        """
+        A tuple containing the ``Tkinter.Listbox`` widgets used to
+        display individual columns.  These widgets will all be
+        augmented with a ``column_index`` attribute, which can be used
+        to determine which column they correspond to.  This can be
+        convenient, e.g., when defining callbacks for bound events.
+        """
+        return tuple(self._listboxes)
+
+    # /////////////////////////////////////////////////////////////////
+    # Mouse & Keyboard Callback Functions
+    # /////////////////////////////////////////////////////////////////
+
+    def _select(self, e):
+        i = e.widget.nearest(e.y)
+        self.selection_clear(0, "end")
+        self.selection_set(i)
+        self.activate(i)
+        self.focus()
+
+    def _scroll(self, delta):
+        for lb in self._listboxes:
+            lb.yview_scroll(delta, "unit")
+        return "break"
+
+    def _pagesize(self):
+        """:return: The number of rows that makes up one page"""
+        return int(self.index("@0,1000000")) - int(self.index("@0,0"))
+
+    # /////////////////////////////////////////////////////////////////
+    # Row selection
+    # /////////////////////////////////////////////////////////////////
+
+    def select(self, index=None, delta=None, see=True):
+        """
+        Set the selected row.  If ``index`` is specified, then select
+        row ``index``.  Otherwise, if ``delta`` is specified, then move
+        the current selection by ``delta`` (negative numbers for up,
+        positive numbers for down).  This will not move the selection
+        past the top or the bottom of the list.
+
+        :param see: If true, then call ``self.see()`` with the newly
+            selected index, to ensure that it is visible.
+        """
+        if (index is not None) and (delta is not None):
+            raise ValueError("specify index or delta, but not both")
+
+        # If delta was given, then calculate index.
+        if delta is not None:
+            if len(self.curselection()) == 0:
+                index = -1 + delta
+            else:
+                index = int(self.curselection()[0]) + delta
+
+        # Clear all selected rows.
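+        # Clearing first keeps every column's listbox in sync; each of
+        # the delegating selection methods below loops over all columns.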
+        self.selection_clear(0, "end")
+
+        # Select the specified index
+        if index is not None:
+            index = min(max(index, 0), self.size() - 1)
+            # self.activate(index)
+            self.selection_set(index)
+            if see:
+                self.see(index)
+
+    # /////////////////////////////////////////////////////////////////
+    # Configuration
+    # /////////////////////////////////////////////////////////////////
+
+    def configure(self, cnf={}, **kw):
+        """
+        Configure this widget.  Use ``label_*`` to configure all
+        labels; and ``listbox_*`` to configure all listboxes.  E.g.:
+
+        >>> master = Tk()  # doctest: +SKIP
+        >>> mlb = MultiListbox(master, 5)  # doctest: +SKIP
+        >>> mlb.configure(label_foreground='red')  # doctest: +SKIP
+        >>> mlb.configure(listbox_foreground='red')  # doctest: +SKIP
+        """
+        cnf = dict(list(cnf.items()) + list(kw.items()))
+        for (key, val) in list(cnf.items()):
+            if key.startswith("label_") or key.startswith("label-"):
+                for label in self._labels:
+                    label.configure({key[6:]: val})
+            elif key.startswith("listbox_") or key.startswith("listbox-"):
+                for listbox in self._listboxes:
+                    listbox.configure({key[8:]: val})
+            else:
+                Frame.configure(self, {key: val})
+
+    def __setitem__(self, key, val):
+        """
+        Configure this widget.  This is equivalent to
+        ``self.configure({key: val})``.  See ``configure()``.
+        """
+        self.configure({key: val})
+
+    def rowconfigure(self, row_index, cnf={}, **kw):
+        """
+        Configure all table cells in the given row.  Valid keyword
+        arguments are: ``background``, ``bg``, ``foreground``, ``fg``,
+        ``selectbackground``, ``selectforeground``.
+        """
+        for lb in self._listboxes:
+            lb.itemconfigure(row_index, cnf, **kw)
+
+    def columnconfigure(self, col_index, cnf={}, **kw):
+        """
+        Configure all table cells in the given column.  Valid keyword
+        arguments are: ``background``, ``bg``, ``foreground``, ``fg``,
+        ``selectbackground``, ``selectforeground``.
+        """
+        lb = self._listboxes[col_index]
+
+        cnf = dict(list(cnf.items()) + list(kw.items()))
+        for (key, val) in list(cnf.items()):
+            if key in (
+                "background",
+                "bg",
+                "foreground",
+                "fg",
+                "selectbackground",
+                "selectforeground",
+            ):
+                for i in range(lb.size()):
+                    lb.itemconfigure(i, {key: val})
+            else:
+                lb.configure({key: val})
+
+    def itemconfigure(self, row_index, col_index, cnf=None, **kw):
+        """
+        Configure the table cell at the given row and column.  Valid
+        keyword arguments are: ``background``, ``bg``, ``foreground``,
+        ``fg``, ``selectbackground``, ``selectforeground``.
+        """
+        lb = self._listboxes[col_index]
+        return lb.itemconfigure(row_index, cnf, **kw)
+
+    # /////////////////////////////////////////////////////////////////
+    # Value Access
+    # /////////////////////////////////////////////////////////////////
+
+    def insert(self, index, *rows):
+        """
+        Insert the given row or rows into the table, at the given
+        index.  Each row value should be a tuple of cell values, one
+        for each column in the row.  Index may be an integer or any of
+        the special strings (such as ``'end'``) accepted by
+        ``Tkinter.Listbox``.
+        """
+        for elt in rows:
+            if len(elt) != len(self._column_names):
+                raise ValueError(
+                    "rows should be tuples whose length "
+                    "is equal to the number of columns"
+                )
+        for (lb, elts) in zip(self._listboxes, list(zip(*rows))):
+            lb.insert(index, *elts)
+
+    def get(self, first, last=None):
+        """
+        Return the value(s) of the specified row(s).  If ``last`` is
+        not specified, then return a single row value; otherwise,
+        return a list of row values.  Each row value is a tuple of
+        cell values, one for each column in the row.
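+
+        For example (hypothetical two-column values):
+
+            >>> mlb.get(0)  # doctest: +SKIP
+            ('foo.txt', '4 KB')
+            >>> mlb.get(0, "end")  # doctest: +SKIP
+            [('foo.txt', '4 KB'), ('bar.txt', '9 KB')]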
+        """
+        values = [lb.get(first, last) for lb in self._listboxes]
+        if last:
+            return [tuple(row) for row in zip(*values)]
+        else:
+            return tuple(values)
+
+    def bbox(self, row, col):
+        """
+        Return the bounding box for the given table cell, relative to
+        this widget's top-left corner.  The bounding box is a tuple
+        of integers ``(left, top, width, height)``.
+        """
+        dx, dy, _, _ = self.grid_bbox(row=0, column=col)
+        x, y, w, h = self._listboxes[col].bbox(row)
+        return int(x) + int(dx), int(y) + int(dy), int(w), int(h)
+
+    # /////////////////////////////////////////////////////////////////
+    # Hide/Show Columns
+    # /////////////////////////////////////////////////////////////////
+
+    def hide_column(self, col_index):
+        """
+        Hide the given column.  The column's state is still
+        maintained: its values will still be returned by ``get()``, and
+        you must supply its values when calling ``insert()``.  It is
+        safe to call this on a column that is already hidden.
+
+        :see: ``show_column()``
+        """
+        if self._labels:
+            self._labels[col_index].grid_forget()
+        self.listboxes[col_index].grid_forget()
+        self.grid_columnconfigure(col_index, weight=0)
+
+    def show_column(self, col_index):
+        """
+        Display a column that has been hidden using ``hide_column()``.
+        It is safe to call this on a column that is not hidden.
+        """
+        weight = self._column_weights[col_index]
+        if self._labels:
+            self._labels[col_index].grid(
+                column=col_index, row=0, sticky="news", padx=0, pady=0
+            )
+        self._listboxes[col_index].grid(
+            column=col_index, row=1, sticky="news", padx=0, pady=0
+        )
+        self.grid_columnconfigure(col_index, weight=weight)
+
+    # /////////////////////////////////////////////////////////////////
+    # Binding Methods
+    # /////////////////////////////////////////////////////////////////
+
+    def bind_to_labels(self, sequence=None, func=None, add=None):
+        """
+        Add a binding to each ``Tkinter.Label`` widget in this
+        multi-column listbox that will call ``func`` in response to the
+        event sequence.
+
+        :return: A list of the identifiers of replaced binding
+            functions (if any), allowing for their deletion (to
+            prevent a memory leak).
+        """
+        return [label.bind(sequence, func, add) for label in self.column_labels]
+
+    def bind_to_listboxes(self, sequence=None, func=None, add=None):
+        """
+        Add a binding to each ``Tkinter.Listbox`` widget in this
+        multi-column listbox that will call ``func`` in response to the
+        event sequence.
+
+        :return: A list of the identifiers of replaced binding
+            functions (if any), allowing for their deletion (to
+            prevent a memory leak).
+        """
+        return [listbox.bind(sequence, func, add) for listbox in self.listboxes]
+
+    def bind_to_columns(self, sequence=None, func=None, add=None):
+        """
+        Add a binding to each ``Tkinter.Label`` and ``Tkinter.Listbox``
+        widget in this multi-column listbox that will call ``func`` in
+        response to the event sequence.
+
+        :return: A list of the identifiers of replaced binding
+            functions (if any), allowing for their deletion (to
+            prevent a memory leak).
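+
+        For example, with a hypothetical ``on_click`` handler:
+
+            >>> mlb.bind_to_columns("<Double-Button-1>", on_click)  # doctest: +SKIP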
+ """ + return self.bind_to_labels(sequence, func, add) + self.bind_to_listboxes( + sequence, func, add + ) + + # ///////////////////////////////////////////////////////////////// + # Simple Delegation + # ///////////////////////////////////////////////////////////////// + + # These methods delegate to the first listbox: + def curselection(self, *args, **kwargs): + return self._listboxes[0].curselection(*args, **kwargs) + + def selection_includes(self, *args, **kwargs): + return self._listboxes[0].selection_includes(*args, **kwargs) + + def itemcget(self, *args, **kwargs): + return self._listboxes[0].itemcget(*args, **kwargs) + + def size(self, *args, **kwargs): + return self._listboxes[0].size(*args, **kwargs) + + def index(self, *args, **kwargs): + return self._listboxes[0].index(*args, **kwargs) + + def nearest(self, *args, **kwargs): + return self._listboxes[0].nearest(*args, **kwargs) + + # These methods delegate to each listbox (and return None): + def activate(self, *args, **kwargs): + for lb in self._listboxes: + lb.activate(*args, **kwargs) + + def delete(self, *args, **kwargs): + for lb in self._listboxes: + lb.delete(*args, **kwargs) + + def scan_mark(self, *args, **kwargs): + for lb in self._listboxes: + lb.scan_mark(*args, **kwargs) + + def scan_dragto(self, *args, **kwargs): + for lb in self._listboxes: + lb.scan_dragto(*args, **kwargs) + + def see(self, *args, **kwargs): + for lb in self._listboxes: + lb.see(*args, **kwargs) + + def selection_anchor(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_anchor(*args, **kwargs) + + def selection_clear(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_clear(*args, **kwargs) + + def selection_set(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_set(*args, **kwargs) + + def yview(self, *args, **kwargs): + for lb in self._listboxes: + v = lb.yview(*args, **kwargs) + return v # if called with no arguments + + def yview_moveto(self, *args, **kwargs): + for lb in self._listboxes: + lb.yview_moveto(*args, **kwargs) + + def yview_scroll(self, *args, **kwargs): + for lb in self._listboxes: + lb.yview_scroll(*args, **kwargs) + + # ///////////////////////////////////////////////////////////////// + # Aliases + # ///////////////////////////////////////////////////////////////// + + itemconfig = itemconfigure + rowconfig = rowconfigure + columnconfig = columnconfigure + select_anchor = selection_anchor + select_clear = selection_clear + select_includes = selection_includes + select_set = selection_set + + # ///////////////////////////////////////////////////////////////// + # These listbox methods are not defined for multi-listbox + # ///////////////////////////////////////////////////////////////// + # def xview(self, *what): pass + # def xview_moveto(self, fraction): pass + # def xview_scroll(self, number, what): pass + + +###################################################################### +# Table +###################################################################### + + +class Table: + """ + A display widget for a table of values, based on a ``MultiListbox`` + widget. For many purposes, ``Table`` can be treated as a + list-of-lists. E.g., table[i] is a list of the values for row i; + and table.append(row) adds a new row with the given list of + values. Individual cells can be accessed using table[i,j], which + refers to the j-th column of the i-th row. This can be used to + both read and write values from the table. 
E.g.:
+
+        >>> table[i,j] = 'hello'  # doctest: +SKIP
+
+    The column (j) can be given either as an index number, or as a
+    column name.  E.g., the following prints the value in the 3rd row
+    for the 'First Name' column:
+
+        >>> print(table[3, 'First Name'])  # doctest: +SKIP
+        John
+
+    You can configure the colors for individual rows, columns, or
+    cells using ``rowconfig()``, ``columnconfig()``, and ``itemconfig()``.
+    The color configuration for each row will be preserved if the
+    table is modified; however, when new rows are added, any color
+    configurations that have been made for *columns* will not be
+    applied to the new row.
+
+    Note: Although ``Table`` acts like a widget in some ways (e.g., it
+    defines ``grid()``, ``pack()``, and ``bind()``), it is not itself a
+    widget; it just contains one.  This is because widgets need to
+    define ``__getitem__()``, ``__setitem__()``, and ``__nonzero__()`` in
+    a way that's incompatible with the fact that ``Table`` behaves as a
+    list-of-lists.
+
+    :ivar _mlb: The multi-column listbox used to display this table's data.
+    :ivar _rows: A list-of-lists used to hold the cell values of this
+        table.  Each element of _rows is a row value, i.e., a list of
+        cell values, one for each column in the row.
+    """
+
+    def __init__(
+        self,
+        master,
+        column_names,
+        rows=None,
+        column_weights=None,
+        scrollbar=True,
+        click_to_sort=True,
+        reprfunc=None,
+        cnf={},
+        **kw
+    ):
+        """
+        Construct a new Table widget.
+
+        :type master: Tkinter.Widget
+        :param master: The widget that should contain the new table.
+        :type column_names: list(str)
+        :param column_names: A list of names for the columns; these
+            names will be used to create labels for each column;
+            and can be used as an index when reading or writing
+            cell values from the table.
+        :type rows: list(list)
+        :param rows: A list of row values used to initialize the table.
+            Each row value should be a tuple of cell values, one for
+            each column in the row.
+        :type scrollbar: bool
+        :param scrollbar: If true, then create a scrollbar for the
+            new table widget.
+        :type click_to_sort: bool
+        :param click_to_sort: If true, then create bindings that will
+            sort the table's rows by a given column's values if the
+            user clicks on that column's label.
+        :type reprfunc: function
+        :param reprfunc: If specified, then use this function to
+            convert each table cell value to a string suitable for
+            display.  ``reprfunc`` has the following signature:
+            reprfunc(row_index, col_index, cell_value) -> str
+            (Note that the column is specified by index, not by name.)
+        :param cnf, kw: Configuration parameters for this widget's
+            contained ``MultiListbox``.  See ``MultiListbox.__init__()``
+            for details.
+        """
+        self._num_columns = len(column_names)
+        self._reprfunc = reprfunc
+        self._frame = Frame(master)
+
+        self._column_name_to_index = {c: i for (i, c) in enumerate(column_names)}
+
+        # Make a copy of the rows & check that it's valid.
+        if rows is None:
+            self._rows = []
+        else:
+            self._rows = [[v for v in row] for row in rows]
+        for row in self._rows:
+            self._checkrow(row)
+
+        # Create our multi-list box.
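+        # The MultiListbox below does the actual display work; ``Table``
+        # keeps the authoritative cell values in ``self._rows`` and
+        # mirrors every mutation into the listboxes.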
+        self._mlb = MultiListbox(self._frame, column_names, column_weights, cnf, **kw)
+        self._mlb.pack(side="left", expand=True, fill="both")
+
+        # Optional scrollbar
+        if scrollbar:
+            sb = Scrollbar(self._frame, orient="vertical", command=self._mlb.yview)
+            self._mlb.listboxes[0]["yscrollcommand"] = sb.set
+            # for listbox in self._mlb.listboxes:
+            #     listbox['yscrollcommand'] = sb.set
+            sb.pack(side="right", fill="y")
+            self._scrollbar = sb
+
+        # Set up sorting
+        self._sortkey = None
+        if click_to_sort:
+            for i, l in enumerate(self._mlb.column_labels):
+                l.bind("<Button-1>", self._sort)
+
+        # Fill in our multi-list box.
+        self._fill_table()
+
+    # /////////////////////////////////////////////////////////////////
+    # { Widget-like Methods
+    # /////////////////////////////////////////////////////////////////
+    # These all just delegate to either our frame or our MLB.
+
+    def pack(self, *args, **kwargs):
+        """Position this table's main frame widget in its parent
+        widget.  See ``Tkinter.Frame.pack()`` for more info."""
+        self._frame.pack(*args, **kwargs)
+
+    def grid(self, *args, **kwargs):
+        """Position this table's main frame widget in its parent
+        widget.  See ``Tkinter.Frame.grid()`` for more info."""
+        self._frame.grid(*args, **kwargs)
+
+    def focus(self):
+        """Direct (keyboard) input focus to this widget."""
+        self._mlb.focus()
+
+    def bind(self, sequence=None, func=None, add=None):
+        """Add a binding to this table's main frame that will call
+        ``func`` in response to the event sequence."""
+        self._mlb.bind(sequence, func, add)
+
+    def rowconfigure(self, row_index, cnf={}, **kw):
+        """:see: ``MultiListbox.rowconfigure()``"""
+        self._mlb.rowconfigure(row_index, cnf, **kw)
+
+    def columnconfigure(self, col_index, cnf={}, **kw):
+        """:see: ``MultiListbox.columnconfigure()``"""
+        col_index = self.column_index(col_index)
+        self._mlb.columnconfigure(col_index, cnf, **kw)
+
+    def itemconfigure(self, row_index, col_index, cnf=None, **kw):
+        """:see: ``MultiListbox.itemconfigure()``"""
+        col_index = self.column_index(col_index)
+        return self._mlb.itemconfigure(row_index, col_index, cnf, **kw)
+
+    def bind_to_labels(self, sequence=None, func=None, add=None):
+        """:see: ``MultiListbox.bind_to_labels()``"""
+        return self._mlb.bind_to_labels(sequence, func, add)
+
+    def bind_to_listboxes(self, sequence=None, func=None, add=None):
+        """:see: ``MultiListbox.bind_to_listboxes()``"""
+        return self._mlb.bind_to_listboxes(sequence, func, add)
+
+    def bind_to_columns(self, sequence=None, func=None, add=None):
+        """:see: ``MultiListbox.bind_to_columns()``"""
+        return self._mlb.bind_to_columns(sequence, func, add)
+
+    rowconfig = rowconfigure
+    columnconfig = columnconfigure
+    itemconfig = itemconfigure
+
+    # /////////////////////////////////////////////////////////////////
+    # { Table as list-of-lists
+    # /////////////////////////////////////////////////////////////////
+
+    def insert(self, row_index, rowvalue):
+        """
+        Insert a new row into the table, so that its row index will be
+        ``row_index``.  If the table contains any rows whose row index
+        is greater than or equal to ``row_index``, then they will be
+        shifted down.
+
+        :param rowvalue: A tuple of cell values, one for each column
+            in the new row.
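+
+        For example (hypothetical three-column table):
+
+            >>> table.insert(0, ("dog", "N", "canine"))  # doctest: +SKIP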
+ """ + self._checkrow(rowvalue) + self._rows.insert(row_index, rowvalue) + if self._reprfunc is not None: + rowvalue = [ + self._reprfunc(row_index, j, v) for (j, v) in enumerate(rowvalue) + ] + self._mlb.insert(row_index, rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def extend(self, rowvalues): + """ + Add new rows at the end of the table. + + :param rowvalues: A list of row values used to initialize the + table. Each row value should be a tuple of cell values, + one for each column in the row. + """ + for rowvalue in rowvalues: + self.append(rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def append(self, rowvalue): + """ + Add a new row to the end of the table. + + :param rowvalue: A tuple of cell values, one for each column + in the new row. + """ + self.insert(len(self._rows), rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def clear(self): + """ + Delete all rows in this table. + """ + self._rows = [] + self._mlb.delete(0, "end") + if self._DEBUG: + self._check_table_vs_mlb() + + def __getitem__(self, index): + """ + Return the value of a row or a cell in this table. If + ``index`` is an integer, then the row value for the ``index``th + row. This row value consists of a tuple of cell values, one + for each column in the row. If ``index`` is a tuple of two + integers, ``(i,j)``, then return the value of the cell in the + ``i``th row and the ``j``th column. + """ + if isinstance(index, slice): + raise ValueError("Slicing not supported") + elif isinstance(index, tuple) and len(index) == 2: + return self._rows[index[0]][self.column_index(index[1])] + else: + return tuple(self._rows[index]) + + def __setitem__(self, index, val): + """ + Replace the value of a row or a cell in this table with + ``val``. + + If ``index`` is an integer, then ``val`` should be a row value + (i.e., a tuple of cell values, one for each column). In this + case, the values of the ``index``th row of the table will be + replaced with the values in ``val``. + + If ``index`` is a tuple of integers, ``(i,j)``, then replace the + value of the cell in the ``i``th row and ``j``th column with + ``val``. + """ + if isinstance(index, slice): + raise ValueError("Slicing not supported") + + # table[i,j] = val + elif isinstance(index, tuple) and len(index) == 2: + i, j = index[0], self.column_index(index[1]) + config_cookie = self._save_config_info([i]) + self._rows[i][j] = val + if self._reprfunc is not None: + val = self._reprfunc(i, j, val) + self._mlb.listboxes[j].insert(i, val) + self._mlb.listboxes[j].delete(i + 1) + self._restore_config_info(config_cookie) + + # table[i] = val + else: + config_cookie = self._save_config_info([index]) + self._checkrow(val) + self._rows[index] = list(val) + if self._reprfunc is not None: + val = [self._reprfunc(index, j, v) for (j, v) in enumerate(val)] + self._mlb.insert(index, val) + self._mlb.delete(index + 1) + self._restore_config_info(config_cookie) + + def __delitem__(self, row_index): + """ + Delete the ``row_index``th row from this table. + """ + if isinstance(row_index, slice): + raise ValueError("Slicing not supported") + if isinstance(row_index, tuple) and len(row_index) == 2: + raise ValueError("Cannot delete a single cell!") + del self._rows[row_index] + self._mlb.delete(row_index) + if self._DEBUG: + self._check_table_vs_mlb() + + def __len__(self): + """ + :return: the number of rows in this table. 
+ """ + return len(self._rows) + + def _checkrow(self, rowvalue): + """ + Helper function: check that a given row value has the correct + number of elements; and if not, raise an exception. + """ + if len(rowvalue) != self._num_columns: + raise ValueError( + "Row %r has %d columns; expected %d" + % (rowvalue, len(rowvalue), self._num_columns) + ) + + # ///////////////////////////////////////////////////////////////// + # Columns + # ///////////////////////////////////////////////////////////////// + + @property + def column_names(self): + """A list of the names of the columns in this table.""" + return self._mlb.column_names + + def column_index(self, i): + """ + If ``i`` is a valid column index integer, then return it as is. + Otherwise, check if ``i`` is used as the name for any column; + if so, return that column's index. Otherwise, raise a + ``KeyError`` exception. + """ + if isinstance(i, int) and 0 <= i < self._num_columns: + return i + else: + # This raises a key error if the column is not found. + return self._column_name_to_index[i] + + def hide_column(self, column_index): + """:see: ``MultiListbox.hide_column()``""" + self._mlb.hide_column(self.column_index(column_index)) + + def show_column(self, column_index): + """:see: ``MultiListbox.show_column()``""" + self._mlb.show_column(self.column_index(column_index)) + + # ///////////////////////////////////////////////////////////////// + # Selection + # ///////////////////////////////////////////////////////////////// + + def selected_row(self): + """ + Return the index of the currently selected row, or None if + no row is selected. To get the row value itself, use + ``table[table.selected_row()]``. + """ + sel = self._mlb.curselection() + if sel: + return int(sel[0]) + else: + return None + + def select(self, index=None, delta=None, see=True): + """:see: ``MultiListbox.select()``""" + self._mlb.select(index, delta, see) + + # ///////////////////////////////////////////////////////////////// + # Sorting + # ///////////////////////////////////////////////////////////////// + + def sort_by(self, column_index, order="toggle"): + """ + Sort the rows in this table, using the specified column's + values as a sort key. + + :param column_index: Specifies which column to sort, using + either a column index (int) or a column's label name + (str). + + :param order: Specifies whether to sort the values in + ascending or descending order: + + - ``'ascending'``: Sort from least to greatest. + - ``'descending'``: Sort from greatest to least. + - ``'toggle'``: If the most recent call to ``sort_by()`` + sorted the table by the same column (``column_index``), + then reverse the rows; otherwise sort in ascending + order. + """ + if order not in ("ascending", "descending", "toggle"): + raise ValueError( + 'sort_by(): order should be "ascending", ' '"descending", or "toggle".' + ) + column_index = self.column_index(column_index) + config_cookie = self._save_config_info(index_by_id=True) + + # Sort the rows. + if order == "toggle" and column_index == self._sortkey: + self._rows.reverse() + else: + self._rows.sort( + key=operator.itemgetter(column_index), reverse=(order == "descending") + ) + self._sortkey = column_index + + # Redraw the table. 
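+        # _fill_table() discards per-row color configuration and the
+        # selection, so both were saved above (keyed by row id, since
+        # sorting re-orders the rows) and are restored afterwards.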
+        self._fill_table()
+        self._restore_config_info(config_cookie, index_by_id=True, see=True)
+        if self._DEBUG:
+            self._check_table_vs_mlb()
+
+    def _sort(self, event):
+        """Event handler for clicking on a column label -- sort by
+        that column."""
+        column_index = event.widget.column_index
+
+        # If they click on the far-left or far-right of a column's
+        # label, then resize rather than sort.
+        if self._mlb._resize_column(event):
+            return "continue"
+
+        # Otherwise, sort.
+        else:
+            self.sort_by(column_index)
+            return "continue"
+
+    # /////////////////////////////////////////////////////////////////
+    # { Table Drawing Helpers
+    # /////////////////////////////////////////////////////////////////
+
+    def _fill_table(self, save_config=True):
+        """
+        Re-draw the table from scratch, by clearing out the table's
+        multi-column listbox; and then filling it in with values from
+        ``self._rows``.  Note that any cell-, row-, or column-specific
+        color configuration that has been done will be lost.  The
+        selection will also be lost -- i.e., no row will be selected
+        after this call completes.
+        """
+        self._mlb.delete(0, "end")
+        for i, row in enumerate(self._rows):
+            if self._reprfunc is not None:
+                row = [self._reprfunc(i, j, v) for (j, v) in enumerate(row)]
+            self._mlb.insert("end", row)
+
+    def _get_itemconfig(self, r, c):
+        return {
+            k: self._mlb.itemconfig(r, c, k)[-1]
+            for k in (
+                "foreground",
+                "selectforeground",
+                "background",
+                "selectbackground",
+            )
+        }
+
+    def _save_config_info(self, row_indices=None, index_by_id=False):
+        """
+        Return a 'cookie' containing information about which row is
+        selected, and what color configurations have been applied.
+        This information can then be re-applied to the table (after
+        making modifications) using ``_restore_config_info()``.  Color
+        configuration information will be saved for any rows in
+        ``row_indices``, or in the entire table, if
+        ``row_indices=None``.  If ``index_by_id=True``, then the cookie
+        will associate rows with their configuration information based
+        on the rows' Python id.  This is useful when performing
+        operations that re-arrange the rows (e.g. ``sort``).  If
+        ``index_by_id=False``, then it is assumed that all rows will be
+        in the same order when ``_restore_config_info()`` is called.
+        """
+        # Default value for row_indices is all rows.
+        if row_indices is None:
+            row_indices = list(range(len(self._rows)))
+
+        # Look up our current selection.
+        selection = self.selected_row()
+        if index_by_id and selection is not None:
+            selection = id(self._rows[selection])
+
+        # Look up the color configuration info for each row.
+        if index_by_id:
+            config = {
+                id(self._rows[r]): [
+                    self._get_itemconfig(r, c) for c in range(self._num_columns)
+                ]
+                for r in row_indices
+            }
+        else:
+            config = {
+                r: [self._get_itemconfig(r, c) for c in range(self._num_columns)]
+                for r in row_indices
+            }
+
+        return selection, config
+
+    def _restore_config_info(self, cookie, index_by_id=False, see=False):
+        """
+        Restore selection & color configuration information that was
+        saved using ``_save_config_info``.
+        """
+        selection, config = cookie
+
+        # Clear the selection.
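+        # If nothing was selected when the cookie was saved, make sure
+        # no stale row highlight survives the redraw.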
+        if selection is None:
+            self._mlb.selection_clear(0, "end")
+
+        # Restore selection & color config
+        if index_by_id:
+            for r, row in enumerate(self._rows):
+                if id(row) in config:
+                    for c in range(self._num_columns):
+                        self._mlb.itemconfigure(r, c, config[id(row)][c])
+                    if id(row) == selection:
+                        self._mlb.select(r, see=see)
+        else:
+            if selection is not None:
+                self._mlb.select(selection, see=see)
+            for r in config:
+                for c in range(self._num_columns):
+                    self._mlb.itemconfigure(r, c, config[r][c])
+
+    # /////////////////////////////////////////////////////////////////
+    # Debugging (Invariant Checker)
+    # /////////////////////////////////////////////////////////////////
+
+    _DEBUG = False
+    """If true, then run ``_check_table_vs_mlb()`` after any operation
+    that modifies the table."""
+
+    def _check_table_vs_mlb(self):
+        """
+        Verify that the contents of the table's ``_rows`` variable match
+        the contents of its multi-listbox (``_mlb``).  This is just
+        included for debugging purposes, to make sure that the
+        list-modifying operations are working correctly.
+        """
+        for col in self._mlb.listboxes:
+            assert len(self) == col.size()
+        for row in self:
+            assert len(row) == self._num_columns
+        assert self._num_columns == len(self._mlb.column_names)
+        # assert self._column_names == self._mlb.column_names
+        for i, row in enumerate(self):
+            for j, cell in enumerate(row):
+                if self._reprfunc is not None:
+                    cell = self._reprfunc(i, j, cell)
+                assert self._mlb.get(i)[j] == cell
+
+
+######################################################################
+# Demo/Test Function
+######################################################################
+
+# update this to use new WordNet API
+def demo():
+    root = Tk()
+    root.bind("<Control-q>", lambda e: root.destroy())
+
+    table = Table(
+        root,
+        "Word Synset Hypernym Hyponym".split(),
+        column_weights=[0, 1, 1, 1],
+        reprfunc=(lambda i, j, s: " %s" % s),
+    )
+    table.pack(expand=True, fill="both")
+
+    from nltk.corpus import brown, wordnet
+
+    for word, pos in sorted(set(brown.tagged_words()[:500])):
+        if pos[0] != "N":
+            continue
+        word = word.lower()
+        for synset in wordnet.synsets(word):
+            try:
+                hyper_def = synset.hypernyms()[0].definition()
+            except IndexError:
+                hyper_def = "*none*"
+            try:
+                hypo_def = synset.hyponyms()[0].definition()
+            except IndexError:
+                hypo_def = "*none*"
+            table.append([word, synset.definition(), hyper_def, hypo_def])
+
+    table.columnconfig("Word", background="#afa")
+    table.columnconfig("Synset", background="#efe")
+    table.columnconfig("Hypernym", background="#fee")
+    table.columnconfig("Hyponym", background="#ffe")
+    for row in range(len(table)):
+        for column in ("Hypernym", "Hyponym"):
+            if table[row, column] == "*none*":
+                table.itemconfig(
+                    row, column, foreground="#666", selectforeground="#666"
+                )
+    root.mainloop()
+
+
+if __name__ == "__main__":
+    demo()
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/tree.py b/llmeval-env/lib/python3.10/site-packages/nltk/draw/tree.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a2791428fcab5a47dd6d88561971d6907f74084
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/draw/tree.py
@@ -0,0 +1,1129 @@
+# Natural Language Toolkit: Graphical Representations for Trees
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Graphically display a Tree.
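+
+A typical entry point is ``draw_trees`` (defined later in this module);
+a minimal sketch, assuming a Tk display is available:
+
+    >>> from nltk import Tree  # doctest: +SKIP
+    >>> from nltk.draw.tree import draw_trees  # doctest: +SKIP
+    >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")  # doctest: +SKIP
+    >>> draw_trees(t)  # doctest: +SKIP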
+"""
+
+from tkinter import IntVar, Menu, Tk
+
+from nltk.draw.util import (
+    BoxWidget,
+    CanvasFrame,
+    CanvasWidget,
+    OvalWidget,
+    ParenWidget,
+    TextWidget,
+)
+from nltk.tree import Tree
+from nltk.util import in_idle
+
+##//////////////////////////////////////////////////////
+##  Tree Segment
+##//////////////////////////////////////////////////////
+
+
+class TreeSegmentWidget(CanvasWidget):
+    """
+    A canvas widget that displays a single segment of a hierarchical
+    tree.  Each ``TreeSegmentWidget`` connects a single "node widget"
+    to a sequence of zero or more "subtree widgets".  By default, the
+    bottom of the node is connected to the top of each subtree by a
+    single line.  However, if the ``roof`` attribute is set, then a
+    single triangular "roof" will connect the node to all of its
+    children.
+
+    Attributes:
+
+    - ``roof``: What sort of connection to draw between the node and
+      its subtrees.  If ``roof`` is true, draw a single triangular
+      "roof" over the subtrees.  If ``roof`` is false, draw a line
+      between each subtree and the node.  Default value is false.
+    - ``xspace``: The amount of horizontal space to leave between
+      subtrees when managing this widget.  Default value is 10.
+    - ``yspace``: The amount of space to place between the node and
+      its children when managing this widget.  Default value is 15.
+    - ``color``: The color of the lines connecting the node to its
+      subtrees; and of the outline of the triangular roof.  Default
+      value is ``'#006060'``.
+    - ``fill``: The fill color for the triangular roof.  Default
+      value is ``''`` (no fill).
+    - ``width``: The width of the lines connecting the node to its
+      subtrees; and of the outline of the triangular roof.  Default
+      value is 1.
+    - ``orientation``: Determines whether the tree branches downwards
+      or rightwards.  Possible values are ``'horizontal'`` and
+      ``'vertical'``.  The default value is ``'vertical'`` (i.e.,
+      branch downwards).
+    - ``draggable``: whether the widget can be dragged by the user.
+    """
+
+    def __init__(self, canvas, label, subtrees, **attribs):
+        """
+        :type label: CanvasWidget
+        :type subtrees: list(CanvasWidget)
+        """
+        self._label = label
+        self._subtrees = subtrees
+
+        # Attributes
+        self._horizontal = 0
+        self._roof = 0
+        self._xspace = 10
+        self._yspace = 15
+        self._ordered = False
+
+        # Create canvas objects.
+        self._lines = [canvas.create_line(0, 0, 0, 0, fill="#006060") for c in subtrees]
+        self._polygon = canvas.create_polygon(
+            0, 0, fill="", state="hidden", outline="#006060"
+        )
+
+        # Register child widgets (label + subtrees)
+        self._add_child_widget(label)
+        for subtree in subtrees:
+            self._add_child_widget(subtree)
+
+        # Are we currently managing?
+        self._managing = False
+
+        CanvasWidget.__init__(self, canvas, **attribs)
+
+    def __setitem__(self, attr, value):
+        canvas = self.canvas()
+        if attr == "roof":
+            self._roof = value
+            if self._roof:
+                for l in self._lines:
+                    canvas.itemconfig(l, state="hidden")
+                canvas.itemconfig(self._polygon, state="normal")
+            else:
+                for l in self._lines:
+                    canvas.itemconfig(l, state="normal")
+                canvas.itemconfig(self._polygon, state="hidden")
+        elif attr == "orientation":
+            if value == "horizontal":
+                self._horizontal = 1
+            elif value == "vertical":
+                self._horizontal = 0
+            else:
+                raise ValueError("orientation must be horizontal or vertical")
+        elif attr == "color":
+            for l in self._lines:
+                canvas.itemconfig(l, fill=value)
+            canvas.itemconfig(self._polygon, outline=value)
+        elif isinstance(attr, tuple) and attr[0] == "color":
+            # Set the color of an individual line.
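+            # ``attr`` is a tuple ("color", i) selecting the i-th line;
+            # this mirrors the ``tree["color", i]`` lookups supported by
+            # ``__getitem__`` below.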
+ l = self._lines[int(attr[1])] + canvas.itemconfig(l, fill=value) + elif attr == "fill": + canvas.itemconfig(self._polygon, fill=value) + elif attr == "width": + canvas.itemconfig(self._polygon, {attr: value}) + for l in self._lines: + canvas.itemconfig(l, {attr: value}) + elif attr in ("xspace", "yspace"): + if attr == "xspace": + self._xspace = value + elif attr == "yspace": + self._yspace = value + self.update(self._label) + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "roof": + return self._roof + elif attr == "width": + return self.canvas().itemcget(self._polygon, attr) + elif attr == "color": + return self.canvas().itemcget(self._polygon, "outline") + elif isinstance(attr, tuple) and attr[0] == "color": + l = self._lines[int(attr[1])] + return self.canvas().itemcget(l, "fill") + elif attr == "xspace": + return self._xspace + elif attr == "yspace": + return self._yspace + elif attr == "orientation": + if self._horizontal: + return "horizontal" + else: + return "vertical" + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def label(self): + return self._label + + def subtrees(self): + return self._subtrees[:] + + def set_label(self, label): + """ + Set the node label to ``label``. + """ + self._remove_child_widget(self._label) + self._add_child_widget(label) + self._label = label + self.update(self._label) + + def replace_child(self, oldchild, newchild): + """ + Replace the child ``oldchild`` with ``newchild``. + """ + index = self._subtrees.index(oldchild) + self._subtrees[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + index = self._subtrees.index(child) + del self._subtrees[index] + self._remove_child_widget(child) + self.canvas().delete(self._lines.pop()) + self.update(self._label) + + def insert_child(self, index, child): + canvas = self.canvas() + self._subtrees.insert(index, child) + self._add_child_widget(child) + self._lines.append(canvas.create_line(0, 0, 0, 0, fill="#006060")) + self.update(self._label) + + # but.. lines??? + + def _tags(self): + if self._roof: + return [self._polygon] + else: + return self._lines + + def _subtree_top(self, child): + if isinstance(child, TreeSegmentWidget): + bbox = child.label().bbox() + else: + bbox = child.bbox() + if self._horizontal: + return (bbox[0], (bbox[1] + bbox[3]) / 2.0) + else: + return ((bbox[0] + bbox[2]) / 2.0, bbox[1]) + + def _node_bottom(self): + bbox = self._label.bbox() + if self._horizontal: + return (bbox[2], (bbox[1] + bbox[3]) / 2.0) + else: + return ((bbox[0] + bbox[2]) / 2.0, bbox[3]) + + def _update(self, child): + if len(self._subtrees) == 0: + return + if self._label.bbox() is None: + return # [XX] ??? + + # Which lines need to be redrawn? + if child is self._label: + need_update = self._subtrees + else: + need_update = [child] + + if self._ordered and not self._managing: + need_update = self._maintain_order(child) + + # Update the polygon. 
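+        # The roof polygon stretches from the node's bottom anchor to
+        # the combined bounding box of all subtrees; its coordinates are
+        # kept up to date even while the polygon is hidden.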
+        (nodex, nodey) = self._node_bottom()
+        (xmin, ymin, xmax, ymax) = self._subtrees[0].bbox()
+        for subtree in self._subtrees[1:]:
+            bbox = subtree.bbox()
+            xmin = min(xmin, bbox[0])
+            ymin = min(ymin, bbox[1])
+            xmax = max(xmax, bbox[2])
+            ymax = max(ymax, bbox[3])
+
+        if self._horizontal:
+            self.canvas().coords(
+                self._polygon, nodex, nodey, xmin, ymin, xmin, ymax, nodex, nodey
+            )
+        else:
+            self.canvas().coords(
+                self._polygon, nodex, nodey, xmin, ymin, xmax, ymin, nodex, nodey
+            )
+
+        # Redraw all lines that need it.
+        for subtree in need_update:
+            (nodex, nodey) = self._node_bottom()
+            line = self._lines[self._subtrees.index(subtree)]
+            (subtreex, subtreey) = self._subtree_top(subtree)
+            self.canvas().coords(line, nodex, nodey, subtreex, subtreey)
+
+    def _maintain_order(self, child):
+        if self._horizontal:
+            return self._maintain_order_horizontal(child)
+        else:
+            return self._maintain_order_vertical(child)
+
+    def _maintain_order_vertical(self, child):
+        (left, top, right, bot) = child.bbox()
+
+        if child is self._label:
+            # Check all the leaves
+            for subtree in self._subtrees:
+                (x1, y1, x2, y2) = subtree.bbox()
+                if bot + self._yspace > y1:
+                    subtree.move(0, bot + self._yspace - y1)
+
+            return self._subtrees
+        else:
+            moved = [child]
+            index = self._subtrees.index(child)
+
+            # Check leaves to our right.
+            x = right + self._xspace
+            for i in range(index + 1, len(self._subtrees)):
+                (x1, y1, x2, y2) = self._subtrees[i].bbox()
+                if x > x1:
+                    self._subtrees[i].move(x - x1, 0)
+                    x += x2 - x1 + self._xspace
+                    moved.append(self._subtrees[i])
+
+            # Check leaves to our left.
+            x = left - self._xspace
+            for i in range(index - 1, -1, -1):
+                (x1, y1, x2, y2) = self._subtrees[i].bbox()
+                if x < x2:
+                    self._subtrees[i].move(x - x2, 0)
+                    x -= x2 - x1 + self._xspace
+                    moved.append(self._subtrees[i])
+
+            # Check the node
+            (x1, y1, x2, y2) = self._label.bbox()
+            if y2 > top - self._yspace:
+                self._label.move(0, top - self._yspace - y2)
+                moved = self._subtrees
+
+            # Return a list of the nodes we moved
+            return moved
+
+    def _maintain_order_horizontal(self, child):
+        (left, top, right, bot) = child.bbox()
+
+        if child is self._label:
+            # Check all the leaves
+            for subtree in self._subtrees:
+                (x1, y1, x2, y2) = subtree.bbox()
+                if right + self._xspace > x1:
+                    subtree.move(right + self._xspace - x1, 0)
+
+            return self._subtrees
+        else:
+            moved = [child]
+            index = self._subtrees.index(child)
+
+            # Check leaves below us.
+            y = bot + self._yspace
+            for i in range(index + 1, len(self._subtrees)):
+                (x1, y1, x2, y2) = self._subtrees[i].bbox()
+                if y > y1:
+                    self._subtrees[i].move(0, y - y1)
+                    y += y2 - y1 + self._yspace
+                    moved.append(self._subtrees[i])
+
+            # Check leaves above us
+            y = top - self._yspace
+            for i in range(index - 1, -1, -1):
+                (x1, y1, x2, y2) = self._subtrees[i].bbox()
+                if y < y2:
+                    self._subtrees[i].move(0, y - y2)
+                    y -= y2 - y1 + self._yspace
+                    moved.append(self._subtrees[i])
+
+            # Check the node
+            (x1, y1, x2, y2) = self._label.bbox()
+            if x2 > left - self._xspace:
+                self._label.move(left - self._xspace - x2, 0)
+                moved = self._subtrees
+
+            # Return a list of the nodes we moved
+            return moved
+
+    def _manage_horizontal(self):
+        (nodex, nodey) = self._node_bottom()
+
+        # Put the subtrees in a line.
+        y = 20
+        for subtree in self._subtrees:
+            subtree_bbox = subtree.bbox()
+            dx = nodex - subtree_bbox[0] + self._xspace
+            dy = y - subtree_bbox[1]
+            subtree.move(dx, dy)
+            y += subtree_bbox[3] - subtree_bbox[1] + self._yspace
+
+        # Find the center of their tops.
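+        # Average the vertical positions of the subtree tops, then shift
+        # the whole column so this average lines up with the node anchor.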
+        center = 0.0
+        for subtree in self._subtrees:
+            center += self._subtree_top(subtree)[1]
+        center /= len(self._subtrees)
+
+        # Center the subtrees with the node.
+        for subtree in self._subtrees:
+            subtree.move(0, nodey - center)
+
+    def _manage_vertical(self):
+        (nodex, nodey) = self._node_bottom()
+
+        # Put the subtrees in a line.
+        x = 0
+        for subtree in self._subtrees:
+            subtree_bbox = subtree.bbox()
+            dy = nodey - subtree_bbox[1] + self._yspace
+            dx = x - subtree_bbox[0]
+            subtree.move(dx, dy)
+            x += subtree_bbox[2] - subtree_bbox[0] + self._xspace
+
+        # Find the center of their tops.
+        center = 0.0
+        for subtree in self._subtrees:
+            center += self._subtree_top(subtree)[0] / len(self._subtrees)
+
+        # Center the subtrees with the node.
+        for subtree in self._subtrees:
+            subtree.move(nodex - center, 0)
+
+    def _manage(self):
+        self._managing = True
+        (nodex, nodey) = self._node_bottom()
+        if len(self._subtrees) == 0:
+            return
+
+        if self._horizontal:
+            self._manage_horizontal()
+        else:
+            self._manage_vertical()
+
+        # Update lines to subtrees.
+        for subtree in self._subtrees:
+            self._update(subtree)
+
+        self._managing = False
+
+    def __repr__(self):
+        return f"[TreeSeg {self._label}: {self._subtrees}]"
+
+
+def _tree_to_treeseg(
+    canvas,
+    t,
+    make_node,
+    make_leaf,
+    tree_attribs,
+    node_attribs,
+    leaf_attribs,
+    loc_attribs,
+):
+    if isinstance(t, Tree):
+        label = make_node(canvas, t.label(), **node_attribs)
+        subtrees = [
+            _tree_to_treeseg(
+                canvas,
+                child,
+                make_node,
+                make_leaf,
+                tree_attribs,
+                node_attribs,
+                leaf_attribs,
+                loc_attribs,
+            )
+            for child in t
+        ]
+        return TreeSegmentWidget(canvas, label, subtrees, **tree_attribs)
+    else:
+        return make_leaf(canvas, t, **leaf_attribs)
+
+
+def tree_to_treesegment(
+    canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs
+):
+    """
+    Convert a Tree into a ``TreeSegmentWidget``.
+
+    :param make_node: A ``CanvasWidget`` constructor or a function that
+        creates ``CanvasWidgets``.  ``make_node`` is used to convert
+        the Tree's nodes into ``CanvasWidgets``.  If no constructor
+        is specified, then ``TextWidget`` will be used.
+    :param make_leaf: A ``CanvasWidget`` constructor or a function that
+        creates ``CanvasWidgets``.  ``make_leaf`` is used to convert
+        the Tree's leaves into ``CanvasWidgets``.  If no constructor
+        is specified, then ``TextWidget`` will be used.
+    :param attribs: Attributes for the canvas widgets that make up the
+        returned ``TreeSegmentWidget``.  Any attribute beginning with
+        ``'tree_'`` will be passed to all ``TreeSegmentWidgets`` (with
+        the ``'tree_'`` prefix removed).  Any attribute beginning with
+        ``'node_'`` will be passed to all nodes.  Any attribute
+        beginning with ``'leaf_'`` will be passed to all leaves.  And
+        any attribute beginning with ``'loc_'`` will be passed to all
+        text locations (for Trees).
+    """
+    # Process attribs.
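+    # Split the prefixed keyword arguments into one dict per widget
+    # kind; the prefix is stripped before the attribute is passed on.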
+ tree_attribs = {} + node_attribs = {} + leaf_attribs = {} + loc_attribs = {} + + for (key, value) in list(attribs.items()): + if key[:5] == "tree_": + tree_attribs[key[5:]] = value + elif key[:5] == "node_": + node_attribs[key[5:]] = value + elif key[:5] == "leaf_": + leaf_attribs[key[5:]] = value + elif key[:4] == "loc_": + loc_attribs[key[4:]] = value + else: + raise ValueError("Bad attribute: %s" % key) + return _tree_to_treeseg( + canvas, + t, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, + ) + + +##////////////////////////////////////////////////////// +## Tree Widget +##////////////////////////////////////////////////////// + + +class TreeWidget(CanvasWidget): + """ + A canvas widget that displays a single Tree. + ``TreeWidget`` manages a group of ``TreeSegmentWidgets`` that are + used to display a Tree. + + Attributes: + + - ``node_attr``: Sets the attribute ``attr`` on all of the + node widgets for this ``TreeWidget``. + - ``leaf_attr``: Sets the attribute ``attr`` on all of the + leaf widgets for this ``TreeWidget``. + - ``loc_attr``: Sets the attribute ``attr`` on all of the + location widgets for this ``TreeWidget`` (if it was built from + a Tree). Note that a location widget is a ``TextWidget``. + + - ``xspace``: The amount of horizontal space to leave between + subtrees when managing this widget. Default value is 10. + - ``yspace``: The amount of space to place between the node and + its children when managing this widget. Default value is 10. + + - ``line_color``: The color of the lines connecting each expanded + node to its subtrees. + - ``roof_color``: The color of the outline of the triangular roof + for collapsed trees. + - ``roof_fill``: The fill color for the triangular roof for + collapsed trees. + - ``line_width``: The width of the lines connecting each expanded + node to its subtrees, and of the roof outline for collapsed trees. + + - ``orientation``: Determines whether the tree branches downwards + or rightwards. Possible values are ``'horizontal'`` and + ``'vertical'``. The default value is ``'vertical'`` (i.e., + branch downwards). + + - ``shapeable``: whether the subtrees can be independently + dragged by the user. This property simply sets the + ``draggable`` property on all of the ``TreeWidget``'s tree + segments. + - ``draggable``: whether the widget can be dragged by the user. + """ + + def __init__( + self, canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs + ): + # Node & leaf canvas widget constructors + self._make_node = make_node + self._make_leaf = make_leaf + self._tree = t + + # Attributes. + self._nodeattribs = {} + self._leafattribs = {} + self._locattribs = {"color": "#008000"} + self._line_color = "#008080" + self._line_width = 1 + self._roof_color = "#008080" + self._roof_fill = "#c0c0c0" + self._shapeable = False + self._xspace = 10 + self._yspace = 10 + self._orientation = "vertical" + self._ordered = False + + # Build trees. + self._keys = {} # treeseg -> key + self._expanded_trees = {} + self._collapsed_trees = {} + self._nodes = [] + self._leaves = [] + # self._locs = [] + self._make_collapsed_trees(canvas, t, ()) + self._treeseg = self._make_expanded_tree(canvas, t, ()) + self._add_child_widget(self._treeseg) + + CanvasWidget.__init__(self, canvas, **attribs) + + def expanded_tree(self, *path_to_tree): + """ + Return the ``TreeSegmentWidget`` for the specified subtree. + + :param path_to_tree: A list of indices i1, i2, ..., in, where + the desired widget is the widget corresponding to + ``tree.children()[i1].children()[i2]....children()[in]``. + For the root, the path is ``()``.
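+ + Example (an illustrative sketch; ``canvas`` and ``t`` are assumed + to be an existing ``Canvas`` and ``Tree``): + + >>> w = TreeWidget(canvas, t) # doctest: +SKIP + >>> seg = w.expanded_tree(0, 1) # doctest: +SKIP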
+ """ + return self._expanded_trees[path_to_tree] + + def collapsed_tree(self, *path_to_tree): + """ + Return the ``TreeSegmentWidget`` for the specified subtree. + + :param path_to_tree: A list of indices i1, i2, ..., in, where + the desired widget is the widget corresponding to + ``tree.children()[i1].children()[i2]....children()[in]``. + For the root, the path is ``()``. + """ + return self._collapsed_trees[path_to_tree] + + def bind_click_trees(self, callback, button=1): + """ + Add a binding to all tree segments. + """ + for tseg in list(self._expanded_trees.values()): + tseg.bind_click(callback, button) + for tseg in list(self._collapsed_trees.values()): + tseg.bind_click(callback, button) + + def bind_drag_trees(self, callback, button=1): + """ + Add a binding to all tree segments. + """ + for tseg in list(self._expanded_trees.values()): + tseg.bind_drag(callback, button) + for tseg in list(self._collapsed_trees.values()): + tseg.bind_drag(callback, button) + + def bind_click_leaves(self, callback, button=1): + """ + Add a binding to all leaves. + """ + for leaf in self._leaves: + leaf.bind_click(callback, button) + for leaf in self._leaves: + leaf.bind_click(callback, button) + + def bind_drag_leaves(self, callback, button=1): + """ + Add a binding to all leaves. + """ + for leaf in self._leaves: + leaf.bind_drag(callback, button) + for leaf in self._leaves: + leaf.bind_drag(callback, button) + + def bind_click_nodes(self, callback, button=1): + """ + Add a binding to all nodes. + """ + for node in self._nodes: + node.bind_click(callback, button) + for node in self._nodes: + node.bind_click(callback, button) + + def bind_drag_nodes(self, callback, button=1): + """ + Add a binding to all nodes. + """ + for node in self._nodes: + node.bind_drag(callback, button) + for node in self._nodes: + node.bind_drag(callback, button) + + def _make_collapsed_trees(self, canvas, t, key): + if not isinstance(t, Tree): + return + make_node = self._make_node + make_leaf = self._make_leaf + + node = make_node(canvas, t.label(), **self._nodeattribs) + self._nodes.append(node) + leaves = [make_leaf(canvas, l, **self._leafattribs) for l in t.leaves()] + self._leaves += leaves + treeseg = TreeSegmentWidget( + canvas, + node, + leaves, + roof=1, + color=self._roof_color, + fill=self._roof_fill, + width=self._line_width, + ) + + self._collapsed_trees[key] = treeseg + self._keys[treeseg] = key + # self._add_child_widget(treeseg) + treeseg.hide() + + # Build trees for children. 
+ for i in range(len(t)): + child = t[i] + self._make_collapsed_trees(canvas, child, key + (i,)) + + def _make_expanded_tree(self, canvas, t, key): + make_node = self._make_node + make_leaf = self._make_leaf + + if isinstance(t, Tree): + node = make_node(canvas, t.label(), **self._nodeattribs) + self._nodes.append(node) + children = t + subtrees = [ + self._make_expanded_tree(canvas, children[i], key + (i,)) + for i in range(len(children)) + ] + treeseg = TreeSegmentWidget( + canvas, node, subtrees, color=self._line_color, width=self._line_width + ) + self._expanded_trees[key] = treeseg + self._keys[treeseg] = key + return treeseg + else: + leaf = make_leaf(canvas, t, **self._leafattribs) + self._leaves.append(leaf) + return leaf + + def __setitem__(self, attr, value): + if attr[:5] == "node_": + for node in self._nodes: + node[attr[5:]] = value + elif attr[:5] == "leaf_": + for leaf in self._leaves: + leaf[attr[5:]] = value + elif attr == "line_color": + self._line_color = value + for tseg in list(self._expanded_trees.values()): + tseg["color"] = value + elif attr == "line_width": + self._line_width = value + for tseg in list(self._expanded_trees.values()): + tseg["width"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["width"] = value + elif attr == "roof_color": + self._roof_color = value + for tseg in list(self._collapsed_trees.values()): + tseg["color"] = value + elif attr == "roof_fill": + self._roof_fill = value + for tseg in list(self._collapsed_trees.values()): + tseg["fill"] = value + elif attr == "shapeable": + self._shapeable = value + for tseg in list(self._expanded_trees.values()): + tseg["draggable"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["draggable"] = value + for leaf in self._leaves: + leaf["draggable"] = value + elif attr == "xspace": + self._xspace = value + for tseg in list(self._expanded_trees.values()): + tseg["xspace"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["xspace"] = value + self.manage() + elif attr == "yspace": + self._yspace = value + for tseg in list(self._expanded_trees.values()): + tseg["yspace"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["yspace"] = value + self.manage() + elif attr == "orientation": + self._orientation = value + for tseg in list(self._expanded_trees.values()): + tseg["orientation"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["orientation"] = value + self.manage() + elif attr == "ordered": + self._ordered = value + for tseg in list(self._expanded_trees.values()): + tseg["ordered"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["ordered"] = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr[:5] == "node_": + return self._nodeattribs.get(attr[5:], None) + elif attr[:5] == "leaf_": + return self._leafattribs.get(attr[5:], None) + elif attr[:4] == "loc_": + return self._locattribs.get(attr[4:], None) + elif attr == "line_color": + return self._line_color + elif attr == "line_width": + return self._line_width + elif attr == "roof_color": + return self._roof_color + elif attr == "roof_fill": + return self._roof_fill + elif attr == "shapeable": + return self._shapeable + elif attr == "xspace": + return self._xspace + elif attr == "yspace": + return self._yspace + elif attr == "orientation": + return self._orientation + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _manage(self): + segs = 
list(self._expanded_trees.values()) + list( + self._collapsed_trees.values() + ) + for tseg in segs: + if tseg.hidden(): + tseg.show() + tseg.manage() + tseg.hide() + + def toggle_collapsed(self, treeseg): + """ + Collapse/expand a tree. + """ + old_treeseg = treeseg + if old_treeseg["roof"]: + new_treeseg = self._expanded_trees[self._keys[old_treeseg]] + else: + new_treeseg = self._collapsed_trees[self._keys[old_treeseg]] + + # Replace the old tree with the new tree. + if old_treeseg.parent() is self: + self._remove_child_widget(old_treeseg) + self._add_child_widget(new_treeseg) + self._treeseg = new_treeseg + else: + old_treeseg.parent().replace_child(old_treeseg, new_treeseg) + + # Move the new tree to where the old tree was. Show it first, + # so we can find its bounding box. + new_treeseg.show() + (newx, newy) = new_treeseg.label().bbox()[:2] + (oldx, oldy) = old_treeseg.label().bbox()[:2] + new_treeseg.move(oldx - newx, oldy - newy) + + # Hide the old tree + old_treeseg.hide() + + # We could do parent.manage() here instead, if we wanted. + new_treeseg.parent().update(new_treeseg) + + +##////////////////////////////////////////////////////// +## draw_trees +##////////////////////////////////////////////////////// + + +class TreeView: + def __init__(self, *trees): + from math import ceil, sqrt + + self._trees = trees + + self._top = Tk() + self._top.title("NLTK") + self._top.bind("<Control-x>", self.destroy) + self._top.bind("<Control-q>", self.destroy) + + cf = self._cframe = CanvasFrame(self._top) + self._top.bind("<Control-p>", self._cframe.print_to_file) + + # Size is variable. + self._size = IntVar(self._top) + self._size.set(12) + bold = ("helvetica", -self._size.get(), "bold") + helv = ("helvetica", -self._size.get()) + + # Lay the trees out in a square. + self._width = int(ceil(sqrt(len(trees)))) + self._widgets = [] + for i in range(len(trees)): + widget = TreeWidget( + cf.canvas(), + trees[i], + node_font=bold, + leaf_color="#008040", + node_color="#004080", + roof_color="#004040", + roof_fill="white", + line_color="#004040", + draggable=1, + leaf_font=helv, + ) + widget.bind_click_trees(widget.toggle_collapsed) + self._widgets.append(widget) + cf.add_widget(widget, 0, 0) + + self._layout() + self._cframe.pack(expand=1, fill="both") + self._init_menubar() + + def _layout(self): + i = x = y = ymax = 0 + width = self._width + for i in range(len(self._widgets)): + widget = self._widgets[i] + (oldx, oldy) = widget.bbox()[:2] + if i % width == 0: + y = ymax + x = 0 + widget.move(x - oldx, y - oldy) + x = widget.bbox()[2] + 10 + ymax = max(ymax, widget.bbox()[3] + 10) + + def _init_menubar(self): + menubar = Menu(self._top) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self._cframe.print_to_file, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + zoommenu = Menu(menubar, tearoff=0) + zoommenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=28, + command=self.resize, + ) + zoommenu.add_radiobutton( + 
label="Huge", + variable=self._size, + underline=0, + value=50, + command=self.resize, + ) + menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu) + + self._top.config(menu=menubar) + + def resize(self, *e): + bold = ("helvetica", -self._size.get(), "bold") + helv = ("helvetica", -self._size.get()) + xspace = self._size.get() + yspace = self._size.get() + for widget in self._widgets: + widget["node_font"] = bold + widget["leaf_font"] = helv + widget["xspace"] = xspace + widget["yspace"] = yspace + if self._size.get() < 20: + widget["line_width"] = 1 + elif self._size.get() < 30: + widget["line_width"] = 2 + else: + widget["line_width"] = 3 + self._layout() + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + +def draw_trees(*trees): + """ + Open a new window containing a graphical diagram of the given + trees. + + :rtype: None + """ + TreeView(*trees).mainloop() + return + + +##////////////////////////////////////////////////////// +## Demo Code +##////////////////////////////////////////////////////// + + +def demo(): + import random + + def fill(cw): + cw["fill"] = "#%06d" % random.randint(0, 999999) + + cf = CanvasFrame(width=550, height=450, closeenough=2) + + t = Tree.fromstring( + """ + (S (NP the very big cat) + (VP (Adv sorta) (V saw) (NP (Det the) (N dog))))""" + ) + + tc = TreeWidget( + cf.canvas(), + t, + draggable=1, + node_font=("helvetica", -14, "bold"), + leaf_font=("helvetica", -12, "italic"), + roof_fill="white", + roof_color="black", + leaf_color="green4", + node_color="blue2", + ) + cf.add_widget(tc, 10, 10) + + def boxit(canvas, text): + big = ("helvetica", -16, "bold") + return BoxWidget(canvas, TextWidget(canvas, text, font=big), fill="green") + + def ovalit(canvas, text): + return OvalWidget(canvas, TextWidget(canvas, text), fill="cyan") + + treetok = Tree.fromstring("(S (NP this tree) (VP (V is) (AdjP shapeable)))") + tc2 = TreeWidget(cf.canvas(), treetok, boxit, ovalit, shapeable=1) + + def color(node): + node["color"] = "#%04d00" % random.randint(0, 9999) + + def color2(treeseg): + treeseg.label()["fill"] = "#%06d" % random.randint(0, 9999) + treeseg.label().child()["color"] = "white" + + tc.bind_click_trees(tc.toggle_collapsed) + tc2.bind_click_trees(tc2.toggle_collapsed) + tc.bind_click_nodes(color, 3) + tc2.expanded_tree(1).bind_click(color2, 3) + tc2.expanded_tree().bind_click(color2, 3) + + paren = ParenWidget(cf.canvas(), tc2) + cf.add_widget(paren, tc.bbox()[2] + 10, 10) + + tree3 = Tree.fromstring( + """ + (S (NP this tree) (AUX was) + (VP (V built) (PP (P with) (NP (N tree_to_treesegment)))))""" + ) + tc3 = tree_to_treesegment( + cf.canvas(), tree3, tree_color="green4", tree_xspace=2, tree_width=2 + ) + tc3["draggable"] = 1 + cf.add_widget(tc3, 10, tc.bbox()[3] + 10) + + def orientswitch(treewidget): + if treewidget["orientation"] == "horizontal": + treewidget.expanded_tree(1, 1).subtrees()[0].set_text("vertical") + treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("vertical") + treewidget.collapsed_tree(1).subtrees()[1].set_text("vertical") + treewidget.collapsed_tree().subtrees()[3].set_text("vertical") + treewidget["orientation"] = "vertical" + else: + 
treewidget.expanded_tree(1, 1).subtrees()[0].set_text("horizontal") + treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("horizontal") + treewidget.collapsed_tree(1).subtrees()[1].set_text("horizontal") + treewidget.collapsed_tree().subtrees()[3].set_text("horizontal") + treewidget["orientation"] = "horizontal" + + text = """ +Try clicking, right clicking, and dragging +different elements of each of the trees. +The top-left tree is a TreeWidget built from +a Tree. The top-right is a TreeWidget built +from a Tree, using non-default widget +constructors for the nodes & leaves (BoxWidget +and OvalWidget). The bottom-left tree is +built from tree_to_treesegment.""" + twidget = TextWidget(cf.canvas(), text.strip()) + textbox = BoxWidget(cf.canvas(), twidget, fill="white", draggable=1) + cf.add_widget(textbox, tc3.bbox()[2] + 10, tc2.bbox()[3] + 10) + + tree4 = Tree.fromstring("(S (NP this tree) (VP (V is) (Adj horizontal)))") + tc4 = TreeWidget( + cf.canvas(), + tree4, + draggable=1, + line_color="brown2", + roof_color="brown2", + node_font=("helvetica", -12, "bold"), + node_color="brown4", + orientation="horizontal", + ) + tc4.manage() + cf.add_widget(tc4, tc3.bbox()[2] + 10, textbox.bbox()[3] + 10) + tc4.bind_click(orientswitch) + tc4.bind_click_trees(tc4.toggle_collapsed, 3) + + # Run mainloop + cf.mainloop() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/draw/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/draw/util.py new file mode 100644 index 0000000000000000000000000000000000000000..31ae442099a892a6e84a0dbf3ff284d7aa184b3f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/draw/util.py @@ -0,0 +1,2575 @@ +# Natural Language Toolkit: Drawing utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Tools for graphically displaying and interacting with the objects and +processing classes defined by the Toolkit. These tools are primarily +intended to help students visualize the objects that they create. + +The graphical tools are typically built using "canvas widgets", each +of which encapsulates the graphical elements and bindings used to +display a complex object on a Tkinter ``Canvas``. For example, NLTK +defines canvas widgets for displaying trees and directed graphs, as +well as a number of simpler widgets. These canvas widgets make it +easier to build new graphical tools and demos. See the class +documentation for ``CanvasWidget`` for more information. + +The ``nltk.draw`` module defines the abstract ``CanvasWidget`` base +class, and a number of simple canvas widgets. The remaining canvas +widgets are defined by submodules, such as ``nltk.draw.tree``. + +The ``nltk.draw`` module also defines ``CanvasFrame``, which +encapsulates a ``Canvas`` and its scrollbars. It uses a +``ScrollWatcherWidget`` to ensure that all canvas widgets contained on +its canvas are within the scroll region. + +Acknowledgements: Many of the ideas behind the canvas widget system +are derived from ``CLIG``, a Tk-based grapher for linguistic data +structures. For more information, see the CLIG +homepage (http://www.ags.uni-sb.de/~konrad/clig.html). 
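+ +A minimal usage sketch (illustrative only; ``CanvasFrame`` and the +widget classes are defined below): + +>>> cf = CanvasFrame(width=550, height=450) # doctest: +SKIP +>>> text = TextWidget(cf.canvas(), 'Hello', draggable=1) # doctest: +SKIP +>>> cf.add_widget(text, 10, 10) # doctest: +SKIP +>>> cf.mainloop() # doctest: +SKIP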
+ +""" +from abc import ABCMeta, abstractmethod +from tkinter import ( + RAISED, + Button, + Canvas, + Entry, + Frame, + Label, + Menu, + Menubutton, + Scrollbar, + StringVar, + Text, + Tk, + Toplevel, + Widget, +) +from tkinter.filedialog import asksaveasfilename + +from nltk.util import in_idle + +##////////////////////////////////////////////////////// +## CanvasWidget +##////////////////////////////////////////////////////// + + +class CanvasWidget(metaclass=ABCMeta): + """ + A collection of graphical elements and bindings used to display a + complex object on a Tkinter ``Canvas``. A canvas widget is + responsible for managing the ``Canvas`` tags and callback bindings + necessary to display and interact with the object. Canvas widgets + are often organized into hierarchies, where parent canvas widgets + control aspects of their child widgets. + + Each canvas widget is bound to a single ``Canvas``. This ``Canvas`` + is specified as the first argument to the ``CanvasWidget``'s + constructor. + + Attributes. Each canvas widget can support a variety of + "attributes", which control how the canvas widget is displayed. + Some typical examples attributes are ``color``, ``font``, and + ``radius``. Each attribute has a default value. This default + value can be overridden in the constructor, using keyword + arguments of the form ``attribute=value``: + + >>> from nltk.draw.util import TextWidget + >>> cn = TextWidget(Canvas(), 'test', color='red') # doctest: +SKIP + + Attribute values can also be changed after a canvas widget has + been constructed, using the ``__setitem__`` operator: + + >>> cn['font'] = 'times' # doctest: +SKIP + + The current value of an attribute value can be queried using the + ``__getitem__`` operator: + + >>> cn['color'] # doctest: +SKIP + 'red' + + For a list of the attributes supported by a type of canvas widget, + see its class documentation. + + Interaction. The attribute ``'draggable'`` controls whether the + user can drag a canvas widget around the canvas. By default, + canvas widgets are not draggable. + + ``CanvasWidget`` provides callback support for two types of user + interaction: clicking and dragging. The method ``bind_click`` + registers a callback function that is called whenever the canvas + widget is clicked. The method ``bind_drag`` registers a callback + function that is called after the canvas widget is dragged. If + the user clicks or drags a canvas widget with no registered + callback function, then the interaction event will propagate to + its parent. For each canvas widget, only one callback function + may be registered for an interaction event. Callback functions + can be deregistered with the ``unbind_click`` and ``unbind_drag`` + methods. + + Subclassing. ``CanvasWidget`` is an abstract class. Subclasses + are required to implement the following methods: + + - ``__init__``: Builds a new canvas widget. It must perform the + following three tasks (in order): + + - Create any new graphical elements. + - Call ``_add_child_widget`` on each child widget. + - Call the ``CanvasWidget`` constructor. + - ``_tags``: Returns a list of the canvas tags for all graphical + elements managed by this canvas widget, not including + graphical elements managed by its child widgets. + - ``_manage``: Arranges the child widgets of this canvas widget. + This is typically only called when the canvas widget is + created. + - ``_update``: Update this canvas widget in response to a + change in a single child. 
+ + For a ``CanvasWidget`` with no child widgets, the default + definitions for ``_manage`` and ``_update`` may be used. + + If a subclass defines any attributes, then it should implement + ``__getitem__`` and ``__setitem__``. If either of these methods is + called with an unknown attribute, then they should propagate the + request to ``CanvasWidget``. + + Most subclasses implement a number of additional methods that + modify the ``CanvasWidget`` in some way. These methods must call + ``parent.update(self)`` after making any changes to the canvas + widget's graphical elements. The canvas widget must also call + ``parent.update(self)`` after changing any attribute value that + affects the shape or position of the canvas widget's graphical + elements. + + :type __canvas: Tkinter.Canvas + :ivar __canvas: This ``CanvasWidget``'s canvas. + + :type __parent: CanvasWidget or None + :ivar __parent: This ``CanvasWidget``'s hierarchical parent widget. + :type __children: list(CanvasWidget) + :ivar __children: This ``CanvasWidget``'s hierarchical child widgets. + + :type __updating: bool + :ivar __updating: Is this canvas widget currently performing an + update? If it is, then it will ignore any new update requests + from child widgets. + + :type __draggable: bool + :ivar __draggable: Is this canvas widget draggable? + :type __press: event + :ivar __press: The ButtonPress event that we're currently handling. + :type __drag_x: int + :ivar __drag_x: Where it's been moved to (to find dx) + :type __drag_y: int + :ivar __drag_y: Where it's been moved to (to find dy) + :type __callbacks: dictionary + :ivar __callbacks: Registered callbacks. Currently, four keys are + used: ``1``, ``2``, ``3``, and ``'drag'``. The values are + callback functions. Each callback function takes a single + argument, which is the ``CanvasWidget`` that triggered the + callback. + """ + + def __init__(self, canvas, parent=None, **attribs): + """ + Create a new canvas widget. This constructor should only be + called by subclass constructors; and it should be called only + "after" the subclass has constructed all graphical canvas + objects and registered all child widgets. + + :param canvas: This canvas widget's canvas. + :type canvas: Tkinter.Canvas + :param parent: This canvas widget's hierarchical parent. + :type parent: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + if self.__class__ == CanvasWidget: + raise TypeError("CanvasWidget is an abstract base class") + + if not isinstance(canvas, Canvas): + raise TypeError("Expected a canvas!") + + self.__canvas = canvas + self.__parent = parent + + # If the subclass constructor called _add_child_widget, then + # self.__children will already exist. + if not hasattr(self, "_CanvasWidget__children"): + self.__children = [] + + # Is this widget hidden? + self.__hidden = 0 + + # Update control (prevents infinite loops) + self.__updating = 0 + + # Button-press and drag callback handling. + self.__press = None + self.__drag_x = self.__drag_y = 0 + self.__callbacks = {} + self.__draggable = 0 + + # Set up attributes. + for (attr, value) in list(attribs.items()): + self[attr] = value + + # Manage this canvas widget + self._manage() + + # Register any new bindings + for tag in self._tags(): + self.__canvas.tag_bind(tag, "<ButtonPress-1>", self.__press_cb) + self.__canvas.tag_bind(tag, "<ButtonPress-2>", self.__press_cb) + self.__canvas.tag_bind(tag, "<ButtonPress-3>", self.__press_cb) + + ##////////////////////////////////////////////////////// + ## Inherited methods.
+ ##////////////////////////////////////////////////////// + + def bbox(self): + """ + :return: A bounding box for this ``CanvasWidget``. The bounding + box is a tuple of four coordinates, *(xmin, ymin, xmax, ymax)*, + for a rectangle which encloses all of the canvas + widget's graphical elements. Bounding box coordinates are + specified with respect to the coordinate space of the ``Canvas``. + :rtype: tuple(int, int, int, int) + """ + if self.__hidden: + return (0, 0, 0, 0) + if len(self.tags()) == 0: + raise ValueError("No tags") + return self.__canvas.bbox(*self.tags()) + + def width(self): + """ + :return: The width of this canvas widget's bounding box, in + its ``Canvas``'s coordinate space. + :rtype: int + """ + if len(self.tags()) == 0: + raise ValueError("No tags") + bbox = self.__canvas.bbox(*self.tags()) + return bbox[2] - bbox[0] + + def height(self): + """ + :return: The height of this canvas widget's bounding box, in + its ``Canvas``'s coordinate space. + :rtype: int + """ + if len(self.tags()) == 0: + raise ValueError("No tags") + bbox = self.__canvas.bbox(*self.tags()) + return bbox[3] - bbox[1] + + def parent(self): + """ + :return: The hierarchical parent of this canvas widget. + ``self`` is considered a subpart of its parent for + purposes of user interaction. + :rtype: CanvasWidget or None + """ + return self.__parent + + def child_widgets(self): + """ + :return: A list of the hierarchical children of this canvas + widget. These children are considered part of ``self`` + for purposes of user interaction. + :rtype: list of CanvasWidget + """ + return self.__children + + def canvas(self): + """ + :return: The canvas that this canvas widget is bound to. + :rtype: Tkinter.Canvas + """ + return self.__canvas + + def move(self, dx, dy): + """ + Move this canvas widget by a given distance. In particular, + shift the canvas widget right by ``dx`` pixels, and down by + ``dy`` pixels. Both ``dx`` and ``dy`` may be negative, resulting + in leftward or upward movement. + + :type dx: int + :param dx: The number of pixels to move this canvas widget + rightwards. + :type dy: int + :param dy: The number of pixels to move this canvas widget + downwards. + :rtype: None + """ + if dx == dy == 0: + return + for tag in self.tags(): + self.__canvas.move(tag, dx, dy) + if self.__parent: + self.__parent.update(self) + + def moveto(self, x, y, anchor="NW"): + """ + Move this canvas widget to the given location. In particular, + shift the canvas widget such that the corner or side of the + bounding box specified by ``anchor`` is at location (``x``, + ``y``). + + :param x,y: The location that the canvas widget should be moved + to. + :param anchor: The corner or side of the canvas widget that + should be moved to the specified location. ``'N'`` + specifies the top center; ``'NE'`` specifies the top right + corner; etc. + """ + x1, y1, x2, y2 = self.bbox() + if anchor == "NW": + self.move(x - x1, y - y1) + if anchor == "N": + self.move(x - x1 / 2 - x2 / 2, y - y1) + if anchor == "NE": + self.move(x - x2, y - y1) + if anchor == "E": + self.move(x - x2, y - y1 / 2 - y2 / 2) + if anchor == "SE": + self.move(x - x2, y - y2) + if anchor == "S": + self.move(x - x1 / 2 - x2 / 2, y - y2) + if anchor == "SW": + self.move(x - x1, y - y2) + if anchor == "W": + self.move(x - x1, y - y1 / 2 - y2 / 2) + + def destroy(self): + """ + Remove this ``CanvasWidget`` from its ``Canvas``. After a + ``CanvasWidget`` has been destroyed, it should not be accessed. 
+ + Note that you only need to destroy a top-level + ``CanvasWidget``; its child widgets will be destroyed + automatically. If you destroy a non-top-level + ``CanvasWidget``, then the entire top-level widget will be + destroyed. + + :raise ValueError: if this ``CanvasWidget`` has a parent. + :rtype: None + """ + if self.__parent is not None: + self.__parent.destroy() + return + + for tag in self.tags(): + self.__canvas.tag_unbind(tag, "<ButtonPress-1>") + self.__canvas.tag_unbind(tag, "<ButtonPress-2>") + self.__canvas.tag_unbind(tag, "<ButtonPress-3>") + self.__canvas.delete(*self.tags()) + self.__canvas = None + + def update(self, child): + """ + Update the graphical display of this canvas widget, and all of + its ancestors, in response to a change in one of this canvas + widget's children. + + :param child: The child widget that changed. + :type child: CanvasWidget + """ + if self.__hidden or child.__hidden: + return + # If we're already updating, then do nothing. This prevents + # infinite loops when _update modifies its children. + if self.__updating: + return + self.__updating = 1 + + # Update this CanvasWidget. + self._update(child) + + # Propagate update request to the parent. + if self.__parent: + self.__parent.update(self) + + # We're done updating. + self.__updating = 0 + + def manage(self): + """ + Arrange this canvas widget and all of its descendants. + + :rtype: None + """ + if self.__hidden: + return + for child in self.__children: + child.manage() + self._manage() + + def tags(self): + """ + :return: a list of the canvas tags for all graphical + elements managed by this canvas widget, including + graphical elements managed by its child widgets. + :rtype: list of int + """ + if self.__canvas is None: + raise ValueError("Attempt to access a destroyed canvas widget") + tags = [] + tags += self._tags() + for child in self.__children: + tags += child.tags() + return tags + + def __setitem__(self, attr, value): + """ + Set the value of the attribute ``attr`` to ``value``. See the + class documentation for a list of attributes supported by this + canvas widget. + + :rtype: None + """ + if attr == "draggable": + self.__draggable = value + else: + raise ValueError("Unknown attribute %r" % attr) + + def __getitem__(self, attr): + """ + :return: the value of the attribute ``attr``. See the class + documentation for a list of attributes supported by this + canvas widget. + :rtype: (any) + """ + if attr == "draggable": + return self.__draggable + else: + raise ValueError("Unknown attribute %r" % attr) + + def __repr__(self): + """ + :return: a string representation of this canvas widget. + :rtype: str + """ + return "<%s>" % self.__class__.__name__ + + def hide(self): + """ + Temporarily hide this canvas widget. + + :rtype: None + """ + self.__hidden = 1 + for tag in self.tags(): + self.__canvas.itemconfig(tag, state="hidden") + + def show(self): + """ + Show a hidden canvas widget. + + :rtype: None + """ + self.__hidden = 0 + for tag in self.tags(): + self.__canvas.itemconfig(tag, state="normal") + + def hidden(self): + """ + :return: True if this canvas widget is hidden. + :rtype: bool + """ + return self.__hidden + + ##////////////////////////////////////////////////////// + ## Callback interface + ##////////////////////////////////////////////////////// + + def bind_click(self, callback, button=1): + """ + Register a new callback that will be called whenever this + ``CanvasWidget`` is clicked on.
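+ + Example (illustrative; ``widget`` is an assumed ``CanvasWidget``): + + >>> widget.bind_click(lambda w: print(w), button=1) # doctest: +SKIP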
+ + :type callback: function + :param callback: The callback function that will be called + whenever this ``CanvasWidget`` is clicked. This function + will be called with this ``CanvasWidget`` as its argument. + :type button: int + :param button: Which button the user should use to click on + this ``CanvasWidget``. Typically, this should be 1 (left + button), 3 (right button), or 2 (middle button). + """ + self.__callbacks[button] = callback + + def bind_drag(self, callback): + """ + Register a new callback that will be called after this + ``CanvasWidget`` is dragged. This implicitly makes this + ``CanvasWidget`` draggable. + + :type callback: function + :param callback: The callback function that will be called + whenever this ``CanvasWidget`` is dragged. This function + will be called with this ``CanvasWidget`` as its argument. + """ + self.__draggable = 1 + self.__callbacks["drag"] = callback + + def unbind_click(self, button=1): + """ + Remove a callback that was registered with ``bind_click``. + + :type button: int + :param button: Which button the user should use to click on + this ``CanvasWidget``. Typically, this should be 1 (left + button), 3 (right button), or 2 (middle button). + """ + try: + del self.__callbacks[button] + except KeyError: + pass + + def unbind_drag(self): + """ + Remove a callback that was registered with ``bind_drag``. + """ + try: + del self.__callbacks["drag"] + except KeyError: + pass + + ##////////////////////////////////////////////////////// + ## Callback internals + ##////////////////////////////////////////////////////// + + def __press_cb(self, event): + """ + Handle a button-press event: + - record the button press event in ``self.__press`` + - register a button-release callback. + - if this CanvasWidget or any of its ancestors are + draggable, then register the appropriate motion callback. + """ + # If we're already waiting for a button release, then ignore + # this new button press. + if ( + self.__canvas.bind("<ButtonRelease-1>") + or self.__canvas.bind("<ButtonRelease-2>") + or self.__canvas.bind("<ButtonRelease-3>") + ): + return + + # Unbind motion (just in case; this shouldn't be necessary) + self.__canvas.unbind("<Motion>") + + # Record the button press event. + self.__press = event + + # If any ancestor is draggable, set up a motion callback. + # (Only if they pressed button number 1) + if event.num == 1: + widget = self + while widget is not None: + if widget["draggable"]: + widget.__start_drag(event) + break + widget = widget.parent() + + # Set up the button release callback. + self.__canvas.bind("<ButtonRelease-%d>" % event.num, self.__release_cb) + + def __start_drag(self, event): + """ + Begin dragging this object: + - register a motion callback + - record the drag coordinates + """ + self.__canvas.bind("<Motion>", self.__motion_cb) + self.__drag_x = event.x + self.__drag_y = event.y + + def __motion_cb(self, event): + """ + Handle a motion event: + - move this object to the new location + - record the new drag coordinates + """ + self.move(event.x - self.__drag_x, event.y - self.__drag_y) + self.__drag_x = event.x + self.__drag_y = event.y + + def __release_cb(self, event): + """ + Handle a release callback: + - unregister motion & button release callbacks. + - decide whether they clicked, dragged, or cancelled + - call the appropriate handler. + """ + # Unbind the button release & motion callbacks. + self.__canvas.unbind("<ButtonRelease-%d>" % event.num) + self.__canvas.unbind("<Motion>") + + # Is it a click or a drag?
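+ # (Heuristic: treat the gesture as a click if the button was + # released within 100 milliseconds and within a few pixels of the + # press; otherwise treat it as a drag.)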
+ if ( + event.time - self.__press.time < 100 + and abs(event.x - self.__press.x) + abs(event.y - self.__press.y) < 5 + ): + # Move it back, if we were dragging. + if self.__draggable and event.num == 1: + self.move( + self.__press.x - self.__drag_x, self.__press.y - self.__drag_y + ) + self.__click(event.num) + elif event.num == 1: + self.__drag() + + self.__press = None + + def __drag(self): + """ + If this ``CanvasWidget`` has a drag callback, then call it; + otherwise, find the closest ancestor with a drag callback, and + call it. If no ancestors have a drag callback, do nothing. + """ + if self.__draggable: + if "drag" in self.__callbacks: + cb = self.__callbacks["drag"] + try: + cb(self) + except: + print("Error in drag callback for %r" % self) + elif self.__parent is not None: + self.__parent.__drag() + + def __click(self, button): + """ + If this ``CanvasWidget`` has a click callback, then call it; + otherwise, find the closest ancestor with a click callback, and + call it. If no ancestors have a click callback, do nothing. + """ + if button in self.__callbacks: + cb = self.__callbacks[button] + # try: + cb(self) + # except: + # print('Error in click callback for %r' % self) + # raise + elif self.__parent is not None: + self.__parent.__click(button) + + ##////////////////////////////////////////////////////// + ## Child/parent Handling + ##////////////////////////////////////////////////////// + + def _add_child_widget(self, child): + """ + Register a hierarchical child widget. The child will be + considered part of this canvas widget for purposes of user + interaction. ``_add_child_widget`` has two direct effects: + - It sets ``child``'s parent to this canvas widget. + - It adds ``child`` to the list of canvas widgets returned by + the ``child_widgets`` member function. + + :param child: The new child widget. ``child`` must not already + have a parent. + :type child: CanvasWidget + """ + if not hasattr(self, "_CanvasWidget__children"): + self.__children = [] + if child.__parent is not None: + raise ValueError(f"{child} already has a parent") + child.__parent = self + self.__children.append(child) + + def _remove_child_widget(self, child): + """ + Remove a hierarchical child widget. This child will no longer + be considered part of this canvas widget for purposes of user + interaction. ``_remove_child_widget`` has two direct effects: + - It sets ``child``'s parent to None. + - It removes ``child`` from the list of canvas widgets + returned by the ``child_widgets`` member function. + + :param child: The child widget to remove. ``child`` must be a + child of this canvas widget. + :type child: CanvasWidget + """ + self.__children.remove(child) + child.__parent = None + + ##////////////////////////////////////////////////////// + ## Defined by subclass + ##////////////////////////////////////////////////////// + + @abstractmethod + def _tags(self): + """ + :return: a list of canvas tags for all graphical elements + managed by this canvas widget, not including graphical + elements managed by its child widgets. + :rtype: list of int + """ + + def _manage(self): + """ + Arrange the child widgets of this canvas widget. This method + is called when the canvas widget is initially created. It is + also called if the user calls the ``manage`` method on this + canvas widget or any of its ancestors. + + :rtype: None + """ + + def _update(self, child): + """ + Update this canvas widget in response to a change in one of + its children. + + :param child: The child that changed.
+ :type child: CanvasWidget + :rtype: None + """ + + +##////////////////////////////////////////////////////// +## Basic widgets. +##////////////////////////////////////////////////////// + + +class TextWidget(CanvasWidget): + """ + A canvas widget that displays a single string of text. + + Attributes: + - ``color``: the color of the text. + - ``font``: the font used to display the text. + - ``justify``: justification for multi-line texts. Valid values + are ``left``, ``center``, and ``right``. + - ``width``: the width of the text. If the text is wider than + this width, it will be line-wrapped at whitespace. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, text, **attribs): + """ + Create a new text widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type text: str + :param text: The string of text to display. + :param attribs: The new canvas widget's attributes. + """ + self._text = text + self._tag = canvas.create_text(1, 1, text=text) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr in ("color", "font", "justify", "width"): + if attr == "color": + attr = "fill" + self.canvas().itemconfig(self._tag, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "width": + return int(self.canvas().itemcget(self._tag, attr)) + elif attr in ("color", "font", "justify"): + if attr == "color": + attr = "fill" + return self.canvas().itemcget(self._tag, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [self._tag] + + def text(self): + """ + :return: The text displayed by this text widget. + :rtype: str + """ + return self.canvas().itemcget(self._tag, "text") + + def set_text(self, text): + """ + Change the text that is displayed by this text widget. + + :type text: str + :param text: The string of text to display. + :rtype: None + """ + self.canvas().itemconfig(self._tag, text=text) + if self.parent() is not None: + self.parent().update(self) + + def __repr__(self): + return "[Text: %r]" % self._text + + +class SymbolWidget(TextWidget): + """ + A canvas widget that displays special symbols, such as the + negation sign and the exists operator. Symbols are specified by + name. Currently, the following symbol names are defined: ``neg``, + ``disj``, ``conj``, ``lambda``, ``merge``, ``forall``, ``exists``, + ``subseteq``, ``subset``, ``notsubset``, ``emptyset``, ``imp``, + ``rightarrow``, ``equal``, ``notequal``, ``epsilon``. + + Attributes: + + - ``color``: the color of the text. + - ``draggable``: whether the text can be dragged by the user. + + :cvar SYMBOLS: A dictionary mapping from symbols to the character + in the ``symbol`` font used to render them. + """ + + SYMBOLS = { + "neg": "\330", + "disj": "\332", + "conj": "\331", + "lambda": "\154", + "merge": "\304", + "forall": "\042", + "exists": "\044", + "subseteq": "\315", + "subset": "\314", + "notsubset": "\313", + "emptyset": "\306", + "imp": "\336", + "rightarrow": chr(222), #'\256', + "equal": "\75", + "notequal": "\271", + "intersection": "\307", + "union": "\310", + "epsilon": "e", + } + + def __init__(self, canvas, symbol, **attribs): + """ + Create a new symbol widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type symbol: str + :param symbol: The name of the symbol to display. + :param attribs: The new canvas widget's attributes.
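+ + Example (an illustrative sketch; ``c`` is an assumed ``Canvas``): + + >>> sym = SymbolWidget(c, 'exists') # doctest: +SKIP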
+ """ + attribs["font"] = "symbol" + TextWidget.__init__(self, canvas, "", **attribs) + self.set_symbol(symbol) + + def symbol(self): + """ + :return: the name of the symbol that is displayed by this + symbol widget. + :rtype: str + """ + return self._symbol + + def set_symbol(self, symbol): + """ + Change the symbol that is displayed by this symbol widget. + + :type symbol: str + :param symbol: The name of the symbol to display. + """ + if symbol not in SymbolWidget.SYMBOLS: + raise ValueError("Unknown symbol: %s" % symbol) + self._symbol = symbol + self.set_text(SymbolWidget.SYMBOLS[symbol]) + + def __repr__(self): + return "[Symbol: %r]" % self._symbol + + @staticmethod + def symbolsheet(size=20): + """ + Open a new Tkinter window that displays the entire alphabet + for the symbol font. This is useful for constructing the + ``SymbolWidget.SYMBOLS`` dictionary. + """ + top = Tk() + + def destroy(e, top=top): + top.destroy() + + top.bind("q", destroy) + Button(top, text="Quit", command=top.destroy).pack(side="bottom") + text = Text(top, font=("helvetica", -size), width=20, height=30) + text.pack(side="left") + sb = Scrollbar(top, command=text.yview) + text["yscrollcommand"] = sb.set + sb.pack(side="right", fill="y") + text.tag_config("symbol", font=("symbol", -size)) + for i in range(256): + if i in (0, 10): + continue # null and newline + for k, v in list(SymbolWidget.SYMBOLS.items()): + if v == chr(i): + text.insert("end", "%-10s\t" % k) + break + else: + text.insert("end", "%-10d \t" % i) + text.insert("end", "[%s]\n" % chr(i), "symbol") + top.mainloop() + + +class AbstractContainerWidget(CanvasWidget): + """ + An abstract class for canvas widgets that contain a single child, + such as ``BoxWidget`` and ``OvalWidget``. Subclasses must define + a constructor, which should create any new graphical elements and + then call the ``AbstractCanvasContainer`` constructor. Subclasses + must also define the ``_update`` method and the ``_tags`` method; + and any subclasses that define attributes should define + ``__setitem__`` and ``__getitem__``. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new container widget. This constructor should only + be called by subclass constructors. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The container's child widget. ``child`` must not + have a parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def _manage(self): + self._update(self._child) + + def child(self): + """ + :return: The child widget contained by this container widget. + :rtype: CanvasWidget + """ + return self._child + + def set_child(self, child): + """ + Change the child widget contained by this container widget. + + :param child: The new child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :rtype: None + """ + self._remove_child_widget(self._child) + self._add_child_widget(child) + self._child = child + self.update(child) + + def __repr__(self): + name = self.__class__.__name__ + if name[-6:] == "Widget": + name = name[:-6] + return f"[{name}: {self._child!r}]" + + +class BoxWidget(AbstractContainerWidget): + """ + A canvas widget that places a box around a child widget. + + Attributes: + - ``fill``: The color used to fill the interior of the box. + - ``outline``: The color used to draw the outline of the box. 
+ - ``width``: The width of the outline of the box. + - ``margin``: The number of pixels space left between the child + and the box. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new box widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._margin = 1 + self._box = canvas.create_rectangle(1, 1, 1, 1) + canvas.tag_lower(self._box) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "margin": + self._margin = value + elif attr in ("outline", "fill", "width"): + self.canvas().itemconfig(self._box, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "margin": + return self._margin + elif attr == "width": + return float(self.canvas().itemcget(self._box, attr)) + elif attr in ("outline", "fill", "width"): + return self.canvas().itemcget(self._box, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + margin = self._margin + self["width"] / 2 + self.canvas().coords( + self._box, x1 - margin, y1 - margin, x2 + margin, y2 + margin + ) + + def _tags(self): + return [self._box] + + +class OvalWidget(AbstractContainerWidget): + """ + A canvas widget that places a oval around a child widget. + + Attributes: + - ``fill``: The color used to fill the interior of the oval. + - ``outline``: The color used to draw the outline of the oval. + - ``width``: The width of the outline of the oval. + - ``margin``: The number of pixels space left between the child + and the oval. + - ``draggable``: whether the text can be dragged by the user. + - ``double``: If true, then a double-oval is drawn. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new oval widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._margin = 1 + self._oval = canvas.create_oval(1, 1, 1, 1) + self._circle = attribs.pop("circle", False) + self._double = attribs.pop("double", False) + if self._double: + self._oval2 = canvas.create_oval(1, 1, 1, 1) + else: + self._oval2 = None + canvas.tag_lower(self._oval) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + c = self.canvas() + if attr == "margin": + self._margin = value + elif attr == "double": + if value == True and self._oval2 is None: + # Copy attributes & position from self._oval. 
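+ # (The second oval is drawn just outside the first, reusing its + # outline color and width; it is never filled, which produces the + # double-outline effect.)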
+ x1, y1, x2, y2 = c.bbox(self._oval) + w = self["width"] * 2 + self._oval2 = c.create_oval( + x1 - w, + y1 - w, + x2 + w, + y2 + w, + outline=c.itemcget(self._oval, "outline"), + width=c.itemcget(self._oval, "width"), + ) + c.tag_lower(self._oval2) + if value == False and self._oval2 is not None: + c.delete(self._oval2) + self._oval2 = None + elif attr in ("outline", "fill", "width"): + c.itemconfig(self._oval, {attr: value}) + if self._oval2 is not None and attr != "fill": + c.itemconfig(self._oval2, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "margin": + return self._margin + elif attr == "double": + return self._oval2 is not None + elif attr == "width": + return float(self.canvas().itemcget(self._oval, attr)) + elif attr in ("outline", "fill", "width"): + return self.canvas().itemcget(self._oval, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + # The ratio between inscribed & circumscribed ovals + RATIO = 1.4142135623730949 + + def _update(self, child): + R = OvalWidget.RATIO + (x1, y1, x2, y2) = child.bbox() + margin = self._margin + + # If we're a circle, pretend our contents are square. + if self._circle: + dx, dy = abs(x1 - x2), abs(y1 - y2) + if dx > dy: + y = (y1 + y2) / 2 + y1, y2 = y - dx / 2, y + dx / 2 + elif dy > dx: + x = (x1 + x2) / 2 + x1, x2 = x - dy / 2, x + dy / 2 + + # Find the four corners. + left = int((x1 * (1 + R) + x2 * (1 - R)) / 2) + right = left + int((x2 - x1) * R) + top = int((y1 * (1 + R) + y2 * (1 - R)) / 2) + bot = top + int((y2 - y1) * R) + self.canvas().coords( + self._oval, left - margin, top - margin, right + margin, bot + margin + ) + if self._oval2 is not None: + self.canvas().coords( + self._oval2, + left - margin + 2, + top - margin + 2, + right + margin - 2, + bot + margin - 2, + ) + + def _tags(self): + if self._oval2 is None: + return [self._oval] + else: + return [self._oval, self._oval2] + + +class ParenWidget(AbstractContainerWidget): + """ + A canvas widget that places a pair of parentheses around a child + widget. + + Attributes: + - ``color``: The color used to draw the parentheses. + - ``width``: The width of the parentheses. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new parenthesis widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not + have a parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes.
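+ + Example (illustrative; mirrors the usage in ``nltk.draw.tree.demo``): + + >>> paren = ParenWidget(canvas, child) # doctest: +SKIP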
+ """ + self._child = child + self._oparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=90, extent=180) + self._cparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=-90, extent=180) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "color": + self.canvas().itemconfig(self._oparen, outline=value) + self.canvas().itemconfig(self._cparen, outline=value) + elif attr == "width": + self.canvas().itemconfig(self._oparen, width=value) + self.canvas().itemconfig(self._cparen, width=value) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "color": + return self.canvas().itemcget(self._oparen, "outline") + elif attr == "width": + return self.canvas().itemcget(self._oparen, "width") + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + width = max((y2 - y1) / 6, 4) + self.canvas().coords(self._oparen, x1 - width, y1, x1 + width, y2) + self.canvas().coords(self._cparen, x2 - width, y1, x2 + width, y2) + + def _tags(self): + return [self._oparen, self._cparen] + + +class BracketWidget(AbstractContainerWidget): + """ + A canvas widget that places a pair of brackets around a child + widget. + + Attributes: + - ``color``: The color used to draw the brackets. + - ``width``: The width of the brackets. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new bracket widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._obrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1) + self._cbrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "color": + self.canvas().itemconfig(self._obrack, fill=value) + self.canvas().itemconfig(self._cbrack, fill=value) + elif attr == "width": + self.canvas().itemconfig(self._obrack, width=value) + self.canvas().itemconfig(self._cbrack, width=value) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "color": + return self.canvas().itemcget(self._obrack, "outline") + elif attr == "width": + return self.canvas().itemcget(self._obrack, "width") + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + width = max((y2 - y1) / 8, 2) + self.canvas().coords( + self._obrack, x1, y1, x1 - width, y1, x1 - width, y2, x1, y2 + ) + self.canvas().coords( + self._cbrack, x2, y1, x2 + width, y1, x2 + width, y2, x2, y2 + ) + + def _tags(self): + return [self._obrack, self._cbrack] + + +class SequenceWidget(CanvasWidget): + """ + A canvas widget that keeps a list of canvas widgets in a + horizontal line. + + Attributes: + - ``align``: The vertical alignment of the children. Possible + values are ``'top'``, ``'center'``, and ``'bottom'``. By + default, children are center-aligned. + - ``space``: The amount of horizontal space to place between + children. By default, one pixel of space is used. + - ``ordered``: If true, then keep the children in their + original order. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new sequence widget. 
+ + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param children: The widgets that should be aligned + horizontally. Each child must not have a parent. + :type children: list(CanvasWidget) + :param attribs: The new canvas widget's attributes. + """ + self._align = "center" + self._space = 1 + self._ordered = False + self._children = list(children) + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr == "align": + if value not in ("top", "bottom", "center"): + raise ValueError("Bad alignment: %r" % value) + self._align = value + elif attr == "space": + self._space = value + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "align": + return self._align + elif attr == "space": + return self._space + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _yalign(self, top, bot): + if self._align == "top": + return top + if self._align == "bottom": + return bot + if self._align == "center": + return (top + bot) / 2 + + def _update(self, child): + # Align all children with child. + (left, top, right, bot) = child.bbox() + y = self._yalign(top, bot) + for c in self._children: + (x1, y1, x2, y2) = c.bbox() + c.move(0, y - self._yalign(y1, y2)) + + if self._ordered and len(self._children) > 1: + index = self._children.index(child) + + x = right + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + if x > x1: + self._children[i].move(x - x1, 0) + x += x2 - x1 + self._space + + x = left - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + if x < x2: + self._children[i].move(x - x2, 0) + x -= x2 - x1 + self._space + + def _manage(self): + if len(self._children) == 0: + return + child = self._children[0] + + # Align all children with child. + (left, top, right, bot) = child.bbox() + y = self._yalign(top, bot) + + index = self._children.index(child) + + # Line up children to the right of child. + x = right + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - x1, y - self._yalign(y1, y2)) + x += x2 - x1 + self._space + + # Line up children to the left of child. + x = left - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - x2, y - self._yalign(y1, y2)) + x -= x2 - x1 + self._space + + def __repr__(self): + return "[Sequence: " + repr(self._children)[1:-1] + "]" + + # Provide an alias for the child_widgets() member. + children = CanvasWidget.child_widgets + + def replace_child(self, oldchild, newchild): + """ + Replace the child canvas widget ``oldchild`` with ``newchild``. + ``newchild`` must not have a parent. ``oldchild``'s parent will + be set to None. + + :type oldchild: CanvasWidget + :param oldchild: The child canvas widget to remove. + :type newchild: CanvasWidget + :param newchild: The canvas widget that should replace + ``oldchild``. + """ + index = self._children.index(oldchild) + self._children[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + """ + Remove the given child canvas widget. 
``child``'s parent will + be set to None. + + :type child: CanvasWidget + :param child: The child canvas widget to remove. + """ + index = self._children.index(child) + del self._children[index] + self._remove_child_widget(child) + if len(self._children) > 0: + self.update(self._children[0]) + + def insert_child(self, index, child): + """ + Insert a child canvas widget before a given index. + + :type child: CanvasWidget + :param child: The canvas widget that should be inserted. + :type index: int + :param index: The index where the child widget should be + inserted. In particular, the index of ``child`` will be + ``index``; and the index of any children whose indices were + greater than equal to ``index`` before ``child`` was + inserted will be incremented by one. + """ + self._children.insert(index, child) + self._add_child_widget(child) + + +class StackWidget(CanvasWidget): + """ + A canvas widget that keeps a list of canvas widgets in a vertical + line. + + Attributes: + - ``align``: The horizontal alignment of the children. Possible + values are ``'left'``, ``'center'``, and ``'right'``. By + default, children are center-aligned. + - ``space``: The amount of vertical space to place between + children. By default, one pixel of space is used. + - ``ordered``: If true, then keep the children in their + original order. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new stack widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param children: The widgets that should be aligned + vertically. Each child must not have a parent. + :type children: list(CanvasWidget) + :param attribs: The new canvas widget's attributes. + """ + self._align = "center" + self._space = 1 + self._ordered = False + self._children = list(children) + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr == "align": + if value not in ("left", "right", "center"): + raise ValueError("Bad alignment: %r" % value) + self._align = value + elif attr == "space": + self._space = value + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "align": + return self._align + elif attr == "space": + return self._space + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _xalign(self, left, right): + if self._align == "left": + return left + if self._align == "right": + return right + if self._align == "center": + return (left + right) / 2 + + def _update(self, child): + # Align all children with child. + (left, top, right, bot) = child.bbox() + x = self._xalign(left, right) + for c in self._children: + (x1, y1, x2, y2) = c.bbox() + c.move(x - self._xalign(x1, x2), 0) + + if self._ordered and len(self._children) > 1: + index = self._children.index(child) + + y = bot + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + if y > y1: + self._children[i].move(0, y - y1) + y += y2 - y1 + self._space + + y = top - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + if y < y2: + self._children[i].move(0, y - y2) + y -= y2 - y1 + self._space + + def _manage(self): + if len(self._children) == 0: + return + child = self._children[0] + + # Align all children with child. 
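+        # (The first child acts as the anchor: every other child is aligned
+        # and spaced relative to its bounding box.)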
+        (left, top, right, bot) = child.bbox()
+        x = self._xalign(left, right)
+
+        index = self._children.index(child)
+
+        # Line up children below the child.
+        y = bot + self._space
+        for i in range(index + 1, len(self._children)):
+            (x1, y1, x2, y2) = self._children[i].bbox()
+            self._children[i].move(x - self._xalign(x1, x2), y - y1)
+            y += y2 - y1 + self._space
+
+        # Line up children above the child.
+        y = top - self._space
+        for i in range(index - 1, -1, -1):
+            (x1, y1, x2, y2) = self._children[i].bbox()
+            self._children[i].move(x - self._xalign(x1, x2), y - y2)
+            y -= y2 - y1 + self._space
+
+    def __repr__(self):
+        return "[Stack: " + repr(self._children)[1:-1] + "]"
+
+    # Provide an alias for the child_widgets() member.
+    children = CanvasWidget.child_widgets
+
+    def replace_child(self, oldchild, newchild):
+        """
+        Replace the child canvas widget ``oldchild`` with ``newchild``.
+        ``newchild`` must not have a parent.  ``oldchild``'s parent will
+        be set to None.
+
+        :type oldchild: CanvasWidget
+        :param oldchild: The child canvas widget to remove.
+        :type newchild: CanvasWidget
+        :param newchild: The canvas widget that should replace
+            ``oldchild``.
+        """
+        index = self._children.index(oldchild)
+        self._children[index] = newchild
+        self._remove_child_widget(oldchild)
+        self._add_child_widget(newchild)
+        self.update(newchild)
+
+    def remove_child(self, child):
+        """
+        Remove the given child canvas widget.  ``child``'s parent will
+        be set to None.
+
+        :type child: CanvasWidget
+        :param child: The child canvas widget to remove.
+        """
+        index = self._children.index(child)
+        del self._children[index]
+        self._remove_child_widget(child)
+        if len(self._children) > 0:
+            self.update(self._children[0])
+
+    def insert_child(self, index, child):
+        """
+        Insert a child canvas widget before a given index.
+
+        :type child: CanvasWidget
+        :param child: The canvas widget that should be inserted.
+        :type index: int
+        :param index: The index where the child widget should be
+            inserted.  In particular, the index of ``child`` will be
+            ``index``; and the index of any children whose indices were
+            greater than or equal to ``index`` before ``child`` was
+            inserted will be incremented by one.
+        """
+        self._children.insert(index, child)
+        self._add_child_widget(child)
+
+
+class SpaceWidget(CanvasWidget):
+    """
+    A canvas widget that takes up space but does not display
+    anything.  A ``SpaceWidget`` can be used to add space between
+    elements.  Each space widget is characterized by a width and a
+    height.  If you wish to only create horizontal space, then use a
+    height of zero; and if you wish to only create vertical space, use
+    a width of zero.
+    """
+
+    def __init__(self, canvas, width, height, **attribs):
+        """
+        Create a new space widget.
+
+        :type canvas: Tkinter.Canvas
+        :param canvas: This canvas widget's canvas.
+        :type width: int
+        :param width: The width of the new space widget.
+        :type height: int
+        :param height: The height of the new space widget.
+        :param attribs: The new canvas widget's attributes.
+        """
+        # For some reason, Tk seems to add about four extra pixels to the
+        # line's size; shrink the requested size to compensate.
+        if width > 4:
+            width -= 4
+        if height > 4:
+            height -= 4
+        self._tag = canvas.create_line(1, 1, width, height, fill="")
+        CanvasWidget.__init__(self, canvas, **attribs)
+
+    # note: width() and height() are already defined by CanvasWidget.
+    def set_width(self, width):
+        """
+        Change the width of this space widget.
+
+        :param width: The new width.
+ :type width: int + :rtype: None + """ + [x1, y1, x2, y2] = self.bbox() + self.canvas().coords(self._tag, x1, y1, x1 + width, y2) + + def set_height(self, height): + """ + Change the height of this space widget. + + :param height: The new height. + :type height: int + :rtype: None + """ + [x1, y1, x2, y2] = self.bbox() + self.canvas().coords(self._tag, x1, y1, x2, y1 + height) + + def _tags(self): + return [self._tag] + + def __repr__(self): + return "[Space]" + + +class ScrollWatcherWidget(CanvasWidget): + """ + A special canvas widget that adjusts its ``Canvas``'s scrollregion + to always include the bounding boxes of all of its children. The + scroll-watcher widget will only increase the size of the + ``Canvas``'s scrollregion; it will never decrease it. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new scroll-watcher widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type children: list(CanvasWidget) + :param children: The canvas widgets watched by the + scroll-watcher. The scroll-watcher will ensure that these + canvas widgets are always contained in their canvas's + scrollregion. + :param attribs: The new canvas widget's attributes. + """ + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def add_child(self, canvaswidget): + """ + Add a new canvas widget to the scroll-watcher. The + scroll-watcher will ensure that the new canvas widget is + always contained in its canvas's scrollregion. + + :param canvaswidget: The new canvas widget. + :type canvaswidget: CanvasWidget + :rtype: None + """ + self._add_child_widget(canvaswidget) + self.update(canvaswidget) + + def remove_child(self, canvaswidget): + """ + Remove a canvas widget from the scroll-watcher. The + scroll-watcher will no longer ensure that the new canvas + widget is always contained in its canvas's scrollregion. + + :param canvaswidget: The canvas widget to remove. + :type canvaswidget: CanvasWidget + :rtype: None + """ + self._remove_child_widget(canvaswidget) + + def _tags(self): + return [] + + def _update(self, child): + self._adjust_scrollregion() + + def _adjust_scrollregion(self): + """ + Adjust the scrollregion of this scroll-watcher's ``Canvas`` to + include the bounding boxes of all of its children. + """ + bbox = self.bbox() + canvas = self.canvas() + scrollregion = [int(n) for n in canvas["scrollregion"].split()] + if len(scrollregion) != 4: + return + if ( + bbox[0] < scrollregion[0] + or bbox[1] < scrollregion[1] + or bbox[2] > scrollregion[2] + or bbox[3] > scrollregion[3] + ): + scrollregion = "%d %d %d %d" % ( + min(bbox[0], scrollregion[0]), + min(bbox[1], scrollregion[1]), + max(bbox[2], scrollregion[2]), + max(bbox[3], scrollregion[3]), + ) + canvas["scrollregion"] = scrollregion + + +##////////////////////////////////////////////////////// +## Canvas Frame +##////////////////////////////////////////////////////// + + +class CanvasFrame: + """ + A ``Tkinter`` frame containing a canvas and scrollbars. + ``CanvasFrame`` uses a ``ScrollWatcherWidget`` to ensure that all of + the canvas widgets contained on its canvas are within its + scrollregion. In order for ``CanvasFrame`` to make these checks, + all canvas widgets must be registered with ``add_widget`` when they + are added to the canvas; and destroyed with ``destroy_widget`` when + they are no longer needed. 
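+
+    A minimal usage sketch (illustrative only; the widget, text, and
+    coordinates are arbitrary examples):
+
+        >>> cf = CanvasFrame(width=300, height=200)     # doctest: +SKIP
+        >>> tw = TextWidget(cf.canvas(), 'Hello')       # doctest: +SKIP
+        >>> cf.add_widget(tw, 10, 10)                   # doctest: +SKIP
+        >>> cf.destroy_widget(tw)                       # doctest: +SKIP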
+
+    If a ``CanvasFrame`` is created with no parent, then it will create
+    its own main window, including a "Done" button and a "Print"
+    button.
+    """
+
+    def __init__(self, parent=None, **kw):
+        """
+        Create a new ``CanvasFrame``.
+
+        :type parent: Tkinter.BaseWidget or Tkinter.Tk
+        :param parent: The parent ``Tkinter`` widget.  If no parent is
+            specified, then ``CanvasFrame`` will create a new main
+            window.
+        :param kw: Keyword arguments for the new ``Canvas``.  See the
+            documentation for ``Tkinter.Canvas`` for more information.
+        """
+        # If no parent was given, set up a top-level window.
+        if parent is None:
+            self._parent = Tk()
+            self._parent.title("NLTK")
+            self._parent.bind("<Control-p>", lambda e: self.print_to_file())
+            self._parent.bind("<Control-x>", self.destroy)
+            self._parent.bind("<Control-q>", self.destroy)
+        else:
+            self._parent = parent
+
+        # Create a frame for the canvas & scrollbars
+        self._frame = frame = Frame(self._parent)
+        self._canvas = canvas = Canvas(frame, **kw)
+        xscrollbar = Scrollbar(self._frame, orient="horizontal")
+        yscrollbar = Scrollbar(self._frame, orient="vertical")
+        xscrollbar["command"] = canvas.xview
+        yscrollbar["command"] = canvas.yview
+        canvas["xscrollcommand"] = xscrollbar.set
+        canvas["yscrollcommand"] = yscrollbar.set
+        yscrollbar.pack(fill="y", side="right")
+        xscrollbar.pack(fill="x", side="bottom")
+        canvas.pack(expand=1, fill="both", side="left")
+
+        # Set initial scroll region.
+        scrollregion = "0 0 {} {}".format(canvas["width"], canvas["height"])
+        canvas["scrollregion"] = scrollregion
+
+        self._scrollwatcher = ScrollWatcherWidget(canvas)
+
+        # If no parent was given, pack the frame, and add a menu.
+        if parent is None:
+            self.pack(expand=1, fill="both")
+            self._init_menubar()
+
+    def _init_menubar(self):
+        menubar = Menu(self._parent)
+
+        filemenu = Menu(menubar, tearoff=0)
+        filemenu.add_command(
+            label="Print to Postscript",
+            underline=0,
+            command=self.print_to_file,
+            accelerator="Ctrl-p",
+        )
+        filemenu.add_command(
+            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
+        )
+        menubar.add_cascade(label="File", underline=0, menu=filemenu)
+
+        self._parent.config(menu=menubar)
+
+    def print_to_file(self, filename=None):
+        """
+        Print the contents of this ``CanvasFrame`` to a postscript
+        file.  If no filename is given, then prompt the user for one.
+
+        :param filename: The name of the file to print the tree to.
+        :type filename: str
+        :rtype: None
+        """
+        if filename is None:
+            ftypes = [("Postscript files", ".ps"), ("All files", "*")]
+            filename = asksaveasfilename(filetypes=ftypes, defaultextension=".ps")
+            if not filename:
+                return
+        (x0, y0, w, h) = self.scrollregion()
+        postscript = self._canvas.postscript(
+            x=x0,
+            y=y0,
+            width=w + 2,
+            height=h + 2,
+            pagewidth=w + 2,  # points = 1/72 inch
+            pageheight=h + 2,  # points = 1/72 inch
+            pagex=0,
+            pagey=0,
+        )
+        # workaround for bug in Tk font handling
+        postscript = postscript.replace(" 0 scalefont ", " 9 scalefont ")
+        with open(filename, "wb") as f:
+            f.write(postscript.encode("utf8"))
+
+    def scrollregion(self):
+        """
+        :return: The current scroll region for the canvas managed by
+            this ``CanvasFrame``.
+        :rtype: 4-tuple of int
+        """
+        (x1, y1, x2, y2) = self._canvas["scrollregion"].split()
+        return (int(x1), int(y1), int(x2), int(y2))
+
+    def canvas(self):
+        """
+        :return: The canvas managed by this ``CanvasFrame``.
+        :rtype: Tkinter.Canvas
+        """
+        return self._canvas
+
+    def add_widget(self, canvaswidget, x=None, y=None):
+        """
+        Register a canvas widget with this ``CanvasFrame``.
+        The ``CanvasFrame`` will ensure that this canvas widget is always
+        within the ``Canvas``'s scrollregion.  If no coordinates are
+        given for the canvas widget, then the ``CanvasFrame`` will
+        attempt to find a clear area of the canvas for it.
+
+        :type canvaswidget: CanvasWidget
+        :param canvaswidget: The new canvas widget.  ``canvaswidget``
+            must have been created on this ``CanvasFrame``'s canvas.
+        :type x: int
+        :param x: The initial x coordinate for the upper left hand
+            corner of ``canvaswidget``, in the canvas's coordinate
+            space.
+        :type y: int
+        :param y: The initial y coordinate for the upper left hand
+            corner of ``canvaswidget``, in the canvas's coordinate
+            space.
+        """
+        if x is None or y is None:
+            (x, y) = self._find_room(canvaswidget, x, y)
+
+        # Move to (x,y)
+        (x1, y1, x2, y2) = canvaswidget.bbox()
+        canvaswidget.move(x - x1, y - y1)
+
+        # Register with scrollwatcher.
+        self._scrollwatcher.add_child(canvaswidget)
+
+    def _find_room(self, widget, desired_x, desired_y):
+        """
+        Try to find a space for a given widget.
+        """
+        (left, top, right, bot) = self.scrollregion()
+        w = widget.width()
+        h = widget.height()
+
+        if w >= (right - left):
+            return (0, 0)
+        if h >= (bot - top):
+            return (0, 0)
+
+        # Move the widget out of the way, for now.
+        (x1, y1, x2, y2) = widget.bbox()
+        widget.move(left - x2 - 50, top - y2 - 50)
+
+        if desired_x is not None:
+            x = desired_x
+            for y in range(top, bot - h, int((bot - top - h) / 10)):
+                if not self._canvas.find_overlapping(
+                    x - 5, y - 5, x + w + 5, y + h + 5
+                ):
+                    return (x, y)
+
+        if desired_y is not None:
+            y = desired_y
+            for x in range(left, right - w, int((right - left - w) / 10)):
+                if not self._canvas.find_overlapping(
+                    x - 5, y - 5, x + w + 5, y + h + 5
+                ):
+                    return (x, y)
+
+        for y in range(top, bot - h, int((bot - top - h) / 10)):
+            for x in range(left, right - w, int((right - left - w) / 10)):
+                if not self._canvas.find_overlapping(
+                    x - 5, y - 5, x + w + 5, y + h + 5
+                ):
+                    return (x, y)
+        return (0, 0)
+
+    def destroy_widget(self, canvaswidget):
+        """
+        Remove a canvas widget from this ``CanvasFrame``.  This
+        deregisters the canvas widget, and destroys it.
+        """
+        self.remove_widget(canvaswidget)
+        canvaswidget.destroy()
+
+    def remove_widget(self, canvaswidget):
+        # Deregister with scrollwatcher.
+        self._scrollwatcher.remove_child(canvaswidget)
+
+    def pack(self, cnf={}, **kw):
+        """
+        Pack this ``CanvasFrame``.  See the documentation for
+        ``Tkinter.Pack`` for more information.
+        """
+        self._frame.pack(cnf, **kw)
+        # Adjust to be big enough for kids?
+
+    def destroy(self, *e):
+        """
+        Destroy this ``CanvasFrame``.  If this ``CanvasFrame`` created a
+        top-level window, then this will close that window.
+        """
+        if self._parent is None:
+            return
+        self._parent.destroy()
+        self._parent = None
+
+    def mainloop(self, *args, **kwargs):
+        """
+        Enter the Tkinter mainloop.  This function must be called if
+        this frame is created from a non-interactive program (e.g.,
+        from a script); otherwise, the frame will close as soon as
+        the script completes.
+        """
+        if in_idle():
+            return
+        self._parent.mainloop(*args, **kwargs)
+
+
+##//////////////////////////////////////////////////////
+##  Text display
+##//////////////////////////////////////////////////////
+
+
+class ShowText:
+    """
+    A ``Tkinter`` window used to display a text.  ``ShowText`` is
+    typically used by graphical tools to display help text, or similar
+    information.
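+
+    A minimal usage sketch (illustrative only; any title and text work):
+
+        >>> ShowText(None, 'Help', 'This is some help text.')  # doctest: +SKIP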
+ """ + + def __init__(self, root, title, text, width=None, height=None, **textbox_options): + if width is None or height is None: + (width, height) = self.find_dimentions(text, width, height) + + # Create the main window. + if root is None: + self._top = top = Tk() + else: + self._top = top = Toplevel(root) + top.title(title) + + b = Button(top, text="Ok", command=self.destroy) + b.pack(side="bottom") + + tbf = Frame(top) + tbf.pack(expand=1, fill="both") + scrollbar = Scrollbar(tbf, orient="vertical") + scrollbar.pack(side="right", fill="y") + textbox = Text(tbf, wrap="word", width=width, height=height, **textbox_options) + textbox.insert("end", text) + textbox["state"] = "disabled" + textbox.pack(side="left", expand=1, fill="both") + scrollbar["command"] = textbox.yview + textbox["yscrollcommand"] = scrollbar.set + + # Make it easy to close the window. + top.bind("q", self.destroy) + top.bind("x", self.destroy) + top.bind("c", self.destroy) + top.bind("", self.destroy) + top.bind("", self.destroy) + + # Focus the scrollbar, so they can use up/down, etc. + scrollbar.focus() + + def find_dimentions(self, text, width, height): + lines = text.split("\n") + if width is None: + maxwidth = max(len(line) for line in lines) + width = min(maxwidth, 80) + + # Now, find height. + height = 0 + for line in lines: + while len(line) > width: + brk = line[:width].rfind(" ") + line = line[brk:] + height += 1 + height += 1 + height = min(height, 25) + + return (width, height) + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this window is created from a non-interactive program (e.g. + from a secript); otherwise, the window will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + +##////////////////////////////////////////////////////// +## Entry dialog +##////////////////////////////////////////////////////// + + +class EntryDialog: + """ + A dialog box for entering + """ + + def __init__( + self, parent, original_text="", instructions="", set_callback=None, title=None + ): + self._parent = parent + self._original_text = original_text + self._set_callback = set_callback + + width = int(max(30, len(original_text) * 3 / 2)) + self._top = Toplevel(parent) + + if title: + self._top.title(title) + + # The text entry box. + entryframe = Frame(self._top) + entryframe.pack(expand=1, fill="both", padx=5, pady=5, ipady=10) + if instructions: + l = Label(entryframe, text=instructions) + l.pack(side="top", anchor="w", padx=30) + self._entry = Entry(entryframe, width=width) + self._entry.pack(expand=1, fill="x", padx=30) + self._entry.insert(0, original_text) + + # A divider + divider = Frame(self._top, borderwidth=1, relief="sunken") + divider.pack(fill="x", ipady=1, padx=10) + + # The buttons. 
+        buttons = Frame(self._top)
+        buttons.pack(expand=0, fill="x", padx=5, pady=5)
+        b = Button(buttons, text="Cancel", command=self._cancel, width=8)
+        b.pack(side="right", padx=5)
+        b = Button(buttons, text="Ok", command=self._ok, width=8, default="active")
+        b.pack(side="left", padx=5)
+        b = Button(buttons, text="Apply", command=self._apply, width=8)
+        b.pack(side="left")
+
+        self._top.bind("<Return>", self._ok)
+        self._top.bind("<Escape>", self._cancel)
+
+        self._entry.focus()
+
+    def _reset(self, *e):
+        self._entry.delete(0, "end")
+        self._entry.insert(0, self._original_text)
+        if self._set_callback:
+            self._set_callback(self._original_text)
+
+    def _cancel(self, *e):
+        try:
+            self._reset()
+        except Exception:
+            pass
+        self._destroy()
+
+    def _ok(self, *e):
+        self._apply()
+        self._destroy()
+
+    def _apply(self, *e):
+        if self._set_callback:
+            self._set_callback(self._entry.get())
+
+    def _destroy(self, *e):
+        if self._top is None:
+            return
+        self._top.destroy()
+        self._top = None
+
+
+##//////////////////////////////////////////////////////
+##  Colorized List
+##//////////////////////////////////////////////////////
+
+
+class ColorizedList:
+    """
+    An abstract base class for displaying a colorized list of items.
+    Subclasses should define:
+
+    - ``_init_colortags``, which sets up Text color tags that
+      will be used by the list.
+    - ``_item_repr``, which returns a list of (text, colortag)
+      tuples that make up the colorized representation of the
+      item.
+
+    :note: Typically, you will want to register a callback for
+        ``'select'`` that calls ``mark`` on the given item.
+    """
+
+    def __init__(self, parent, items=[], **options):
+        """
+        Construct a new list.
+
+        :param parent: The Tk widget that contains the colorized list
+        :param items: The initial contents of the colorized list.
+        :param options: Options for the underlying ``Text`` widget.
+        """
+        self._parent = parent
+        self._callbacks = {}
+
+        # Which items are marked?
+        self._marks = {}
+
+        # Initialize the Tkinter frames.
+        self._init_itemframe(options.copy())
+
+        # Set up key & mouse bindings.
+        self._textwidget.bind("<KeyPress>", self._keypress)
+        self._textwidget.bind("<ButtonPress>", self._buttonpress)
+
+        # Fill in the given items.
+        self._items = None
+        self.set(items)
+
+    # ////////////////////////////////////////////////////////////
+    # Abstract methods
+    # ////////////////////////////////////////////////////////////
+    @abstractmethod
+    def _init_colortags(self, textwidget, options):
+        """
+        Set up any colortags that will be used by this colorized list.
+        E.g.:
+            textwidget.tag_config('terminal', foreground='black')
+        """
+
+    @abstractmethod
+    def _item_repr(self, item):
+        """
+        Return a list of (text, colortag) tuples that make up the
+        colorized representation of the item.  Colorized
+        representations may not span multiple lines.  I.e., the text
+        strings returned may not contain newline characters.
+        """
+
+    # ////////////////////////////////////////////////////////////
+    # Item Access
+    # ////////////////////////////////////////////////////////////
+
+    def get(self, index=None):
+        """
+        :return: A list of the items contained by this list.
+        """
+        if index is None:
+            return self._items[:]
+        else:
+            return self._items[index]
+
+    def set(self, items):
+        """
+        Modify the list of items contained by this list.
+ """ + items = list(items) + if self._items == items: + return + self._items = list(items) + + self._textwidget["state"] = "normal" + self._textwidget.delete("1.0", "end") + for item in items: + for (text, colortag) in self._item_repr(item): + assert "\n" not in text, "item repr may not contain newline" + self._textwidget.insert("end", text, colortag) + self._textwidget.insert("end", "\n") + # Remove the final newline + self._textwidget.delete("end-1char", "end") + self._textwidget.mark_set("insert", "1.0") + self._textwidget["state"] = "disabled" + # Clear all marks + self._marks.clear() + + def unmark(self, item=None): + """ + Remove highlighting from the given item; or from every item, + if no item is given. + :raise ValueError: If ``item`` is not contained in the list. + :raise KeyError: If ``item`` is not marked. + """ + if item is None: + self._marks.clear() + self._textwidget.tag_remove("highlight", "1.0", "end+1char") + else: + index = self._items.index(item) + del self._marks[item] + (start, end) = ("%d.0" % (index + 1), "%d.0" % (index + 2)) + self._textwidget.tag_remove("highlight", start, end) + + def mark(self, item): + """ + Highlight the given item. + :raise ValueError: If ``item`` is not contained in the list. + """ + self._marks[item] = 1 + index = self._items.index(item) + (start, end) = ("%d.0" % (index + 1), "%d.0" % (index + 2)) + self._textwidget.tag_add("highlight", start, end) + + def markonly(self, item): + """ + Remove any current highlighting, and mark the given item. + :raise ValueError: If ``item`` is not contained in the list. + """ + self.unmark() + self.mark(item) + + def view(self, item): + """ + Adjust the view such that the given item is visible. If + the item is already visible, then do nothing. + """ + index = self._items.index(item) + self._textwidget.see("%d.0" % (index + 1)) + + # //////////////////////////////////////////////////////////// + # Callbacks + # //////////////////////////////////////////////////////////// + + def add_callback(self, event, func): + """ + Register a callback function with the list. This function + will be called whenever the given event occurs. + + :param event: The event that will trigger the callback + function. Valid events are: click1, click2, click3, + space, return, select, up, down, next, prior, move + :param func: The function that should be called when + the event occurs. ``func`` will be called with a + single item as its argument. (The item selected + or the item moved to). + """ + if event == "select": + events = ["click1", "space", "return"] + elif event == "move": + events = ["up", "down", "next", "prior"] + else: + events = [event] + + for e in events: + self._callbacks.setdefault(e, {})[func] = 1 + + def remove_callback(self, event, func=None): + """ + Deregister a callback function. If ``func`` is none, then + all callbacks are removed for the given event. 
+ """ + if event is None: + events = list(self._callbacks.keys()) + elif event == "select": + events = ["click1", "space", "return"] + elif event == "move": + events = ["up", "down", "next", "prior"] + else: + events = [event] + + for e in events: + if func is None: + del self._callbacks[e] + else: + try: + del self._callbacks[e][func] + except: + pass + + # //////////////////////////////////////////////////////////// + # Tkinter Methods + # //////////////////////////////////////////////////////////// + + def pack(self, cnf={}, **kw): + # "@include: Tkinter.Pack.pack" + self._itemframe.pack(cnf, **kw) + + def grid(self, cnf={}, **kw): + # "@include: Tkinter.Grid.grid" + self._itemframe.grid(cnf, *kw) + + def focus(self): + # "@include: Tkinter.Widget.focus" + self._textwidget.focus() + + # //////////////////////////////////////////////////////////// + # Internal Methods + # //////////////////////////////////////////////////////////// + + def _init_itemframe(self, options): + self._itemframe = Frame(self._parent) + + # Create the basic Text widget & scrollbar. + options.setdefault("background", "#e0e0e0") + self._textwidget = Text(self._itemframe, **options) + self._textscroll = Scrollbar(self._itemframe, takefocus=0, orient="vertical") + self._textwidget.config(yscrollcommand=self._textscroll.set) + self._textscroll.config(command=self._textwidget.yview) + self._textscroll.pack(side="right", fill="y") + self._textwidget.pack(expand=1, fill="both", side="left") + + # Initialize the colorization tags + self._textwidget.tag_config( + "highlight", background="#e0ffff", border="1", relief="raised" + ) + self._init_colortags(self._textwidget, options) + + # How do I want to mark keyboard selection? + self._textwidget.tag_config("sel", foreground="") + self._textwidget.tag_config( + "sel", foreground="", background="", border="", underline=1 + ) + self._textwidget.tag_lower("highlight", "sel") + + def _fire_callback(self, event, itemnum): + if event not in self._callbacks: + return + if 0 <= itemnum < len(self._items): + item = self._items[itemnum] + else: + item = None + for cb_func in list(self._callbacks[event].keys()): + cb_func(item) + + def _buttonpress(self, event): + clickloc = "@%d,%d" % (event.x, event.y) + insert_point = self._textwidget.index(clickloc) + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback("click%d" % event.num, itemnum) + + def _keypress(self, event): + if event.keysym == "Return" or event.keysym == "space": + insert_point = self._textwidget.index("insert") + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback(event.keysym.lower(), itemnum) + return + elif event.keysym == "Down": + delta = "+1line" + elif event.keysym == "Up": + delta = "-1line" + elif event.keysym == "Next": + delta = "+10lines" + elif event.keysym == "Prior": + delta = "-10lines" + else: + return "continue" + + self._textwidget.mark_set("insert", "insert" + delta) + self._textwidget.see("insert") + self._textwidget.tag_remove("sel", "1.0", "end+1char") + self._textwidget.tag_add("sel", "insert linestart", "insert lineend") + + insert_point = self._textwidget.index("insert") + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback(event.keysym.lower(), itemnum) + + return "break" + + +##////////////////////////////////////////////////////// +## Improved OptionMenu +##////////////////////////////////////////////////////// + + +class MutableOptionMenu(Menubutton): + def __init__(self, master, values, **options): + self._callback = options.get("command") + if 
"command" in options: + del options["command"] + + # Create a variable + self._variable = variable = StringVar() + if len(values) > 0: + variable.set(values[0]) + + kw = { + "borderwidth": 2, + "textvariable": variable, + "indicatoron": 1, + "relief": RAISED, + "anchor": "c", + "highlightthickness": 2, + } + kw.update(options) + Widget.__init__(self, master, "menubutton", kw) + self.widgetName = "tk_optionMenu" + self._menu = Menu(self, name="menu", tearoff=0) + self.menuname = self._menu._w + + self._values = [] + for value in values: + self.add(value) + + self["menu"] = self._menu + + def add(self, value): + if value in self._values: + return + + def set(value=value): + self.set(value) + + self._menu.add_command(label=value, command=set) + self._values.append(value) + + def set(self, value): + self._variable.set(value) + if self._callback: + self._callback(value) + + def remove(self, value): + # Might raise indexerror: pass to parent. + i = self._values.index(value) + del self._values[i] + self._menu.delete(i, i) + + def __getitem__(self, name): + if name == "menu": + return self.__menu + return Widget.__getitem__(self, name) + + def destroy(self): + """Destroy this widget and the associated menu.""" + Menubutton.destroy(self) + self._menu = None + + +##////////////////////////////////////////////////////// +## Test code. +##////////////////////////////////////////////////////// + + +def demo(): + """ + A simple demonstration showing how to use canvas widgets. + """ + + def fill(cw): + from random import randint + + cw["fill"] = "#00%04d" % randint(0, 9999) + + def color(cw): + from random import randint + + cw["color"] = "#ff%04d" % randint(0, 9999) + + cf = CanvasFrame(closeenough=10, width=300, height=300) + c = cf.canvas() + ct3 = TextWidget(c, "hiya there", draggable=1) + ct2 = TextWidget(c, "o o\n||\n___\n U", draggable=1, justify="center") + co = OvalWidget(c, ct2, outline="red") + ct = TextWidget(c, "o o\n||\n\\___/", draggable=1, justify="center") + cp = ParenWidget(c, ct, color="red") + cb = BoxWidget(c, cp, fill="cyan", draggable=1, width=3, margin=10) + equation = SequenceWidget( + c, + SymbolWidget(c, "forall"), + TextWidget(c, "x"), + SymbolWidget(c, "exists"), + TextWidget(c, "y: "), + TextWidget(c, "x"), + SymbolWidget(c, "notequal"), + TextWidget(c, "y"), + ) + space = SpaceWidget(c, 0, 30) + cstack = StackWidget(c, cb, ct3, space, co, equation, align="center") + prompt_msg = TextWidget( + c, "try clicking\nand dragging", draggable=1, justify="center" + ) + cs = SequenceWidget(c, cstack, prompt_msg) + zz = BracketWidget(c, cs, color="green4", width=3) + cf.add_widget(zz, 60, 30) + + cb.bind_click(fill) + ct.bind_click(color) + co.bind_click(fill) + ct2.bind_click(color) + ct3.bind_click(color) + + cf.mainloop() + # ShowText(None, 'title', ((('this is text'*150)+'\n')*5)) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11d31b9a6aeded7e96f7db4395801af082a25737 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__init__.py @@ -0,0 +1,235 @@ +# Natural Language Toolkit: Language Models +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Ilia Kurenkov +# URL: >> text = [['a', 'b', 'c'], ['a', 'c', 'd', 'c', 'e', 'f']] + +If we want to train a bigram model, we need to turn this text into bigrams. 
+Here's what the first sentence of our text would look like if we use a function
+from NLTK for this.
+
+    >>> from nltk.util import bigrams
+    >>> list(bigrams(text[0]))
+    [('a', 'b'), ('b', 'c')]
+
+Notice how "b" occurs both as the first and second member of different bigrams
+but "a" and "c" don't?  Wouldn't it be nice to somehow indicate how often sentences
+start with "a" and end with "c"?
+A standard way to deal with this is to add special "padding" symbols to the
+sentence before splitting it into ngrams.
+Fortunately, NLTK also has a function for that, so let's see what it does to the
+first sentence.
+
+    >>> from nltk.util import pad_sequence
+    >>> list(pad_sequence(text[0],
+    ...                   pad_left=True,
+    ...                   left_pad_symbol="<s>",
+    ...                   pad_right=True,
+    ...                   right_pad_symbol="</s>",
+    ...                   n=2))
+    ['<s>', 'a', 'b', 'c', '</s>']
+
+Note the `n` argument, which tells the function we need padding for bigrams.
+Now, passing all these parameters every time is tedious and in most cases they
+can be safely assumed as defaults anyway.
+Thus our module provides a convenience function that has all these arguments
+already set while the other arguments remain the same as for `pad_sequence`.
+
+    >>> from nltk.lm.preprocessing import pad_both_ends
+    >>> list(pad_both_ends(text[0], n=2))
+    ['<s>', 'a', 'b', 'c', '</s>']
+
+Combining the two parts discussed so far we get the following preparation steps
+for one sentence.
+
+    >>> list(bigrams(pad_both_ends(text[0], n=2)))
+    [('<s>', 'a'), ('a', 'b'), ('b', 'c'), ('c', '</s>')]
+
+To make our model more robust we could also train it on unigrams (single words)
+as well as bigrams, its main source of information.
+NLTK once again helpfully provides a function called `everygrams`.
+While not the most efficient, it is conceptually simple.
+
+    >>> from nltk.util import everygrams
+    >>> padded_bigrams = list(pad_both_ends(text[0], n=2))
+    >>> list(everygrams(padded_bigrams, max_len=2))
+    [('<s>',), ('<s>', 'a'), ('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',), ('c', '</s>'), ('</s>',)]
+
+We are almost ready to start counting ngrams, just one more step left.
+During training and evaluation our model will rely on a vocabulary that
+defines which words are "known" to the model.
+To create this vocabulary we need to pad our sentences (just like for counting
+ngrams) and then combine the sentences into one flat stream of words.
+
+    >>> from nltk.lm.preprocessing import flatten
+    >>> list(flatten(pad_both_ends(sent, n=2) for sent in text))
+    ['<s>', 'a', 'b', 'c', '</s>', '<s>', 'a', 'c', 'd', 'c', 'e', 'f', '</s>']
+
+In most cases we want to use the same text as the source for both vocabulary
+and ngram counts.
+Now that we understand what this means for our preprocessing, we can simply import
+a function that does everything for us.
+
+    >>> from nltk.lm.preprocessing import padded_everygram_pipeline
+    >>> train, vocab = padded_everygram_pipeline(2, text)
+
+So as to avoid re-creating the text in memory, both `train` and `vocab` are lazy
+iterators.  They are evaluated on demand at training time.
+
+
+Training
+========
+Having prepared our data we are ready to start training a model.
+As a simple example, let us train a Maximum Likelihood Estimator (MLE).
+We only need to specify the highest ngram order to instantiate it.
+
+    >>> from nltk.lm import MLE
+    >>> lm = MLE(2)
+
+This automatically creates an empty vocabulary...
+
+    >>> len(lm.vocab)
+    0
+
+... which gets filled as we fit the model.
+
+    >>> lm.fit(train, vocab)
+    >>> print(lm.vocab)
+    <Vocabulary with cutoff=1 unk_label='<UNK>' and 9 items>
+    >>> len(lm.vocab)
+    9
+
+The vocabulary helps us handle words that have not occurred during training.
+
+    >>> lm.vocab.lookup(text[0])
+    ('a', 'b', 'c')
+    >>> lm.vocab.lookup(["aliens", "from", "Mars"])
+    ('<UNK>', '<UNK>', '<UNK>')
+
+Moreover, in some cases we want to ignore words that we did see during training
+but that didn't occur frequently enough to provide us with useful information.
+You can tell the vocabulary to ignore such words.
+To find out how that works, check out the docs for the `Vocabulary` class.
+
+
+Using a Trained Model
+=====================
+When it comes to ngram models the training boils down to counting up the ngrams
+from the training corpus.
+
+    >>> print(lm.counts)
+    <NgramCounter with 2 ngram orders and 24 ngrams>
+
+This provides a convenient interface to access counts for unigrams...
+
+    >>> lm.counts['a']
+    2
+
+...and bigrams (in this case "a b")
+
+    >>> lm.counts[['a']]['b']
+    1
+
+And so on.  However, the real purpose of training a language model is to have it
+score how probable words are in certain contexts.
+This being MLE, the model returns the item's relative frequency as its score.
+
+    >>> lm.score("a")
+    0.15384615384615385
+
+Items that are not seen during training are mapped to the vocabulary's
+"unknown label" token.  This is "<UNK>" by default.
+
+    >>> lm.score("<UNK>") == lm.score("aliens")
+    True
+
+Here's how you get the score for a word given some preceding context.
+For example, we want to know what the chance is that "b" is preceded by "a".
+
+    >>> lm.score("b", ["a"])
+    0.5
+
+To avoid underflow when working with many small score values it makes sense to
+take their logarithm.
+For convenience this can be done with the `logscore` method.
+
+    >>> lm.logscore("a")
+    -2.700439718141092
+
+Building on this method, we can also evaluate our model's cross-entropy and
+perplexity with respect to sequences of ngrams.
+
+    >>> test = [('a', 'b'), ('c', 'd')]
+    >>> lm.entropy(test)
+    1.292481250360578
+    >>> lm.perplexity(test)
+    2.449489742783178
+
+It is advisable to preprocess your test text exactly the same way as you did
+the training text.
+
+One cool feature of ngram models is that they can be used to generate text.
+
+    >>> lm.generate(1, random_seed=3)
+    '<s>'
+    >>> lm.generate(5, random_seed=3)
+    ['<s>', 'a', 'b', 'c', 'd']
+
+Provide `random_seed` if you want to consistently reproduce the same text all
+other things being equal.  Here we are using it to test the examples.
+
+You can also condition your generation on some preceding text with the
+`text_seed` argument.
+
+    >>> lm.generate(5, text_seed=['c'], random_seed=3)
+    ['</s>', 'c', 'd', 'c', 'd']
+
+Note that an ngram model is restricted in how much preceding context it can
+take into account.  For example, a trigram model can only condition its output
+on 2 preceding words.  If you pass in a 4-word context, the first two words
+will be ignored.
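+For instance, with the bigram model trained above only the last word of the
+seed influences the next word (an illustrative call, skipped here because the
+outcome depends on the trained counts):
+
+    >>> lm.generate(4, text_seed=['b', 'c'], random_seed=3)  # doctest: +SKIP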
+""" + +from nltk.lm.counter import NgramCounter +from nltk.lm.models import ( + MLE, + AbsoluteDiscountingInterpolated, + KneserNeyInterpolated, + Laplace, + Lidstone, + StupidBackoff, + WittenBellInterpolated, +) +from nltk.lm.vocabulary import Vocabulary + +__all__ = [ + "Vocabulary", + "NgramCounter", + "MLE", + "Lidstone", + "Laplace", + "WittenBellInterpolated", + "KneserNeyInterpolated", + "AbsoluteDiscountingInterpolated", + "StupidBackoff", +] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68072cd586994b678e162a66b4e87fce91fddaf3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/counter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/counter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96b2d8e910535f218904591867b7a6618ef3827c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/counter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/models.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e3461379c300e72da18df6775da33f1f2fb78f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/models.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/preprocessing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/preprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f62b436ca41f0573087fd80a518963dd15852cc8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/preprocessing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/smoothing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/smoothing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8959b30e71622de0bedbb47d7ed34d4f0f3a5032 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/smoothing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/api.py new file mode 100644 index 0000000000000000000000000000000000000000..470c4d4ac4c495c4cf9b7bbb3af66cb2a49a02db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/api.py @@ -0,0 +1,235 @@ +# Natural Language Toolkit: Language Models +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +"""Language Model Interface.""" + +import random +import warnings +from abc import ABCMeta, abstractmethod +from bisect import bisect +from itertools import accumulate + +from nltk.lm.counter import NgramCounter +from nltk.lm.util import log_base2 +from nltk.lm.vocabulary import Vocabulary + + +class Smoothing(metaclass=ABCMeta): + """Ngram Smoothing Interface + + Implements Chen & Goodman 1995's idea that all smoothing algorithms have + certain features in common. 
This should ideally allow smoothing algorithms to + work both with Backoff and Interpolation. + """ + + def __init__(self, vocabulary, counter): + """ + :param vocabulary: The Ngram vocabulary object. + :type vocabulary: nltk.lm.vocab.Vocabulary + :param counter: The counts of the vocabulary items. + :type counter: nltk.lm.counter.NgramCounter + """ + self.vocab = vocabulary + self.counts = counter + + @abstractmethod + def unigram_score(self, word): + raise NotImplementedError() + + @abstractmethod + def alpha_gamma(self, word, context): + raise NotImplementedError() + + +def _mean(items): + """Return average (aka mean) for sequence of items.""" + return sum(items) / len(items) + + +def _random_generator(seed_or_generator): + if isinstance(seed_or_generator, random.Random): + return seed_or_generator + return random.Random(seed_or_generator) + + +def _weighted_choice(population, weights, random_generator=None): + """Like random.choice, but with weights. + + Heavily inspired by python 3.6 `random.choices`. + """ + if not population: + raise ValueError("Can't choose from empty population") + if len(population) != len(weights): + raise ValueError("The number of weights does not match the population") + cum_weights = list(accumulate(weights)) + total = cum_weights[-1] + threshold = random_generator.random() + return population[bisect(cum_weights, total * threshold)] + + +class LanguageModel(metaclass=ABCMeta): + """ABC for Language Models. + + Cannot be directly instantiated itself. + + """ + + def __init__(self, order, vocabulary=None, counter=None): + """Creates new LanguageModel. + + :param vocabulary: If provided, this vocabulary will be used instead + of creating a new one when training. + :type vocabulary: `nltk.lm.Vocabulary` or None + :param counter: If provided, use this object to count ngrams. + :type counter: `nltk.lm.NgramCounter` or None + :param ngrams_fn: If given, defines how sentences in training text are turned to ngram + sequences. + :type ngrams_fn: function or None + :param pad_fn: If given, defines how sentences in training text are padded. + :type pad_fn: function or None + """ + self.order = order + if vocabulary and not isinstance(vocabulary, Vocabulary): + warnings.warn( + f"The `vocabulary` argument passed to {self.__class__.__name__!r} " + "must be an instance of `nltk.lm.Vocabulary`.", + stacklevel=3, + ) + self.vocab = Vocabulary() if vocabulary is None else vocabulary + self.counts = NgramCounter() if counter is None else counter + + def fit(self, text, vocabulary_text=None): + """Trains the model on a text. + + :param text: Training text as a sequence of sentences. + + """ + if not self.vocab: + if vocabulary_text is None: + raise ValueError( + "Cannot fit without a vocabulary or text to create it from." + ) + self.vocab.update(vocabulary_text) + self.counts.update(self.vocab.lookup(sent) for sent in text) + + def score(self, word, context=None): + """Masks out of vocab (OOV) words and computes their model score. + + For model-specific logic of calculating scores, see the `unmasked_score` + method. + """ + return self.unmasked_score( + self.vocab.lookup(word), self.vocab.lookup(context) if context else None + ) + + @abstractmethod + def unmasked_score(self, word, context=None): + """Score a word given some optional context. + + Concrete models are expected to provide an implementation. + Note that this method does not mask its arguments with the OOV label. + Use the `score` method for that. 
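+
+        For example (illustrative; "xyzzy" is just an arbitrary OOV word),
+        ``lm.score("xyzzy")`` first maps the word to the unknown label,
+        whereas ``lm.unmasked_score("xyzzy")`` looks it up verbatim and will
+        typically score it as unseen.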
+ + :param str word: Word for which we want the score + :param tuple(str) context: Context the word is in. + If `None`, compute unigram score. + :param context: tuple(str) or None + :rtype: float + """ + raise NotImplementedError() + + def logscore(self, word, context=None): + """Evaluate the log score of this word in this context. + + The arguments are the same as for `score` and `unmasked_score`. + + """ + return log_base2(self.score(word, context)) + + def context_counts(self, context): + """Helper method for retrieving counts for a given context. + + Assumes context has been checked and oov words in it masked. + :type context: tuple(str) or None + + """ + return ( + self.counts[len(context) + 1][context] if context else self.counts.unigrams + ) + + def entropy(self, text_ngrams): + """Calculate cross-entropy of model for given evaluation text. + + :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples. + :rtype: float + + """ + return -1 * _mean( + [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams] + ) + + def perplexity(self, text_ngrams): + """Calculates the perplexity of the given text. + + This is simply 2 ** cross-entropy for the text, so the arguments are the same. + + """ + return pow(2.0, self.entropy(text_ngrams)) + + def generate(self, num_words=1, text_seed=None, random_seed=None): + """Generate words from the model. + + :param int num_words: How many words to generate. By default 1. + :param text_seed: Generation can be conditioned on preceding context. + :param random_seed: A random seed or an instance of `random.Random`. If provided, + makes the random sampling part of generation reproducible. + :return: One (str) word or a list of words generated from model. + + Examples: + + >>> from nltk.lm import MLE + >>> lm = MLE(2) + >>> lm.fit([[("a", "b"), ("b", "c")]], vocabulary_text=['a', 'b', 'c']) + >>> lm.fit([[("a",), ("b",), ("c",)]]) + >>> lm.generate(random_seed=3) + 'a' + >>> lm.generate(text_seed=['a']) + 'b' + + """ + text_seed = [] if text_seed is None else list(text_seed) + random_generator = _random_generator(random_seed) + # This is the base recursion case. + if num_words == 1: + context = ( + text_seed[-self.order + 1 :] + if len(text_seed) >= self.order + else text_seed + ) + samples = self.context_counts(self.vocab.lookup(context)) + while context and not samples: + context = context[1:] if len(context) > 1 else [] + samples = self.context_counts(self.vocab.lookup(context)) + # Sorting samples achieves two things: + # - reproducible randomness when sampling + # - turns Mapping into Sequence which `_weighted_choice` expects + samples = sorted(samples) + return _weighted_choice( + samples, + tuple(self.score(w, context) for w in samples), + random_generator, + ) + # We build up text one word at a time using the preceding context. 
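+        # Each pass through the loop below re-enters the num_words == 1 case
+        # above, so every new word is sampled with the previously generated
+        # words appended to the seed.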
+ generated = [] + for _ in range(num_words): + generated.append( + self.generate( + num_words=1, + text_seed=text_seed + generated, + random_seed=random_generator, + ) + ) + return generated diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/counter.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/counter.py new file mode 100644 index 0000000000000000000000000000000000000000..6a5ab9c6096d9015b14f4e9f3814dda75417391a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/counter.py @@ -0,0 +1,163 @@ +# Natural Language Toolkit +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +""" +Language Model Counter +---------------------- +""" + +from collections import defaultdict +from collections.abc import Sequence + +from nltk.probability import ConditionalFreqDist, FreqDist + + +class NgramCounter: + """Class for counting ngrams. + + Will count any ngram sequence you give it ;) + + First we need to make sure we are feeding the counter sentences of ngrams. + + >>> text = [["a", "b", "c", "d"], ["a", "c", "d", "c"]] + >>> from nltk.util import ngrams + >>> text_bigrams = [ngrams(sent, 2) for sent in text] + >>> text_unigrams = [ngrams(sent, 1) for sent in text] + + The counting itself is very simple. + + >>> from nltk.lm import NgramCounter + >>> ngram_counts = NgramCounter(text_bigrams + text_unigrams) + + You can conveniently access ngram counts using standard python dictionary notation. + String keys will give you unigram counts. + + >>> ngram_counts['a'] + 2 + >>> ngram_counts['aliens'] + 0 + + If you want to access counts for higher order ngrams, use a list or a tuple. + These are treated as "context" keys, so what you get is a frequency distribution + over all continuations after the given context. + + >>> sorted(ngram_counts[['a']].items()) + [('b', 1), ('c', 1)] + >>> sorted(ngram_counts[('a',)].items()) + [('b', 1), ('c', 1)] + + This is equivalent to specifying explicitly the order of the ngram (in this case + 2 for bigram) and indexing on the context. + + >>> ngram_counts[2][('a',)] is ngram_counts[['a']] + True + + Note that the keys in `ConditionalFreqDist` cannot be lists, only tuples! + It is generally advisable to use the less verbose and more flexible square + bracket notation. + + To get the count of the full ngram "a b", do this: + + >>> ngram_counts[['a']]['b'] + 1 + + Specifying the ngram order as a number can be useful for accessing all ngrams + in that order. + + >>> ngram_counts[2] + + + The keys of this `ConditionalFreqDist` are the contexts we discussed earlier. + Unigrams can also be accessed with a human-friendly alias. + + >>> ngram_counts.unigrams is ngram_counts[1] + True + + Similarly to `collections.Counter`, you can update counts after initialization. + + >>> ngram_counts['e'] + 0 + >>> ngram_counts.update([ngrams(["d", "e", "f"], 1)]) + >>> ngram_counts['e'] + 1 + + """ + + def __init__(self, ngram_text=None): + """Creates a new NgramCounter. + + If `ngram_text` is specified, counts ngrams from it, otherwise waits for + `update` method to be called explicitly. + + :param ngram_text: Optional text containing sentences of ngrams, as for `update` method. + :type ngram_text: Iterable(Iterable(tuple(str))) or None + + """ + self._counts = defaultdict(ConditionalFreqDist) + self._counts[1] = self.unigrams = FreqDist() + + if ngram_text: + self.update(ngram_text) + + def update(self, ngram_text): + """Updates ngram counts from `ngram_text`. 
+ + Expects `ngram_text` to be a sequence of sentences (sequences). + Each sentence consists of ngrams as tuples of strings. + + :param Iterable(Iterable(tuple(str))) ngram_text: Text containing sentences of ngrams. + :raises TypeError: if the ngrams are not tuples. + + """ + + for sent in ngram_text: + for ngram in sent: + if not isinstance(ngram, tuple): + raise TypeError( + "Ngram <{}> isn't a tuple, " "but {}".format(ngram, type(ngram)) + ) + + ngram_order = len(ngram) + if ngram_order == 1: + self.unigrams[ngram[0]] += 1 + continue + + context, word = ngram[:-1], ngram[-1] + self[ngram_order][context][word] += 1 + + def N(self): + """Returns grand total number of ngrams stored. + + This includes ngrams from all orders, so some duplication is expected. + :rtype: int + + >>> from nltk.lm import NgramCounter + >>> counts = NgramCounter([[("a", "b"), ("c",), ("d", "e")]]) + >>> counts.N() + 3 + + """ + return sum(val.N() for val in self._counts.values()) + + def __getitem__(self, item): + """User-friendly access to ngram counts.""" + if isinstance(item, int): + return self._counts[item] + elif isinstance(item, str): + return self._counts.__getitem__(1)[item] + elif isinstance(item, Sequence): + return self._counts.__getitem__(len(item) + 1)[tuple(item)] + + def __str__(self): + return "<{} with {} ngram orders and {} ngrams>".format( + self.__class__.__name__, len(self._counts), self.N() + ) + + def __len__(self): + return self._counts.__len__() + + def __contains__(self, item): + return item in self._counts diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/models.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/models.py new file mode 100644 index 0000000000000000000000000000000000000000..ee5094901a14802b835c934e0054762666cd467f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/models.py @@ -0,0 +1,141 @@ +# Natural Language Toolkit: Language Models +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# Manu Joseph +# URL: +# For license information, see LICENSE.TXT +"""Language Models""" + +from nltk.lm.api import LanguageModel, Smoothing +from nltk.lm.smoothing import AbsoluteDiscounting, KneserNey, WittenBell + + +class MLE(LanguageModel): + """Class for providing MLE ngram model scores. + + Inherits initialization from BaseNgramModel. + """ + + def unmasked_score(self, word, context=None): + """Returns the MLE score for a word given a context. + + Args: + - word is expected to be a string + - context is expected to be something reasonably convertible to a tuple + """ + return self.context_counts(context).freq(word) + + +class Lidstone(LanguageModel): + """Provides Lidstone-smoothed scores. + + In addition to initialization arguments from BaseNgramModel also requires + a number by which to increase the counts, gamma. + """ + + def __init__(self, gamma, *args, **kwargs): + super().__init__(*args, **kwargs) + self.gamma = gamma + + def unmasked_score(self, word, context=None): + """Add-one smoothing: Lidstone or Laplace. + + To see what kind, look at `gamma` attribute on the class. + + """ + counts = self.context_counts(context) + word_count = counts[word] + norm_count = counts.N() + return (word_count + self.gamma) / (norm_count + len(self.vocab) * self.gamma) + + +class Laplace(Lidstone): + """Implements Laplace (add one) smoothing. + + Initialization identical to BaseNgramModel because gamma is always 1. 
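+
+    For example, ``Laplace(2)`` scores exactly like ``Lidstone(1, 2)``.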
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(1, *args, **kwargs) + + +class StupidBackoff(LanguageModel): + """Provides StupidBackoff scores. + + In addition to initialization arguments from BaseNgramModel also requires + a parameter alpha with which we scale the lower order probabilities. + Note that this is not a true probability distribution as scores for ngrams + of the same order do not sum up to unity. + """ + + def __init__(self, alpha=0.4, *args, **kwargs): + super().__init__(*args, **kwargs) + self.alpha = alpha + + def unmasked_score(self, word, context=None): + if not context: + # Base recursion + return self.counts.unigrams.freq(word) + counts = self.context_counts(context) + word_count = counts[word] + norm_count = counts.N() + if word_count > 0: + return word_count / norm_count + else: + return self.alpha * self.unmasked_score(word, context[1:]) + + +class InterpolatedLanguageModel(LanguageModel): + """Logic common to all interpolated language models. + + The idea to abstract this comes from Chen & Goodman 1995. + Do not instantiate this class directly! + """ + + def __init__(self, smoothing_cls, order, **kwargs): + params = kwargs.pop("params", {}) + super().__init__(order, **kwargs) + self.estimator = smoothing_cls(self.vocab, self.counts, **params) + + def unmasked_score(self, word, context=None): + if not context: + # The base recursion case: no context, we only have a unigram. + return self.estimator.unigram_score(word) + if not self.counts[context]: + # It can also happen that we have no data for this context. + # In that case we defer to the lower-order ngram. + # This is the same as setting alpha to 0 and gamma to 1. + alpha, gamma = 0, 1 + else: + alpha, gamma = self.estimator.alpha_gamma(word, context) + return alpha + gamma * self.unmasked_score(word, context[1:]) + + +class WittenBellInterpolated(InterpolatedLanguageModel): + """Interpolated version of Witten-Bell smoothing.""" + + def __init__(self, order, **kwargs): + super().__init__(WittenBell, order, **kwargs) + + +class AbsoluteDiscountingInterpolated(InterpolatedLanguageModel): + """Interpolated version of smoothing with absolute discount.""" + + def __init__(self, order, discount=0.75, **kwargs): + super().__init__( + AbsoluteDiscounting, order, params={"discount": discount}, **kwargs + ) + + +class KneserNeyInterpolated(InterpolatedLanguageModel): + """Interpolated version of Kneser-Ney smoothing.""" + + def __init__(self, order, discount=0.1, **kwargs): + if not (0 <= discount <= 1): + raise ValueError( + "Discount must be between 0 and 1 for probabilities to sum to unity." 
+ ) + super().__init__( + KneserNey, order, params={"discount": discount, "order": order}, **kwargs + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/preprocessing.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba6d5bd2cfb59d479b203ebf99878024b2a0f76 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/preprocessing.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Language Model Preprocessing +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +from functools import partial +from itertools import chain + +from nltk.util import everygrams, pad_sequence + +flatten = chain.from_iterable +pad_both_ends = partial( + pad_sequence, + pad_left=True, + left_pad_symbol="<s>", + pad_right=True, + right_pad_symbol="</s>", +) +pad_both_ends.__doc__ = """Pads both ends of a sentence to length specified by ngram order. + + Following convention, "<s>" pads the start of the sentence and "</s>" pads its end. + """ + + +def padded_everygrams(order, sentence): + """Helper with some useful defaults. + + Applies pad_both_ends to sentence and follows it up with everygrams. + """ + return everygrams(list(pad_both_ends(sentence, n=order)), max_len=order) + + +def padded_everygram_pipeline(order, text): + """Default preprocessing for a sequence of sentences. + + Creates two iterators: + + - sentences padded and turned into sequences of `nltk.util.everygrams` + - sentences padded as above and chained together for a flat stream of words + + :param order: Largest ngram length produced by `everygrams`. + :param text: Text to iterate over. Expected to be an iterable of sentences. + :type text: Iterable[Iterable[str]] + :return: iterator over text as ngrams, iterator over text as vocabulary data + """ + padding_fn = partial(pad_both_ends, n=order) + return ( + (everygrams(list(padding_fn(sent)), max_len=order) for sent in text), + flatten(map(padding_fn, text)), + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/smoothing.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/smoothing.py new file mode 100644 index 0000000000000000000000000000000000000000..6761f1ead23f7ab5b410d9a5795b8b4fce189d2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/smoothing.py @@ -0,0 +1,127 @@ +# Natural Language Toolkit: Language Model Smoothing +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# Manu Joseph +# URL: +# For license information, see LICENSE.TXT +"""Smoothing algorithms for language modeling. + +According to Chen & Goodman 1995 these should work with both Backoff and +Interpolation. +""" +from operator import methodcaller + +from nltk.lm.api import Smoothing +from nltk.probability import ConditionalFreqDist + + +def _count_values_gt_zero(distribution): + """Count values that are greater than zero in a distribution. + + Assumes distribution is either a mapping with counts as values or + an instance of `nltk.ConditionalFreqDist`. + """ + as_count = ( + methodcaller("N") + if isinstance(distribution, ConditionalFreqDist) + else lambda count: count + ) + # We explicitly check that values are > 0 to guard against negative counts.
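A short usage sketch for `padded_everygram_pipeline`; note that both return values are lazy, single-use generators, so materialize them (or feed them straight to a model's `fit`) only once:

```python
from nltk.lm.preprocessing import padded_everygram_pipeline

train, vocab = padded_everygram_pipeline(2, [["a", "b"], ["c"]])

# Everygrams of each padded sentence (ordering may vary across NLTK versions):
for sent in train:
    print(sorted(sent, key=len))
# e.g. [('<s>',), ('a',), ('b',), ('</s>',), ('<s>', 'a'), ('a', 'b'), ('b', '</s>')]

print(list(vocab))  # ['<s>', 'a', 'b', '</s>', '<s>', 'c', '</s>']
```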
+ return sum( + 1 for dist_or_count in distribution.values() if as_count(dist_or_count) > 0 + ) + + +class WittenBell(Smoothing): + """Witten-Bell smoothing.""" + + def __init__(self, vocabulary, counter, **kwargs): + super().__init__(vocabulary, counter, **kwargs) + + def alpha_gamma(self, word, context): + alpha = self.counts[context].freq(word) + gamma = self._gamma(context) + return (1.0 - gamma) * alpha, gamma + + def _gamma(self, context): + n_plus = _count_values_gt_zero(self.counts[context]) + return n_plus / (n_plus + self.counts[context].N()) + + def unigram_score(self, word): + return self.counts.unigrams.freq(word) + + +class AbsoluteDiscounting(Smoothing): + """Smoothing with absolute discount.""" + + def __init__(self, vocabulary, counter, discount=0.75, **kwargs): + super().__init__(vocabulary, counter, **kwargs) + self.discount = discount + + def alpha_gamma(self, word, context): + alpha = ( + max(self.counts[context][word] - self.discount, 0) + / self.counts[context].N() + ) + gamma = self._gamma(context) + return alpha, gamma + + def _gamma(self, context): + n_plus = _count_values_gt_zero(self.counts[context]) + return (self.discount * n_plus) / self.counts[context].N() + + def unigram_score(self, word): + return self.counts.unigrams.freq(word) + + +class KneserNey(Smoothing): + """Kneser-Ney Smoothing. + + This is an extension of smoothing with a discount. + + Resources: + - https://pages.ucsd.edu/~rlevy/lign256/winter2008/kneser_ney_mini_example.pdf + - https://www.youtube.com/watch?v=ody1ysUTD7o + - https://medium.com/@dennyc/a-simple-numerical-example-for-kneser-ney-smoothing-nlp-4600addf38b8 + - https://www.cl.uni-heidelberg.de/courses/ss15/smt/scribe6.pdf + - https://www-i6.informatik.rwth-aachen.de/publications/download/951/Kneser-ICASSP-1995.pdf + """ + + def __init__(self, vocabulary, counter, order, discount=0.1, **kwargs): + super().__init__(vocabulary, counter, **kwargs) + self.discount = discount + self._order = order + + def unigram_score(self, word): + word_continuation_count, total_count = self._continuation_counts(word) + return word_continuation_count / total_count + + def alpha_gamma(self, word, context): + prefix_counts = self.counts[context] + word_continuation_count, total_count = ( + (prefix_counts[word], prefix_counts.N()) + if len(context) + 1 == self._order + else self._continuation_counts(word, context) + ) + alpha = max(word_continuation_count - self.discount, 0.0) / total_count + gamma = self.discount * _count_values_gt_zero(prefix_counts) / total_count + return alpha, gamma + + def _continuation_counts(self, word, context=tuple()): + """Count continuations that end with context and word. + + Continuations track unique ngram "types", regardless of how many + instances were observed for each "type". + This is different than raw ngram counts which track number of instances. 
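A hand-computed sketch of the quantities that `_gamma` and `alpha_gamma` combine for Witten-Bell, using a plain `Counter` in place of a `ConditionalFreqDist` entry (the toy counts are hypothetical):

```python
from collections import Counter

# Continuations observed after some context, e.g. ("a",): "b" 3x, "c" 1x.
continuations = Counter({"b": 3, "c": 1})

n_plus = sum(1 for c in continuations.values() if c > 0)  # distinct types: 2
total = sum(continuations.values())                       # N() analogue: 4

gamma = n_plus / (n_plus + total)            # Witten-Bell reserved mass: 1/3
alpha_b = (1.0 - gamma) * continuations["b"] / total
print(gamma, alpha_b)                        # 0.333..., 0.5
```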
+ """ + higher_order_ngrams_with_context = ( + counts + for prefix_ngram, counts in self.counts[len(context) + 2].items() + if prefix_ngram[1:] == context + ) + higher_order_ngrams_with_word_count, total = 0, 0 + for counts in higher_order_ngrams_with_context: + higher_order_ngrams_with_word_count += int(counts[word] > 0) + total += _count_values_gt_zero(counts) + return higher_order_ngrams_with_word_count, total diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/util.py new file mode 100644 index 0000000000000000000000000000000000000000..483e64c26abd85ad9bb6caf74d3bb38fd9ae7d66 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/util.py @@ -0,0 +1,19 @@ +# Natural Language Toolkit +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +"""Language Model Utilities""" + +from math import log + +NEG_INF = float("-inf") +POS_INF = float("inf") + + +def log_base2(score): + """Convenience function for computing logarithms with base 2.""" + if score == 0.0: + return NEG_INF + return log(score, 2) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/vocabulary.py b/llmeval-env/lib/python3.10/site-packages/nltk/lm/vocabulary.py new file mode 100644 index 0000000000000000000000000000000000000000..74964b262f78fe305cd9e5445d833683ed172978 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/lm/vocabulary.py @@ -0,0 +1,218 @@ +# Natural Language Toolkit +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +"""Language Model Vocabulary""" + +import sys +from collections import Counter +from collections.abc import Iterable +from functools import singledispatch +from itertools import chain + + +@singledispatch +def _dispatched_lookup(words, vocab): + raise TypeError(f"Unsupported type for looking up in vocabulary: {type(words)}") + + +@_dispatched_lookup.register(Iterable) +def _(words, vocab): + """Look up a sequence of words in the vocabulary. + + Returns an iterator over looked up words. + + """ + return tuple(_dispatched_lookup(w, vocab) for w in words) + + +@_dispatched_lookup.register(str) +def _string_lookup(word, vocab): + """Looks up one word in the vocabulary.""" + return word if word in vocab else vocab.unk_label + + +class Vocabulary: + """Stores language model vocabulary. + + Satisfies two common language modeling requirements for a vocabulary: + + - When checking membership and calculating its size, filters items + by comparing their counts to a cutoff value. + - Adds a special "unknown" token which unseen words are mapped to. + + >>> words = ['a', 'c', '-', 'd', 'c', 'a', 'b', 'r', 'a', 'c', 'd'] + >>> from nltk.lm import Vocabulary + >>> vocab = Vocabulary(words, unk_cutoff=2) + + Tokens with counts greater than or equal to the cutoff value will + be considered part of the vocabulary. + + >>> vocab['c'] + 3 + >>> 'c' in vocab + True + >>> vocab['d'] + 2 + >>> 'd' in vocab + True + + Tokens with frequency counts less than the cutoff value will be considered not + part of the vocabulary even though their entries in the count dictionary are + preserved. + + >>> vocab['b'] + 1 + >>> 'b' in vocab + False + >>> vocab['aliens'] + 0 + >>> 'aliens' in vocab + False + + Keeping the count entries for seen words allows us to change the cutoff value + without having to recalculate the counts. 
+ + >>> vocab2 = Vocabulary(vocab.counts, unk_cutoff=1) + >>> "b" in vocab2 + True + + The cutoff value influences not only membership checking but also the result of + getting the size of the vocabulary using the built-in `len`. + Note that while the number of keys in the vocabulary's counter stays the same, + the items in the vocabulary differ depending on the cutoff. + We use `sorted` to demonstrate because it keeps the order consistent. + + >>> sorted(vocab2.counts) + ['-', 'a', 'b', 'c', 'd', 'r'] + >>> sorted(vocab2) + ['-', '<UNK>', 'a', 'b', 'c', 'd', 'r'] + >>> sorted(vocab.counts) + ['-', 'a', 'b', 'c', 'd', 'r'] + >>> sorted(vocab) + ['<UNK>', 'a', 'c', 'd'] + + In addition to items it gets populated with, the vocabulary stores a special + token that stands in for so-called "unknown" items. By default it's "<UNK>". + + >>> "<UNK>" in vocab + True + + We can look up words in a vocabulary using its `lookup` method. + "Unseen" words (with counts less than cutoff) are looked up as the unknown label. + If given one word (a string) as an input, this method will return a string. + + >>> vocab.lookup("a") + 'a' + >>> vocab.lookup("aliens") + '<UNK>' + + If given a sequence, it will return a tuple of the looked up words. + + >>> vocab.lookup(["p", 'a', 'r', 'd', 'b', 'c']) + ('<UNK>', 'a', '<UNK>', 'd', '<UNK>', 'c') + + It's possible to update the counts after the vocabulary has been created. + In general, the interface is the same as that of `collections.Counter`. + + >>> vocab['b'] + 1 + >>> vocab.update(["b", "b", "c"]) + >>> vocab['b'] + 3 + """ + + def __init__(self, counts=None, unk_cutoff=1, unk_label="<UNK>"): + """Create a new Vocabulary. + + :param counts: Optional iterable or `collections.Counter` instance to + pre-seed the Vocabulary. In case it is iterable, counts + are calculated. + :param int unk_cutoff: Words that occur less frequently than this value + are not considered part of the vocabulary. + :param unk_label: Label for marking words not part of vocabulary. + + """ + self.unk_label = unk_label + if unk_cutoff < 1: + raise ValueError(f"Cutoff value cannot be less than 1. Got: {unk_cutoff}") + self._cutoff = unk_cutoff + + self.counts = Counter() + self.update(counts if counts is not None else "") + + @property + def cutoff(self): + """Cutoff value. + + Items with count below this value are not considered part of vocabulary. + + """ + return self._cutoff + + def update(self, *counter_args, **counter_kwargs): + """Update vocabulary counts. + + Wraps `collections.Counter.update` method. + + """ + self.counts.update(*counter_args, **counter_kwargs) + self._len = sum(1 for _ in self) + + def lookup(self, words): + """Look up one or more words in the vocabulary. + + If passed one word as a string, will return that word or `self.unk_label`. + Otherwise will assume it was passed a sequence of words, will try to look + each of them up and return a tuple of the looked up words. + + :param words: Word(s) to look up.
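A consolidated usage sketch for `Vocabulary`, pulling together the cutoff, lookup, and update behaviour documented above:

```python
from nltk.lm import Vocabulary

vocab = Vocabulary(["a", "b", "c", "a", "b"], unk_cutoff=2)
print("a" in vocab, "c" in vocab)     # True False ("c" was seen only once)
print(vocab.lookup(["a", "c", "b"]))  # ('a', '<UNK>', 'b')

vocab.update(["c", "c"])              # counts stay mutable, Counter-style
print("c" in vocab)                   # True: "c" now meets the cutoff
```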
+ :type words: Iterable(str) or str + :rtype: tuple(str) or str + :raises: TypeError for types other than strings or iterables + + >>> from nltk.lm import Vocabulary + >>> vocab = Vocabulary(["a", "b", "c", "a", "b"], unk_cutoff=2) + >>> vocab.lookup("a") + 'a' + >>> vocab.lookup("aliens") + '<UNK>' + >>> vocab.lookup(["a", "b", "c", ["x", "b"]]) + ('a', 'b', '<UNK>', ('<UNK>', 'b')) + + """ + return _dispatched_lookup(words, self) + + def __getitem__(self, item): + return self._cutoff if item == self.unk_label else self.counts[item] + + def __contains__(self, item): + """Only consider items with counts GE to cutoff as being in the + vocabulary.""" + return self[item] >= self.cutoff + + def __iter__(self): + """Building on membership check define how to iterate over + vocabulary.""" + return chain( + (item for item in self.counts if item in self), + [self.unk_label] if self.counts else [], + ) + + def __len__(self): + """Computing size of vocabulary reflects the cutoff.""" + return self._len + + def __eq__(self, other): + return ( + self.unk_label == other.unk_label + and self.cutoff == other.cutoff + and self.counts == other.counts + ) + + def __str__(self): + return "<{} with cutoff={} unk_label='{}' and {} items>".format( + self.__class__.__name__, self.cutoff, self.unk_label, len(self) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558d72fc9e8c7ac80a41ec49869ed53dc94e1a97 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3cd5c78cda685ba7ec218a9aabdd0571fc1aa3f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c749b1711c71659a84d16ef5f633f2b3019b56c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/vader.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/vader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..244879f380995051cd86e51875c51d08b5a4957a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__pycache__/vader.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/sentiment_analyzer.py b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/sentiment_analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..6654de34d8fbe801bcdde6ef37e1951e207f5ff9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/sentiment_analyzer.py @@ -0,0 +1,255 @@ + +# Natural Language Toolkit: Sentiment Analyzer +# +# Copyright (C) 2001-2023
NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +A SentimentAnalyzer is a tool to implement and facilitate Sentiment Analysis tasks +using NLTK features and classifiers, especially for teaching and demonstrative +purposes. +""" + +import sys +from collections import defaultdict + +from nltk.classify.util import accuracy as eval_accuracy +from nltk.classify.util import apply_features +from nltk.collocations import BigramCollocationFinder +from nltk.metrics import BigramAssocMeasures +from nltk.metrics import f_measure as eval_f_measure +from nltk.metrics import precision as eval_precision +from nltk.metrics import recall as eval_recall +from nltk.probability import FreqDist + + +class SentimentAnalyzer: + """ + A Sentiment Analysis tool based on machine learning approaches. + """ + + def __init__(self, classifier=None): + self.feat_extractors = defaultdict(list) + self.classifier = classifier + + def all_words(self, documents, labeled=None): + """ + Return all words/tokens from the documents (with duplicates). + + :param documents: a list of (words, label) tuples. + :param labeled: if `True`, assume that each document is represented by a + (words, label) tuple: (list(str), str). If `False`, each document is + considered as being a simple list of strings: list(str). + :rtype: list(str) + :return: A list of all words/tokens in `documents`. + """ + all_words = [] + if labeled is None: + labeled = documents and isinstance(documents[0], tuple) + if labeled: + for words, _sentiment in documents: + all_words.extend(words) + elif not labeled: + for words in documents: + all_words.extend(words) + return all_words + + def apply_features(self, documents, labeled=None): + """ + Apply all feature extractor functions to the documents. This is a wrapper + around `nltk.classify.util.apply_features`. + + If `labeled=False`, return featuresets as: + [feature_func(doc) for doc in documents] + If `labeled=True`, return featuresets as: + [(feature_func(tok), label) for (tok, label) in toks] + + :param documents: a list of documents. `If labeled=True`, the method expects + a list of (words, label) tuples. + :rtype: LazyMap + """ + return apply_features(self.extract_features, documents, labeled) + + def unigram_word_feats(self, words, top_n=None, min_freq=0): + """ + Return most common top_n word features. + + :param words: a list of words/tokens. + :param top_n: number of best words/tokens to use, sorted by frequency. + :rtype: list(str) + :return: A list of `top_n` words/tokens (with no duplicates) sorted by + frequency. + """ + # Stopwords are not removed + unigram_feats_freqs = FreqDist(word for word in words) + return [ + w + for w, f in unigram_feats_freqs.most_common(top_n) + if unigram_feats_freqs[w] > min_freq + ] + + def bigram_collocation_feats( + self, documents, top_n=None, min_freq=3, assoc_measure=BigramAssocMeasures.pmi + ): + """ + Return `top_n` bigram features (using `assoc_measure`). + Note that this method is based on bigram collocations measures, and not + on simple bigram frequency. + + :param documents: a list (or iterable) of tokens. + :param top_n: number of best words/tokens to use, sorted by association + measure. + :param assoc_measure: bigram association measure to use as score function. + :param min_freq: the minimum number of occurrencies of bigrams to take + into consideration. + + :return: `top_n` ngrams scored by the given association measure. 
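A small sketch of the two feature-selection helpers just defined, on hypothetical toy documents:

```python
from nltk.metrics import BigramAssocMeasures
from nltk.sentiment import SentimentAnalyzer

docs = [["great", "movie", "great", "fun"], ["boring", "movie"]]
analyzer = SentimentAnalyzer()

words = analyzer.all_words(docs, labeled=False)
# Tie-breaking among equal frequencies is arbitrary:
print(analyzer.unigram_word_feats(words, top_n=3))

# Collocation-scored bigrams, not merely frequent ones:
print(analyzer.bigram_collocation_feats(
    docs, top_n=2, min_freq=1, assoc_measure=BigramAssocMeasures.pmi))
```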
+ """ + finder = BigramCollocationFinder.from_documents(documents) + finder.apply_freq_filter(min_freq) + return finder.nbest(assoc_measure, top_n) + + def classify(self, instance): + """ + Classify a single instance applying the features that have already been + stored in the SentimentAnalyzer. + + :param instance: a list (or iterable) of tokens. + :return: the classification result given by applying the classifier. + """ + instance_feats = self.apply_features([instance], labeled=False) + return self.classifier.classify(instance_feats[0]) + + def add_feat_extractor(self, function, **kwargs): + """ + Add a new function to extract features from a document. This function will + be used in extract_features(). + Important: in this step our kwargs are only representing additional parameters, + and NOT the document we have to parse. The document will always be the first + parameter in the parameter list, and it will be added in the extract_features() + function. + + :param function: the extractor function to add to the list of feature extractors. + :param kwargs: additional parameters required by the `function` function. + """ + self.feat_extractors[function].append(kwargs) + + def extract_features(self, document): + """ + Apply extractor functions (and their parameters) to the present document. + We pass `document` as the first parameter of the extractor functions. + If we want to use the same extractor function multiple times, we have to + add it to the extractors with `add_feat_extractor` using multiple sets of + parameters (one for each call of the extractor function). + + :param document: the document that will be passed as argument to the + feature extractor functions. + :return: A dictionary of populated features extracted from the document. + :rtype: dict + """ + all_features = {} + for extractor in self.feat_extractors: + for param_set in self.feat_extractors[extractor]: + feats = extractor(document, **param_set) + all_features.update(feats) + return all_features + + def train(self, trainer, training_set, save_classifier=None, **kwargs): + """ + Train classifier on the training set, optionally saving the output in the + file specified by `save_classifier`. + Additional arguments depend on the specific trainer used. For example, + a MaxentClassifier can use `max_iter` parameter to specify the number + of iterations, while a NaiveBayesClassifier cannot. + + :param trainer: `train` method of a classifier. + E.g.: NaiveBayesClassifier.train + :param training_set: the training set to be passed as argument to the + classifier `train` method. + :param save_classifier: the filename of the file where the classifier + will be stored (optional). + :param kwargs: additional parameters that will be passed as arguments to + the classifier `train` function. + :return: A classifier instance trained on the training set. + :rtype: + """ + print("Training classifier") + self.classifier = trainer(training_set, **kwargs) + if save_classifier: + self.save_file(self.classifier, save_classifier) + + return self.classifier + + def save_file(self, content, filename): + """ + Store `content` in `filename`. Can be used to store a SentimentAnalyzer. 
+ """ + print("Saving", filename, file=sys.stderr) + with open(filename, "wb") as storage_file: + import pickle + + # The protocol=2 parameter is for python2 compatibility + pickle.dump(content, storage_file, protocol=2) + + def evaluate( + self, + test_set, + classifier=None, + accuracy=True, + f_measure=True, + precision=True, + recall=True, + verbose=False, + ): + """ + Evaluate and print classifier performance on the test set. + + :param test_set: A list of (tokens, label) tuples to use as gold set. + :param classifier: a classifier instance (previously trained). + :param accuracy: if `True`, evaluate classifier accuracy. + :param f_measure: if `True`, evaluate classifier f_measure. + :param precision: if `True`, evaluate classifier precision. + :param recall: if `True`, evaluate classifier recall. + :return: evaluation results. + :rtype: dict(str): float + """ + if classifier is None: + classifier = self.classifier + print(f"Evaluating {type(classifier).__name__} results...") + metrics_results = {} + if accuracy: + accuracy_score = eval_accuracy(classifier, test_set) + metrics_results["Accuracy"] = accuracy_score + + gold_results = defaultdict(set) + test_results = defaultdict(set) + labels = set() + for i, (feats, label) in enumerate(test_set): + labels.add(label) + gold_results[label].add(i) + observed = classifier.classify(feats) + test_results[observed].add(i) + + for label in labels: + if precision: + precision_score = eval_precision( + gold_results[label], test_results[label] + ) + metrics_results[f"Precision [{label}]"] = precision_score + if recall: + recall_score = eval_recall(gold_results[label], test_results[label]) + metrics_results[f"Recall [{label}]"] = recall_score + if f_measure: + f_measure_score = eval_f_measure( + gold_results[label], test_results[label] + ) + metrics_results[f"F-measure [{label}]"] = f_measure_score + + # Print evaluation results (in alphabetical order) + if verbose: + for result in sorted(metrics_results): + print(f"{result}: {metrics_results[result]}") + + return metrics_results diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/util.py new file mode 100644 index 0000000000000000000000000000000000000000..0a698981e1d2be99e97e5e474f016781921a2595 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/util.py @@ -0,0 +1,887 @@ +# +# Natural Language Toolkit: Sentiment Analyzer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +Utility methods for Sentiment Analysis. 
+""" + +import codecs +import csv +import json +import pickle +import random +import re +import sys +import time +from copy import deepcopy + +import nltk +from nltk.corpus import CategorizedPlaintextCorpusReader +from nltk.data import load +from nltk.tokenize.casual import EMOTICON_RE + +# //////////////////////////////////////////////////////////// +# { Regular expressions +# //////////////////////////////////////////////////////////// + +# Regular expression for negation by Christopher Potts +NEGATION = r""" + (?: + ^(?:never|no|nothing|nowhere|noone|none|not| + havent|hasnt|hadnt|cant|couldnt|shouldnt| + wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint + )$ + ) + | + n't""" + +NEGATION_RE = re.compile(NEGATION, re.VERBOSE) + +CLAUSE_PUNCT = r"^[.:;!?]$" +CLAUSE_PUNCT_RE = re.compile(CLAUSE_PUNCT) + +# Happy and sad emoticons + +HAPPY = { + ":-)", + ":)", + ";)", + ":o)", + ":]", + ":3", + ":c)", + ":>", + "=]", + "8)", + "=)", + ":}", + ":^)", + ":-D", + ":D", + "8-D", + "8D", + "x-D", + "xD", + "X-D", + "XD", + "=-D", + "=D", + "=-3", + "=3", + ":-))", + ":'-)", + ":')", + ":*", + ":^*", + ">:P", + ":-P", + ":P", + "X-P", + "x-p", + "xp", + "XP", + ":-p", + ":p", + "=p", + ":-b", + ":b", + ">:)", + ">;)", + ">:-)", + "<3", +} + +SAD = { + ":L", + ":-/", + ">:/", + ":S", + ">:[", + ":@", + ":-(", + ":[", + ":-||", + "=L", + ":<", + ":-[", + ":-<", + "=\\", + "=/", + ">:(", + ":(", + ">.<", + ":'-(", + ":'(", + ":\\", + ":-c", + ":c", + ":{", + ">:\\", + ";(", +} + + +def timer(method): + """ + A timer decorator to measure execution performance of methods. + """ + + def timed(*args, **kw): + start = time.time() + result = method(*args, **kw) + end = time.time() + tot_time = end - start + hours = tot_time // 3600 + mins = tot_time // 60 % 60 + # in Python 2.x round() will return a float, so we convert it to int + secs = int(round(tot_time % 60)) + if hours == 0 and mins == 0 and secs < 10: + print(f"[TIMER] {method.__name__}(): {method.__name__:.3f} seconds") + else: + print(f"[TIMER] {method.__name__}(): {hours}h {mins}m {secs}s") + return result + + return timed + + +# //////////////////////////////////////////////////////////// +# { Feature extractor functions +# //////////////////////////////////////////////////////////// +""" +Feature extractor functions are declared outside the SentimentAnalyzer class. +Users should have the possibility to create their own feature extractors +without modifying SentimentAnalyzer. +""" + + +def extract_unigram_feats(document, unigrams, handle_negation=False): + """ + Populate a dictionary of unigram features, reflecting the presence/absence in + the document of each of the tokens in `unigrams`. + + :param document: a list of words/tokens. + :param unigrams: a list of words/tokens whose presence/absence has to be + checked in `document`. + :param handle_negation: if `handle_negation == True` apply `mark_negation` + method to `document` before checking for unigram presence/absence. + :return: a dictionary of unigram features {unigram : boolean}. 
+ + >>> words = ['ice', 'police', 'riot'] + >>> document = 'ice is melting due to global warming'.split() + >>> sorted(extract_unigram_feats(document, words).items()) + [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)] + """ + features = {} + if handle_negation: + document = mark_negation(document) + for word in unigrams: + features[f"contains({word})"] = word in set(document) + return features + + +def extract_bigram_feats(document, bigrams): + """ + Populate a dictionary of bigram features, reflecting the presence/absence in + the document of each of the tokens in `bigrams`. This extractor function only + considers contiguous bigrams obtained by `nltk.bigrams`. + + :param document: a list of words/tokens. + :param bigrams: a list of bigrams whose presence/absence has to be + checked in `document`. + :return: a dictionary of bigram features {bigram : boolean}. + + >>> bigrams = [('global', 'warming'), ('police', 'prevented'), ('love', 'you')] + >>> document = 'ice is melting due to global warming'.split() + >>> sorted(extract_bigram_feats(document, bigrams).items()) # doctest: +NORMALIZE_WHITESPACE + [('contains(global - warming)', True), ('contains(love - you)', False), + ('contains(police - prevented)', False)] + """ + features = {} + for bigr in bigrams: + features[f"contains({bigr[0]} - {bigr[1]})"] = bigr in nltk.bigrams(document) + return features + + +# //////////////////////////////////////////////////////////// +# { Helper Functions +# //////////////////////////////////////////////////////////// + + +def mark_negation(document, double_neg_flip=False, shallow=False): + """ + Append _NEG suffix to words that appear in the scope between a negation + and a punctuation mark. + + :param document: a list of words/tokens, or a tuple (words, label). + :param shallow: if True, the method will modify the original document in place. + :param double_neg_flip: if True, double negation is considered affirmation + (we activate/deactivate negation scope every time we find a negation). + :return: if `shallow == True` the method will modify the original document + and return it. If `shallow == False` the method will return a modified + document, leaving the original unmodified. + + >>> sent = "I didn't like this movie . It was bad .".split() + >>> mark_negation(sent) + ['I', "didn't", 'like_NEG', 'this_NEG', 'movie_NEG', '.', 'It', 'was', 'bad', '.'] + """ + if not shallow: + document = deepcopy(document) + # check if the document is labeled. If so, do not consider the label. + labeled = document and isinstance(document[0], (tuple, list)) + if labeled: + doc = document[0] + else: + doc = document + neg_scope = False + for i, word in enumerate(doc): + if NEGATION_RE.search(word): + if not neg_scope or (neg_scope and double_neg_flip): + neg_scope = not neg_scope + continue + else: + doc[i] += "_NEG" + elif neg_scope and CLAUSE_PUNCT_RE.search(word): + neg_scope = not neg_scope + elif neg_scope and not CLAUSE_PUNCT_RE.search(word): + doc[i] += "_NEG" + + return document + + +def output_markdown(filename, **kwargs): + """ + Write the output of an analysis to a file.
+ """ + with codecs.open(filename, "at") as outfile: + text = "\n*** \n\n" + text += "{} \n\n".format(time.strftime("%d/%m/%Y, %H:%M")) + for k in sorted(kwargs): + if isinstance(kwargs[k], dict): + dictionary = kwargs[k] + text += f" - **{k}:**\n" + for entry in sorted(dictionary): + text += f" - {entry}: {dictionary[entry]} \n" + elif isinstance(kwargs[k], list): + text += f" - **{k}:**\n" + for entry in kwargs[k]: + text += f" - {entry}\n" + else: + text += f" - **{k}:** {kwargs[k]} \n" + outfile.write(text) + + +def split_train_test(all_instances, n=None): + """ + Randomly split `n` instances of the dataset into train and test sets. + + :param all_instances: a list of instances (e.g. documents) that will be split. + :param n: the number of instances to consider (in case we want to use only a + subset). + :return: two lists of instances. Train set is 8/10 of the total and test set + is 2/10 of the total. + """ + random.seed(12345) + random.shuffle(all_instances) + if not n or n > len(all_instances): + n = len(all_instances) + train_set = all_instances[: int(0.8 * n)] + test_set = all_instances[int(0.8 * n) : n] + + return train_set, test_set + + +def _show_plot(x_values, y_values, x_labels=None, y_labels=None): + try: + import matplotlib.pyplot as plt + except ImportError as e: + raise ImportError( + "The plot function requires matplotlib to be installed." + "See https://matplotlib.org/" + ) from e + + plt.locator_params(axis="y", nbins=3) + axes = plt.axes() + axes.yaxis.grid() + plt.plot(x_values, y_values, "ro", color="red") + plt.ylim(ymin=-1.2, ymax=1.2) + plt.tight_layout(pad=5) + if x_labels: + plt.xticks(x_values, x_labels, rotation="vertical") + if y_labels: + plt.yticks([-1, 0, 1], y_labels, rotation="horizontal") + # Pad margins so that markers are not clipped by the axes + plt.margins(0.2) + plt.show() + + +# //////////////////////////////////////////////////////////// +# { Parsing and conversion functions +# //////////////////////////////////////////////////////////// + + +def json2csv_preprocess( + json_file, + outfile, + fields, + encoding="utf8", + errors="replace", + gzip_compress=False, + skip_retweets=True, + skip_tongue_tweets=True, + skip_ambiguous_tweets=True, + strip_off_emoticons=True, + remove_duplicates=True, + limit=None, +): + """ + Convert json file to csv file, preprocessing each row to obtain a suitable + dataset for tweets Semantic Analysis. + + :param json_file: the original json file containing tweets. + :param outfile: the output csv filename. + :param fields: a list of fields that will be extracted from the json file and + kept in the output csv file. + :param encoding: the encoding of the files. + :param errors: the error handling strategy for the output writer. + :param gzip_compress: if True, create a compressed GZIP file. + + :param skip_retweets: if True, remove retweets. + :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P" + emoticons. + :param skip_ambiguous_tweets: if True, remove tweets containing both happy + and sad emoticons. + :param strip_off_emoticons: if True, strip off emoticons from all tweets. + :param remove_duplicates: if True, remove tweets appearing more than once. + :param limit: an integer to set the number of tweets to convert. After the + limit is reached the conversion will stop. It can be useful to create + subsets of the original tweets json data. 
+ """ + with codecs.open(json_file, encoding=encoding) as fp: + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + # write the list of fields as header + writer.writerow(fields) + + if remove_duplicates == True: + tweets_cache = [] + i = 0 + for line in fp: + tweet = json.loads(line) + row = extract_fields(tweet, fields) + try: + text = row[fields.index("text")] + # Remove retweets + if skip_retweets == True: + if re.search(r"\bRT\b", text): + continue + # Remove tweets containing ":P" and ":-P" emoticons + if skip_tongue_tweets == True: + if re.search(r"\:\-?P\b", text): + continue + # Remove tweets containing both happy and sad emoticons + if skip_ambiguous_tweets == True: + all_emoticons = EMOTICON_RE.findall(text) + if all_emoticons: + if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD): + continue + # Strip off emoticons from all tweets + if strip_off_emoticons == True: + row[fields.index("text")] = re.sub( + r"(?!\n)\s+", " ", EMOTICON_RE.sub("", text) + ) + # Remove duplicate tweets + if remove_duplicates == True: + if row[fields.index("text")] in tweets_cache: + continue + else: + tweets_cache.append(row[fields.index("text")]) + except ValueError: + pass + writer.writerow(row) + i += 1 + if limit and i >= limit: + break + outf.close() + + +def parse_tweets_set( + filename, label, word_tokenizer=None, sent_tokenizer=None, skip_header=True +): + """ + Parse csv file containing tweets and output data a list of (text, label) tuples. + + :param filename: the input csv filename. + :param label: the label to be appended to each tweet contained in the csv file. + :param word_tokenizer: the tokenizer instance that will be used to tokenize + each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()). + If no word_tokenizer is specified, tweets will not be tokenized. + :param sent_tokenizer: the tokenizer that will be used to split each tweet into + sentences. + :param skip_header: if True, skip the first line of the csv file (which usually + contains headers). + + :return: a list of (text, label) tuples. + """ + tweets = [] + if not sent_tokenizer: + sent_tokenizer = load("tokenizers/punkt/english.pickle") + + with codecs.open(filename, "rt") as csvfile: + reader = csv.reader(csvfile) + if skip_header == True: + next(reader, None) # skip the header + i = 0 + for tweet_id, text in reader: + # text = text[1] + i += 1 + sys.stdout.write(f"Loaded {i} tweets\r") + # Apply sentence and word tokenizer to text + if word_tokenizer: + tweet = [ + w + for sent in sent_tokenizer.tokenize(text) + for w in word_tokenizer.tokenize(sent) + ] + else: + tweet = text + tweets.append((tweet, label)) + + print(f"Loaded {i} tweets") + return tweets + + +# //////////////////////////////////////////////////////////// +# { Demos +# //////////////////////////////////////////////////////////// + + +def demo_tweets(trainer, n_instances=None, output=None): + """ + Train and test Naive Bayes classifier on 10000 tweets, tokenized using + TweetTokenizer. + Features are composed of: + + - 1000 most frequent unigrams + - 100 top bigrams (using BigramAssocMeasures.pmi) + + :param trainer: `train` method of a classifier. + :param n_instances: the number of total tweets that have to be used for + training and testing. Tweets will be equally split between positive and + negative. + :param output: the output file where results have to be reported. 
+ """ + from nltk.corpus import stopwords, twitter_samples + from nltk.sentiment import SentimentAnalyzer + from nltk.tokenize import TweetTokenizer + + # Different customizations for the TweetTokenizer + tokenizer = TweetTokenizer(preserve_case=False) + # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True) + # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True) + + if n_instances is not None: + n_instances = int(n_instances / 2) + + fields = ["id", "text"] + positive_json = twitter_samples.abspath("positive_tweets.json") + positive_csv = "positive_tweets.csv" + json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances) + + negative_json = twitter_samples.abspath("negative_tweets.json") + negative_csv = "negative_tweets.csv" + json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances) + + neg_docs = parse_tweets_set(negative_csv, label="neg", word_tokenizer=tokenizer) + pos_docs = parse_tweets_set(positive_csv, label="pos", word_tokenizer=tokenizer) + + # We separately split subjective and objective instances to keep a balanced + # uniform class distribution in both train and test sets. + train_pos_docs, test_pos_docs = split_train_test(pos_docs) + train_neg_docs, test_neg_docs = split_train_test(neg_docs) + + training_tweets = train_pos_docs + train_neg_docs + testing_tweets = test_pos_docs + test_neg_docs + + sentim_analyzer = SentimentAnalyzer() + # stopwords = stopwords.words('english') + # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords] + all_words = [word for word in sentim_analyzer.all_words(training_tweets)] + + # Add simple unigram word features + unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000) + sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) + + # Add bigram collocation features + bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats( + [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12 + ) + sentim_analyzer.add_feat_extractor( + extract_bigram_feats, bigrams=bigram_collocs_feats + ) + + training_set = sentim_analyzer.apply_features(training_tweets) + test_set = sentim_analyzer.apply_features(testing_tweets) + + classifier = sentim_analyzer.train(trainer, training_set) + # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4) + try: + classifier.show_most_informative_features() + except AttributeError: + print( + "Your classifier does not provide a show_most_informative_features() method." + ) + results = sentim_analyzer.evaluate(test_set) + + if output: + extr = [f.__name__ for f in sentim_analyzer.feat_extractors] + output_markdown( + output, + Dataset="labeled_tweets", + Classifier=type(classifier).__name__, + Tokenizer=tokenizer.__class__.__name__, + Feats=extr, + Results=results, + Instances=n_instances, + ) + + +def demo_movie_reviews(trainer, n_instances=None, output=None): + """ + Train classifier on all instances of the Movie Reviews dataset. + The corpus has been preprocessed using the default sentence tokenizer and + WordPunctTokenizer. + Features are composed of: + + - most frequent unigrams + + :param trainer: `train` method of a classifier. + :param n_instances: the number of total reviews that have to be used for + training and testing. Reviews will be equally split between positive and + negative. + :param output: the output file where results have to be reported. 
+ """ + from nltk.corpus import movie_reviews + from nltk.sentiment import SentimentAnalyzer + + if n_instances is not None: + n_instances = int(n_instances / 2) + + pos_docs = [ + (list(movie_reviews.words(pos_id)), "pos") + for pos_id in movie_reviews.fileids("pos")[:n_instances] + ] + neg_docs = [ + (list(movie_reviews.words(neg_id)), "neg") + for neg_id in movie_reviews.fileids("neg")[:n_instances] + ] + # We separately split positive and negative instances to keep a balanced + # uniform class distribution in both train and test sets. + train_pos_docs, test_pos_docs = split_train_test(pos_docs) + train_neg_docs, test_neg_docs = split_train_test(neg_docs) + + training_docs = train_pos_docs + train_neg_docs + testing_docs = test_pos_docs + test_neg_docs + + sentim_analyzer = SentimentAnalyzer() + all_words = sentim_analyzer.all_words(training_docs) + + # Add simple unigram word features + unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) + sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) + # Apply features to obtain a feature-value representation of our datasets + training_set = sentim_analyzer.apply_features(training_docs) + test_set = sentim_analyzer.apply_features(testing_docs) + + classifier = sentim_analyzer.train(trainer, training_set) + try: + classifier.show_most_informative_features() + except AttributeError: + print( + "Your classifier does not provide a show_most_informative_features() method." + ) + results = sentim_analyzer.evaluate(test_set) + + if output: + extr = [f.__name__ for f in sentim_analyzer.feat_extractors] + output_markdown( + output, + Dataset="Movie_reviews", + Classifier=type(classifier).__name__, + Tokenizer="WordPunctTokenizer", + Feats=extr, + Results=results, + Instances=n_instances, + ) + + +def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None): + """ + Train and test a classifier on instances of the Subjective Dataset by Pang and + Lee. The dataset is made of 5000 subjective and 5000 objective sentences. + All tokens (words and punctuation marks) are separated by a whitespace, so + we use the basic WhitespaceTokenizer to parse the data. + + :param trainer: `train` method of a classifier. + :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file. + :param n_instances: the number of total sentences that have to be used for + training and testing. Sentences will be equally split between positive + and negative. + :param output: the output file where results have to be reported. + """ + from nltk.corpus import subjectivity + from nltk.sentiment import SentimentAnalyzer + + if n_instances is not None: + n_instances = int(n_instances / 2) + + subj_docs = [ + (sent, "subj") for sent in subjectivity.sents(categories="subj")[:n_instances] + ] + obj_docs = [ + (sent, "obj") for sent in subjectivity.sents(categories="obj")[:n_instances] + ] + + # We separately split subjective and objective instances to keep a balanced + # uniform class distribution in both train and test sets. 
+ train_subj_docs, test_subj_docs = split_train_test(subj_docs) + train_obj_docs, test_obj_docs = split_train_test(obj_docs) + + training_docs = train_subj_docs + train_obj_docs + testing_docs = test_subj_docs + test_obj_docs + + sentim_analyzer = SentimentAnalyzer() + all_words_neg = sentim_analyzer.all_words( + [mark_negation(doc) for doc in training_docs] + ) + + # Add simple unigram word features handling negation + unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) + sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) + + # Apply features to obtain a feature-value representation of our datasets + training_set = sentim_analyzer.apply_features(training_docs) + test_set = sentim_analyzer.apply_features(testing_docs) + + classifier = sentim_analyzer.train(trainer, training_set) + try: + classifier.show_most_informative_features() + except AttributeError: + print( + "Your classifier does not provide a show_most_informative_features() method." + ) + results = sentim_analyzer.evaluate(test_set) + + if save_analyzer == True: + sentim_analyzer.save_file(sentim_analyzer, "sa_subjectivity.pickle") + + if output: + extr = [f.__name__ for f in sentim_analyzer.feat_extractors] + output_markdown( + output, + Dataset="subjectivity", + Classifier=type(classifier).__name__, + Tokenizer="WhitespaceTokenizer", + Feats=extr, + Instances=n_instances, + Results=results, + ) + + return sentim_analyzer + + +def demo_sent_subjectivity(text): + """ + Classify a single sentence as subjective or objective using a stored + SentimentAnalyzer. + + :param text: a sentence whose subjectivity has to be classified. + """ + from nltk.classify import NaiveBayesClassifier + from nltk.tokenize import regexp + + word_tokenizer = regexp.WhitespaceTokenizer() + try: + sentim_analyzer = load("sa_subjectivity.pickle") + except LookupError: + print("Cannot find the sentiment analyzer you want to load.") + print("Training a new one using NaiveBayesClassifier.") + sentim_analyzer = demo_subjectivity(NaiveBayesClassifier.train, True) + + # Tokenize and convert to lower case + tokenized_text = [word.lower() for word in word_tokenizer.tokenize(text)] + print(sentim_analyzer.classify(tokenized_text)) + + +def demo_liu_hu_lexicon(sentence, plot=False): + """ + Basic example of sentiment classification using Liu and Hu opinion lexicon. + This function simply counts the number of positive, negative and neutral words + in the sentence and classifies it depending on which polarity is more represented. + Words that do not appear in the lexicon are considered as neutral. + + :param sentence: a sentence whose polarity has to be classified. + :param plot: if True, plot a visual representation of the sentence polarity. 
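A usage sketch for `demo_liu_hu_lexicon` (requires the `opinion_lexicon` corpus; the sample sentence is hypothetical):

```python
import nltk
from nltk.sentiment.util import demo_liu_hu_lexicon

nltk.download("opinion_lexicon")  # one-time download
demo_liu_hu_lexicon("This movie was terrible and boring")  # prints "Negative"
```

Note that the demo checks each token against `opinion_lexicon.positive()` and `opinion_lexicon.negative()` with a linear membership scan; for repeated classification it is much faster to convert those word lists to `set`s once up front.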
+ """ + from nltk.corpus import opinion_lexicon + from nltk.tokenize import treebank + + tokenizer = treebank.TreebankWordTokenizer() + pos_words = 0 + neg_words = 0 + tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)] + + x = list(range(len(tokenized_sent))) # x axis for the plot + y = [] + + for word in tokenized_sent: + if word in opinion_lexicon.positive(): + pos_words += 1 + y.append(1) # positive + elif word in opinion_lexicon.negative(): + neg_words += 1 + y.append(-1) # negative + else: + y.append(0) # neutral + + if pos_words > neg_words: + print("Positive") + elif pos_words < neg_words: + print("Negative") + elif pos_words == neg_words: + print("Neutral") + + if plot == True: + _show_plot( + x, y, x_labels=tokenized_sent, y_labels=["Negative", "Neutral", "Positive"] + ) + + +def demo_vader_instance(text): + """ + Output polarity scores for a text using Vader approach. + + :param text: a text whose polarity has to be evaluated. + """ + from nltk.sentiment import SentimentIntensityAnalyzer + + vader_analyzer = SentimentIntensityAnalyzer() + print(vader_analyzer.polarity_scores(text)) + + +def demo_vader_tweets(n_instances=None, output=None): + """ + Classify 10000 positive and negative tweets using Vader approach. + + :param n_instances: the number of total tweets that have to be classified. + :param output: the output file where results have to be reported. + """ + from collections import defaultdict + + from nltk.corpus import twitter_samples + from nltk.metrics import accuracy as eval_accuracy + from nltk.metrics import f_measure as eval_f_measure + from nltk.metrics import precision as eval_precision + from nltk.metrics import recall as eval_recall + from nltk.sentiment import SentimentIntensityAnalyzer + + if n_instances is not None: + n_instances = int(n_instances / 2) + + fields = ["id", "text"] + positive_json = twitter_samples.abspath("positive_tweets.json") + positive_csv = "positive_tweets.csv" + json2csv_preprocess( + positive_json, + positive_csv, + fields, + strip_off_emoticons=False, + limit=n_instances, + ) + + negative_json = twitter_samples.abspath("negative_tweets.json") + negative_csv = "negative_tweets.csv" + json2csv_preprocess( + negative_json, + negative_csv, + fields, + strip_off_emoticons=False, + limit=n_instances, + ) + + pos_docs = parse_tweets_set(positive_csv, label="pos") + neg_docs = parse_tweets_set(negative_csv, label="neg") + + # We separately split subjective and objective instances to keep a balanced + # uniform class distribution in both train and test sets. 
+ train_pos_docs, test_pos_docs = split_train_test(pos_docs) + train_neg_docs, test_neg_docs = split_train_test(neg_docs) + + training_tweets = train_pos_docs + train_neg_docs + testing_tweets = test_pos_docs + test_neg_docs + + vader_analyzer = SentimentIntensityAnalyzer() + + gold_results = defaultdict(set) + test_results = defaultdict(set) + acc_gold_results = [] + acc_test_results = [] + labels = set() + num = 0 + for i, (text, label) in enumerate(testing_tweets): + labels.add(label) + gold_results[label].add(i) + acc_gold_results.append(label) + score = vader_analyzer.polarity_scores(text)["compound"] + if score > 0: + observed = "pos" + else: + observed = "neg" + num += 1 + acc_test_results.append(observed) + test_results[observed].add(i) + metrics_results = {} + for label in labels: + accuracy_score = eval_accuracy(acc_gold_results, acc_test_results) + metrics_results["Accuracy"] = accuracy_score + precision_score = eval_precision(gold_results[label], test_results[label]) + metrics_results[f"Precision [{label}]"] = precision_score + recall_score = eval_recall(gold_results[label], test_results[label]) + metrics_results[f"Recall [{label}]"] = recall_score + f_measure_score = eval_f_measure(gold_results[label], test_results[label]) + metrics_results[f"F-measure [{label}]"] = f_measure_score + + for result in sorted(metrics_results): + print(f"{result}: {metrics_results[result]}") + + if output: + output_markdown( + output, + Approach="Vader", + Dataset="labeled_tweets", + Instances=n_instances, + Results=metrics_results, + ) + + +if __name__ == "__main__": + from sklearn.svm import LinearSVC + + from nltk.classify import MaxentClassifier, NaiveBayesClassifier + from nltk.classify.scikitlearn import SklearnClassifier + from nltk.twitter.common import _outf_writer, extract_fields + + naive_bayes = NaiveBayesClassifier.train + svm = SklearnClassifier(LinearSVC()).train + maxent = MaxentClassifier.train + + demo_tweets(naive_bayes) + # demo_movie_reviews(svm) + # demo_subjectivity(svm) + # demo_sent_subjectivity("she's an artist , but hasn't picked up a brush in a year . ") + # demo_liu_hu_lexicon("This movie was actually neither that funny, nor super witty.", plot=True) + # demo_vader_instance("This movie was actually neither that funny, nor super witty.") + # demo_vader_tweets() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/vader.py b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/vader.py new file mode 100644 index 0000000000000000000000000000000000000000..2381b39a3a0da7750506283db9b2d3a5fe1d4633 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/vader.py @@ -0,0 +1,633 @@ +# Natural Language Toolkit: vader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: C.J. Hutto +# Ewan Klein (modifications) +# Pierpaolo Pantone <24alsecondo@gmail.com> (modifications) +# George Berry (modifications) +# Malavika Suresh (modifications) +# URL: +# For license information, see LICENSE.TXT +# +# Modifications to the original VADER code have been made in order to +# integrate it into NLTK. These have involved changes to +# ensure Python 3 compatibility, and refactoring to achieve greater modularity. + +""" +If you use the VADER sentiment analysis tools, please cite: + +Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for +Sentiment Analysis of Social Media Text. Eighth International Conference on +Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014. 
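The analyzer defined in this module is typically used through `nltk.sentiment.SentimentIntensityAnalyzer`. A minimal sketch (assumes the `vader_lexicon` resource has been downloaded):

```python
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer

nltk.download("vader_lexicon")  # one-time lexicon download
sia = SentimentIntensityAnalyzer()
print(sia.polarity_scores("VADER is smart, handsome, and funny!"))
# e.g. {'neg': 0.0, 'neu': ..., 'pos': ..., 'compound': ...}; exact values
# depend on the lexicon version
```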
+""" + +import math +import re +import string +from itertools import product + +import nltk.data +from nltk.util import pairwise + + +class VaderConstants: + """ + A class to keep the Vader lists and constants. + """ + + ##Constants## + # (empirically derived mean sentiment intensity rating increase for booster words) + B_INCR = 0.293 + B_DECR = -0.293 + + # (empirically derived mean sentiment intensity rating increase for using + # ALLCAPs to emphasize a word) + C_INCR = 0.733 + + N_SCALAR = -0.74 + + NEGATE = { + "aint", + "arent", + "cannot", + "cant", + "couldnt", + "darent", + "didnt", + "doesnt", + "ain't", + "aren't", + "can't", + "couldn't", + "daren't", + "didn't", + "doesn't", + "dont", + "hadnt", + "hasnt", + "havent", + "isnt", + "mightnt", + "mustnt", + "neither", + "don't", + "hadn't", + "hasn't", + "haven't", + "isn't", + "mightn't", + "mustn't", + "neednt", + "needn't", + "never", + "none", + "nope", + "nor", + "not", + "nothing", + "nowhere", + "oughtnt", + "shant", + "shouldnt", + "uhuh", + "wasnt", + "werent", + "oughtn't", + "shan't", + "shouldn't", + "uh-uh", + "wasn't", + "weren't", + "without", + "wont", + "wouldnt", + "won't", + "wouldn't", + "rarely", + "seldom", + "despite", + } + + # booster/dampener 'intensifiers' or 'degree adverbs' + # https://en.wiktionary.org/wiki/Category:English_degree_adverbs + + BOOSTER_DICT = { + "absolutely": B_INCR, + "amazingly": B_INCR, + "awfully": B_INCR, + "completely": B_INCR, + "considerably": B_INCR, + "decidedly": B_INCR, + "deeply": B_INCR, + "effing": B_INCR, + "enormously": B_INCR, + "entirely": B_INCR, + "especially": B_INCR, + "exceptionally": B_INCR, + "extremely": B_INCR, + "fabulously": B_INCR, + "flipping": B_INCR, + "flippin": B_INCR, + "fricking": B_INCR, + "frickin": B_INCR, + "frigging": B_INCR, + "friggin": B_INCR, + "fully": B_INCR, + "fucking": B_INCR, + "greatly": B_INCR, + "hella": B_INCR, + "highly": B_INCR, + "hugely": B_INCR, + "incredibly": B_INCR, + "intensely": B_INCR, + "majorly": B_INCR, + "more": B_INCR, + "most": B_INCR, + "particularly": B_INCR, + "purely": B_INCR, + "quite": B_INCR, + "really": B_INCR, + "remarkably": B_INCR, + "so": B_INCR, + "substantially": B_INCR, + "thoroughly": B_INCR, + "totally": B_INCR, + "tremendously": B_INCR, + "uber": B_INCR, + "unbelievably": B_INCR, + "unusually": B_INCR, + "utterly": B_INCR, + "very": B_INCR, + "almost": B_DECR, + "barely": B_DECR, + "hardly": B_DECR, + "just enough": B_DECR, + "kind of": B_DECR, + "kinda": B_DECR, + "kindof": B_DECR, + "kind-of": B_DECR, + "less": B_DECR, + "little": B_DECR, + "marginally": B_DECR, + "occasionally": B_DECR, + "partly": B_DECR, + "scarcely": B_DECR, + "slightly": B_DECR, + "somewhat": B_DECR, + "sort of": B_DECR, + "sorta": B_DECR, + "sortof": B_DECR, + "sort-of": B_DECR, + } + + # check for special case idioms using a sentiment-laden keyword known to SAGE + SPECIAL_CASE_IDIOMS = { + "the shit": 3, + "the bomb": 3, + "bad ass": 1.5, + "yeah right": -2, + "cut the mustard": 2, + "kiss of death": -1.5, + "hand to mouth": -2, + } + + # for removing punctuation + REGEX_REMOVE_PUNCTUATION = re.compile(f"[{re.escape(string.punctuation)}]") + + PUNC_LIST = [ + ".", + "!", + "?", + ",", + ";", + ":", + "-", + "'", + '"', + "!!", + "!!!", + "??", + "???", + "?!?", + "!?!", + "?!?!", + "!?!?", + ] + + def __init__(self): + pass + + def negated(self, input_words, include_nt=True): + """ + Determine if input contains negation words + """ + neg_words = self.NEGATE + if any(word.lower() in neg_words for word in input_words): + 
return True
+        if include_nt:
+            if any("n't" in word.lower() for word in input_words):
+                return True
+        for first, second in pairwise(input_words):
+            if second.lower() == "least" and first.lower() != "at":
+                return True
+        return False
+
+    def normalize(self, score, alpha=15):
+        """
+        Normalize the score to be between -1 and 1 using an alpha that
+        approximates the max expected value
+        """
+        norm_score = score / math.sqrt((score * score) + alpha)
+        return norm_score
+
+    def scalar_inc_dec(self, word, valence, is_cap_diff):
+        """
+        Check if the preceding words increase, decrease, or negate/nullify the
+        valence
+        """
+        scalar = 0.0
+        word_lower = word.lower()
+        if word_lower in self.BOOSTER_DICT:
+            scalar = self.BOOSTER_DICT[word_lower]
+            if valence < 0:
+                scalar *= -1
+            # check if booster/dampener word is in ALLCAPS (while others aren't)
+            if word.isupper() and is_cap_diff:
+                if valence > 0:
+                    scalar += self.C_INCR
+                else:
+                    scalar -= self.C_INCR
+        return scalar
+
+
+class SentiText:
+    """
+    Identify sentiment-relevant string-level properties of input text.
+    """
+
+    def __init__(self, text, punc_list, regex_remove_punctuation):
+        if not isinstance(text, str):
+            text = str(text.encode("utf-8"))
+        self.text = text
+        self.PUNC_LIST = punc_list
+        self.REGEX_REMOVE_PUNCTUATION = regex_remove_punctuation
+        self.words_and_emoticons = self._words_and_emoticons()
+        # doesn't separate words from
+        # adjacent punctuation (keeps emoticons & contractions)
+        self.is_cap_diff = self.allcap_differential(self.words_and_emoticons)
+
+    def _words_plus_punc(self):
+        """
+        Returns mapping of form:
+        {
+            'cat,': 'cat',
+            ',cat': 'cat',
+        }
+        """
+        no_punc_text = self.REGEX_REMOVE_PUNCTUATION.sub("", self.text)
+        # removes punctuation (but loses emoticons & contractions)
+        words_only = no_punc_text.split()
+        # remove singletons
+        words_only = {w for w in words_only if len(w) > 1}
+        # the product gives ('cat', ',') and (',', 'cat')
+        punc_before = {"".join(p): p[1] for p in product(self.PUNC_LIST, words_only)}
+        punc_after = {"".join(p): p[0] for p in product(words_only, self.PUNC_LIST)}
+        words_punc_dict = punc_before
+        words_punc_dict.update(punc_after)
+        return words_punc_dict
+
+    def _words_and_emoticons(self):
+        """
+        Removes leading and trailing punctuation
+        Leaves contractions and most emoticons
+        Does not preserve punc-plus-letter emoticons (e.g. :D)
+        """
+        wes = self.text.split()
+        words_punc_dict = self._words_plus_punc()
+        wes = [we for we in wes if len(we) > 1]
+        for i, we in enumerate(wes):
+            if we in words_punc_dict:
+                wes[i] = words_punc_dict[we]
+        return wes
+
+    def allcap_differential(self, words):
+        """
+        Check whether just some words in the input are ALL CAPS
+
+        :param list words: The words to inspect
+        :returns: `True` if some but not all items in `words` are ALL CAPS
+        """
+        is_different = False
+        allcap_words = 0
+        for word in words:
+            if word.isupper():
+                allcap_words += 1
+        cap_differential = len(words) - allcap_words
+        if 0 < cap_differential < len(words):
+            is_different = True
+        return is_different
+
+
+class SentimentIntensityAnalyzer:
+    """
+    Give a sentiment intensity score to sentences.
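+
+    A minimal usage sketch (assumes the vader_lexicon data is installed,
+    e.g. via nltk.download("vader_lexicon"); the scores shown are taken from
+    the upstream VADER documentation and may vary slightly across lexicon
+    versions):
+
+        >>> sia = SentimentIntensityAnalyzer()  # doctest: +SKIP
+        >>> sia.polarity_scores("VADER is smart, handsome, and funny!")  # doctest: +SKIP
+        {'neg': 0.0, 'neu': 0.248, 'pos': 0.752, 'compound': 0.8439}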
+ """ + + def __init__( + self, + lexicon_file="sentiment/vader_lexicon.zip/vader_lexicon/vader_lexicon.txt", + ): + self.lexicon_file = nltk.data.load(lexicon_file) + self.lexicon = self.make_lex_dict() + self.constants = VaderConstants() + + def make_lex_dict(self): + """ + Convert lexicon file to a dictionary + """ + lex_dict = {} + for line in self.lexicon_file.split("\n"): + (word, measure) = line.strip().split("\t")[0:2] + lex_dict[word] = float(measure) + return lex_dict + + def polarity_scores(self, text): + """ + Return a float for sentiment strength based on the input text. + Positive values are positive valence, negative value are negative + valence. + + :note: Hashtags are not taken into consideration (e.g. #BAD is neutral). If you + are interested in processing the text in the hashtags too, then we recommend + preprocessing your data to remove the #, after which the hashtag text may be + matched as if it was a normal word in the sentence. + """ + # text, words_and_emoticons, is_cap_diff = self.preprocess(text) + sentitext = SentiText( + text, self.constants.PUNC_LIST, self.constants.REGEX_REMOVE_PUNCTUATION + ) + sentiments = [] + words_and_emoticons = sentitext.words_and_emoticons + for item in words_and_emoticons: + valence = 0 + i = words_and_emoticons.index(item) + if ( + i < len(words_and_emoticons) - 1 + and item.lower() == "kind" + and words_and_emoticons[i + 1].lower() == "of" + ) or item.lower() in self.constants.BOOSTER_DICT: + sentiments.append(valence) + continue + + sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments) + + sentiments = self._but_check(words_and_emoticons, sentiments) + + return self.score_valence(sentiments, text) + + def sentiment_valence(self, valence, sentitext, item, i, sentiments): + is_cap_diff = sentitext.is_cap_diff + words_and_emoticons = sentitext.words_and_emoticons + item_lowercase = item.lower() + if item_lowercase in self.lexicon: + # get the sentiment valence + valence = self.lexicon[item_lowercase] + + # check if sentiment laden word is in ALL CAPS (while others aren't) + if item.isupper() and is_cap_diff: + if valence > 0: + valence += self.constants.C_INCR + else: + valence -= self.constants.C_INCR + + for start_i in range(0, 3): + if ( + i > start_i + and words_and_emoticons[i - (start_i + 1)].lower() + not in self.lexicon + ): + # dampen the scalar modifier of preceding words and emoticons + # (excluding the ones that immediately preceed the item) based + # on their distance from the current item. 
+ s = self.constants.scalar_inc_dec( + words_and_emoticons[i - (start_i + 1)], valence, is_cap_diff + ) + if start_i == 1 and s != 0: + s = s * 0.95 + if start_i == 2 and s != 0: + s = s * 0.9 + valence = valence + s + valence = self._never_check( + valence, words_and_emoticons, start_i, i + ) + if start_i == 2: + valence = self._idioms_check(valence, words_and_emoticons, i) + + # future work: consider other sentiment-laden idioms + # other_idioms = + # {"back handed": -2, "blow smoke": -2, "blowing smoke": -2, + # "upper hand": 1, "break a leg": 2, + # "cooking with gas": 2, "in the black": 2, "in the red": -2, + # "on the ball": 2,"under the weather": -2} + + valence = self._least_check(valence, words_and_emoticons, i) + + sentiments.append(valence) + return sentiments + + def _least_check(self, valence, words_and_emoticons, i): + # check for negation case using "least" + if ( + i > 1 + and words_and_emoticons[i - 1].lower() not in self.lexicon + and words_and_emoticons[i - 1].lower() == "least" + ): + if ( + words_and_emoticons[i - 2].lower() != "at" + and words_and_emoticons[i - 2].lower() != "very" + ): + valence = valence * self.constants.N_SCALAR + elif ( + i > 0 + and words_and_emoticons[i - 1].lower() not in self.lexicon + and words_and_emoticons[i - 1].lower() == "least" + ): + valence = valence * self.constants.N_SCALAR + return valence + + def _but_check(self, words_and_emoticons, sentiments): + words_and_emoticons = [w_e.lower() for w_e in words_and_emoticons] + but = {"but"} & set(words_and_emoticons) + if but: + bi = words_and_emoticons.index(next(iter(but))) + for sidx, sentiment in enumerate(sentiments): + if sidx < bi: + sentiments[sidx] = sentiment * 0.5 + elif sidx > bi: + sentiments[sidx] = sentiment * 1.5 + return sentiments + + def _idioms_check(self, valence, words_and_emoticons, i): + onezero = f"{words_and_emoticons[i - 1]} {words_and_emoticons[i]}" + + twoonezero = "{} {} {}".format( + words_and_emoticons[i - 2], + words_and_emoticons[i - 1], + words_and_emoticons[i], + ) + + twoone = f"{words_and_emoticons[i - 2]} {words_and_emoticons[i - 1]}" + + threetwoone = "{} {} {}".format( + words_and_emoticons[i - 3], + words_and_emoticons[i - 2], + words_and_emoticons[i - 1], + ) + + threetwo = "{} {}".format( + words_and_emoticons[i - 3], words_and_emoticons[i - 2] + ) + + sequences = [onezero, twoonezero, twoone, threetwoone, threetwo] + + for seq in sequences: + if seq in self.constants.SPECIAL_CASE_IDIOMS: + valence = self.constants.SPECIAL_CASE_IDIOMS[seq] + break + + if len(words_and_emoticons) - 1 > i: + zeroone = f"{words_and_emoticons[i]} {words_and_emoticons[i + 1]}" + if zeroone in self.constants.SPECIAL_CASE_IDIOMS: + valence = self.constants.SPECIAL_CASE_IDIOMS[zeroone] + if len(words_and_emoticons) - 1 > i + 1: + zeroonetwo = "{} {} {}".format( + words_and_emoticons[i], + words_and_emoticons[i + 1], + words_and_emoticons[i + 2], + ) + if zeroonetwo in self.constants.SPECIAL_CASE_IDIOMS: + valence = self.constants.SPECIAL_CASE_IDIOMS[zeroonetwo] + + # check for booster/dampener bi-grams such as 'sort of' or 'kind of' + if ( + threetwo in self.constants.BOOSTER_DICT + or twoone in self.constants.BOOSTER_DICT + ): + valence = valence + self.constants.B_DECR + return valence + + def _never_check(self, valence, words_and_emoticons, start_i, i): + if start_i == 0: + if self.constants.negated([words_and_emoticons[i - 1]]): + valence = valence * self.constants.N_SCALAR + if start_i == 1: + if words_and_emoticons[i - 2] == "never" and ( + words_and_emoticons[i - 1] 
== "so" + or words_and_emoticons[i - 1] == "this" + ): + valence = valence * 1.5 + elif self.constants.negated([words_and_emoticons[i - (start_i + 1)]]): + valence = valence * self.constants.N_SCALAR + if start_i == 2: + if ( + words_and_emoticons[i - 3] == "never" + and ( + words_and_emoticons[i - 2] == "so" + or words_and_emoticons[i - 2] == "this" + ) + or ( + words_and_emoticons[i - 1] == "so" + or words_and_emoticons[i - 1] == "this" + ) + ): + valence = valence * 1.25 + elif self.constants.negated([words_and_emoticons[i - (start_i + 1)]]): + valence = valence * self.constants.N_SCALAR + return valence + + def _punctuation_emphasis(self, sum_s, text): + # add emphasis from exclamation points and question marks + ep_amplifier = self._amplify_ep(text) + qm_amplifier = self._amplify_qm(text) + punct_emph_amplifier = ep_amplifier + qm_amplifier + return punct_emph_amplifier + + def _amplify_ep(self, text): + # check for added emphasis resulting from exclamation points (up to 4 of them) + ep_count = text.count("!") + if ep_count > 4: + ep_count = 4 + # (empirically derived mean sentiment intensity rating increase for + # exclamation points) + ep_amplifier = ep_count * 0.292 + return ep_amplifier + + def _amplify_qm(self, text): + # check for added emphasis resulting from question marks (2 or 3+) + qm_count = text.count("?") + qm_amplifier = 0 + if qm_count > 1: + if qm_count <= 3: + # (empirically derived mean sentiment intensity rating increase for + # question marks) + qm_amplifier = qm_count * 0.18 + else: + qm_amplifier = 0.96 + return qm_amplifier + + def _sift_sentiment_scores(self, sentiments): + # want separate positive versus negative sentiment scores + pos_sum = 0.0 + neg_sum = 0.0 + neu_count = 0 + for sentiment_score in sentiments: + if sentiment_score > 0: + pos_sum += ( + float(sentiment_score) + 1 + ) # compensates for neutral words that are counted as 1 + if sentiment_score < 0: + neg_sum += ( + float(sentiment_score) - 1 + ) # when used with math.fabs(), compensates for neutrals + if sentiment_score == 0: + neu_count += 1 + return pos_sum, neg_sum, neu_count + + def score_valence(self, sentiments, text): + if sentiments: + sum_s = float(sum(sentiments)) + # compute and add emphasis from punctuation in text + punct_emph_amplifier = self._punctuation_emphasis(sum_s, text) + if sum_s > 0: + sum_s += punct_emph_amplifier + elif sum_s < 0: + sum_s -= punct_emph_amplifier + + compound = self.constants.normalize(sum_s) + # discriminate between positive, negative and neutral sentiment scores + pos_sum, neg_sum, neu_count = self._sift_sentiment_scores(sentiments) + + if pos_sum > math.fabs(neg_sum): + pos_sum += punct_emph_amplifier + elif pos_sum < math.fabs(neg_sum): + neg_sum -= punct_emph_amplifier + + total = pos_sum + math.fabs(neg_sum) + neu_count + pos = math.fabs(pos_sum / total) + neg = math.fabs(neg_sum / total) + neu = math.fabs(neu_count / total) + + else: + compound = 0.0 + pos = 0.0 + neg = 0.0 + neu = 0.0 + + sentiment_dict = { + "neg": round(neg, 3), + "neu": round(neu, 3), + "pos": round(pos, 3), + "compound": round(compound, 4), + } + + return sentiment_dict diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3387daec4b489d83a4f87b9652a0309f7c4e1ce5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__init__.py @@ -0,0 +1,31 @@ +# Natural Language Toolkit: Transformation-based learning +# +# 
Copyright (C) 2001-2023 NLTK Project
+# Author: Marcus Uneson
+# based on previous (nltk2) version by
+# Christopher Maloof, Edward Loper, Steven Bird
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Transformation Based Learning
+
+A general purpose package for Transformation Based Learning,
+currently used by nltk.tag.BrillTagger.
+
+isort:skip_file
+"""
+
+from nltk.tbl.template import Template
+
+# API: Template(...), Template.expand(...)
+
+from nltk.tbl.feature import Feature
+
+# API: Feature(...), Feature.expand(...)
+
+from nltk.tbl.rule import Rule
+
+# API: Rule.format(...), Rule.templateid
+
+from nltk.tbl.erroranalysis import error_list
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3bdf9c38a4f28fbdb377fdff02228aea9007b7c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2289e35c90286fc808ef6b4e8fe803b3fcfbc4d1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70dc0108b63b14b648ccabd51851f91e57117e76
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f317529f99e4ee428db2c76d7eb18552fecd5adf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58dfdd7d4413ead38818f888fde71111665b7802
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72fed9c84b62f6533ab4a1add411098afddb7042
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b5f4eb283deabaf7a2152e2f069fa0d9ca9853e
Binary files /dev/null and
b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/demo.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5298e396e964f1f33e89a81263014249bca7cfa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/demo.py
@@ -0,0 +1,418 @@
+# Natural Language Toolkit: Transformation-based learning
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Marcus Uneson
+# based on previous (nltk2) version by
+# Christopher Maloof, Edward Loper, Steven Bird
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+import os
+import pickle
+import random
+import time
+
+from nltk.corpus import treebank
+from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
+from nltk.tag.brill import Pos, Word
+from nltk.tbl import Template, error_list
+
+
+def demo():
+    """
+    Run a demo with defaults. See source comments for details,
+    or docstrings of any of the more specific demo_* functions.
+    """
+    postag()
+
+
+def demo_repr_rule_format():
+    """
+    Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
+    """
+    postag(ruleformat="repr")
+
+
+def demo_str_rule_format():
+    """
+    Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
+    """
+    postag(ruleformat="str")
+
+
+def demo_verbose_rule_format():
+    """
+    Exemplify Rule.format("verbose")
+    """
+    postag(ruleformat="verbose")
+
+
+def demo_multiposition_feature():
+    """
+    The feature(s) of a template take a list of positions
+    relative to the current word where the feature should be
+    looked for, conceptually joined by logical OR. For instance,
+    Pos([-1, 1]), given a value V, will hold whenever V is found
+    one step to the left and/or one step to the right.
+
+    For contiguous ranges, a 2-arg form giving inclusive end
+    points can also be used: Pos(-3, -1) is the same as the arg
+    below.
+    """
+    postag(templates=[Template(Pos([-3, -2, -1]))])
+
+
+def demo_multifeature_template():
+    """
+    Templates can have more than a single feature.
+    """
+    postag(templates=[Template(Word([0]), Pos([-2, -1]))])
+
+
+def demo_template_statistics():
+    """
+    Show aggregate statistics per template. Little used templates are
+    candidates for deletion, much used templates may possibly be refined.
+
+    Deleting unused templates is mostly about saving time and/or space:
+    training is basically O(T) in the number of templates T
+    (also in terms of memory usage, which often will be the limiting factor).
+    """
+    postag(incremental_stats=True, template_stats=True)
+
+
+def demo_generated_templates():
+    """
+    Template.expand and Feature.expand are class methods facilitating
+    the generation of large amounts of templates. See their documentation
+    for details.
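+
+    An illustrative aside (a sketch, not executed by this demo): the
+    pre-built template sets mentioned in postag() below can be listed
+    before committing to a large generated set, e.g.
+
+        from nltk.tag.brill import describe_template_sets
+        describe_template_sets()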
+
+    Note: training with 500 templates can easily fill all available
+    memory, even on relatively small corpora
+    """
+    wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
+    tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
+    templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3)))
+    print(
+        "Generated {} templates for transformation-based learning".format(
+            len(templates)
+        )
+    )
+    postag(templates=templates, incremental_stats=True, template_stats=True)
+
+
+def demo_learning_curve():
+    """
+    Plot a learning curve -- the contribution to tagging accuracy of
+    the individual rules.
+    Note: requires matplotlib
+    """
+    postag(
+        incremental_stats=True,
+        separate_baseline_data=True,
+        learning_curve_output="learningcurve.png",
+    )
+
+
+def demo_error_analysis():
+    """
+    Writes a file with context for each erroneous word after tagging testing data
+    """
+    postag(error_output="errors.txt")
+
+
+def demo_serialize_tagger():
+    """
+    Serializes the learned tagger to a file in pickle format; reloads it
+    and validates the process.
+    """
+    postag(serialize_output="tagger.pcl")
+
+
+def demo_high_accuracy_rules():
+    """
+    Discard rules with low accuracy. This may hurt performance a bit,
+    but will often produce rules which are more interesting to a human reader.
+    """
+    postag(num_sents=3000, min_acc=0.96, min_score=10)
+
+
+def postag(
+    templates=None,
+    tagged_data=None,
+    num_sents=1000,
+    max_rules=300,
+    min_score=3,
+    min_acc=None,
+    train=0.8,
+    trace=3,
+    randomize=False,
+    ruleformat="str",
+    incremental_stats=False,
+    template_stats=False,
+    error_output=None,
+    serialize_output=None,
+    learning_curve_output=None,
+    learning_curve_take=300,
+    baseline_backoff_tagger=None,
+    separate_baseline_data=False,
+    cache_baseline_tagger=None,
+):
+    """
+    Brill Tagger Demonstration
+    :param templates: the templates to use in rule learning
+    :type templates: list of Template
+
+    :param tagged_data: the corpus of tagged sentences to train and test on
+        (defaults to nltk.corpus.treebank)
+    :type tagged_data: C{list} of C{list} of C{tuple}
+
+    :param num_sents: how many sentences of training and testing data to use
+    :type num_sents: C{int}
+
+    :param max_rules: maximum number of rule instances to create
+    :type max_rules: C{int}
+
+    :param min_score: the minimum score for a rule in order for it to be considered
+    :type min_score: C{int}
+
+    :param min_acc: the minimum accuracy for a rule in order for it to be considered
+    :type min_acc: C{float}
+
+    :param train: the fraction of the corpus to be used for training (1=all)
+    :type train: C{float}
+
+    :param trace: the level of diagnostic tracing output to produce (0-4)
+    :type trace: C{int}
+
+    :param randomize: whether the training data should be a random subset of the corpus
+    :type randomize: C{bool}
+
+    :param ruleformat: rule output format, one of "str", "repr", "verbose"
+    :type ruleformat: C{str}
+
+    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
+    :type incremental_stats: C{bool}
+
+    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
+    :type template_stats: C{bool}
+
+    :param error_output: the file where errors will be saved
+    :type error_output: C{string}
+
+    :param serialize_output: the file where the learned tbl tagger will be saved
+    :type serialize_output: C{string}
+
+    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
+    :type learning_curve_output: C{string}
+
+    :param learning_curve_take: how many rules to include in the plotted learning curve
+    :type learning_curve_take: C{int}
+
+    :param baseline_backoff_tagger: the backoff tagger used by the baseline
+        unigram tagger (defaults to REGEXP_TAGGER)
+    :type baseline_backoff_tagger: tagger
+
+    :param separate_baseline_data: use a fraction of the training data exclusively for training baseline
+    :type separate_baseline_data: C{bool}
+
+    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
+        deterministic output from the baseline unigram tagger between python versions)
+    :type cache_baseline_tagger: C{string}
+
+
+    Note on separate_baseline_data: if False, the training data is reused both
+    for the baseline and the rule learner. This is fast and fine for a demo,
+    but the learned rules are likely to generalize worse on unseen data. It
+    also cannot be sensibly used for learning curves on training data (the
+    baseline will be artificially high).
+    """
+
+    # defaults
+    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
+    if templates is None:
+        from nltk.tag.brill import brill24, describe_template_sets
+
+        # some pre-built template sets taken from typical systems or publications are
+        # available. Print a list with describe_template_sets()
+        # for instance:
+        templates = brill24()
+    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
+        tagged_data, train, num_sents, randomize, separate_baseline_data
+    )
+
+    # creating (or reloading from cache) a baseline tagger (unigram tagger)
+    # this is just a mechanism for getting deterministic output from the baseline between
+    # python versions
+    if cache_baseline_tagger:
+        if not os.path.exists(cache_baseline_tagger):
+            baseline_tagger = UnigramTagger(
+                baseline_data, backoff=baseline_backoff_tagger
+            )
+            # pickles must be written and read in binary mode
+            with open(cache_baseline_tagger, "wb") as print_rules:
+                pickle.dump(baseline_tagger, print_rules)
+            print(
+                "Trained baseline tagger, pickled it to {}".format(
+                    cache_baseline_tagger
+                )
+            )
+        with open(cache_baseline_tagger, "rb") as print_rules:
+            baseline_tagger = pickle.load(print_rules)
+        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
+    else:
+        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
+        print("Trained baseline tagger")
+    if gold_data:
+        print(
+            "    Accuracy on test set: {:0.4f}".format(
+                baseline_tagger.accuracy(gold_data)
+            )
+        )
+
+    # creating a Brill tagger
+    tbrill = time.time()
+    trainer = BrillTaggerTrainer(
+        baseline_tagger, templates, trace, ruleformat=ruleformat
+    )
+    print("Training tbl tagger...")
+    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
+    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
+    if gold_data:
+        print("    Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))
+
+    # printing the learned rules, if learned silently
+    if trace == 1:
+        print("\nLearned rules: ")
+        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
+            print(f"{ruleno:4d} {rule.format(ruleformat):s}")
+
+    # printing template statistics (optionally including comparison with the training data)
+    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
+    if incremental_stats:
+        print(
+            "Incrementally tagging the test data, collecting individual rule statistics"
+        )
+        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
+            testing_data, gold_data
+        )
+        print("    Rule statistics collected")
+        if not separate_baseline_data:
+            print(
+                "WARNING: train_stats asked for separate_baseline_data=True; the baseline "
+                "will be artificially high"
+            )
+        trainstats = brill_tagger.train_stats()
+        if template_stats:
+            brill_tagger.print_template_statistics(teststats)
+        if learning_curve_output:
+            _demo_plot(
+                learning_curve_output, teststats, trainstats, take=learning_curve_take
+            )
+            print(f"Wrote plot of learning curve to {learning_curve_output}")
+    else:
+        print("Tagging the test data")
+        taggedtest = brill_tagger.tag_sents(testing_data)
+        if template_stats:
+            brill_tagger.print_template_statistics()
+
+    # writing error analysis to file
+    if error_output is not None:
+        with open(error_output, "w") as f:
+            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
+            # error_list() returns str, so no encoding step is needed here
+            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
+        print(f"Wrote tagger errors including context to {error_output}")
+
+    # serializing the tagger to a pickle file and reloading (just to see it works)
+    if serialize_output is not None:
+        taggedtest = brill_tagger.tag_sents(testing_data)
+        with open(serialize_output, "wb") as print_rules:
+            pickle.dump(brill_tagger, print_rules)
+        print(f"Wrote pickled tagger to {serialize_output}")
+        with open(serialize_output, "rb") as print_rules:
+            brill_tagger_reloaded = pickle.load(print_rules)
+        print(f"Reloaded pickled tagger from {serialize_output}")
+        taggedtest_reloaded = brill_tagger_reloaded.tag_sents(testing_data)
+        if taggedtest == taggedtest_reloaded:
+            print("Reloaded tagger tried on test set, results identical")
+        else:
+            print("PROBLEM: Reloaded tagger gave different results on test set")
+
+
+def _demo_prepare_data(
+    tagged_data, train, num_sents, randomize, separate_baseline_data
+):
+    # train is the proportion of data used in training; the rest is reserved
+    # for testing.
+    if tagged_data is None:
+        print("Loading tagged data from treebank... ")
+        tagged_data = treebank.tagged_sents()
+    if num_sents is None or len(tagged_data) <= num_sents:
+        num_sents = len(tagged_data)
+    if randomize:
+        random.seed(len(tagged_data))
+        # materialize the corpus view so it can be shuffled in place
+        tagged_data = list(tagged_data)
+        random.shuffle(tagged_data)
+    cutoff = int(num_sents * train)
+    training_data = tagged_data[:cutoff]
+    gold_data = tagged_data[cutoff:num_sents]
+    testing_data = [[t[0] for t in sent] for sent in gold_data]
+    if not separate_baseline_data:
+        baseline_data = training_data
+    else:
+        bl_cutoff = len(training_data) // 3
+        (baseline_data, training_data) = (
+            training_data[:bl_cutoff],
+            training_data[bl_cutoff:],
+        )
+    (trainseqs, traintokens) = corpus_size(training_data)
+    (testseqs, testtokens) = corpus_size(testing_data)
+    (bltrainseqs, bltraintokens) = corpus_size(baseline_data)
+    print(f"Read testing data ({testseqs:d} sents/{testtokens:d} wds)")
+    print(f"Read training data ({trainseqs:d} sents/{traintokens:d} wds)")
+    print(
+        "Read baseline data ({:d} sents/{:d} wds) {:s}".format(
+            bltrainseqs,
+            bltraintokens,
+            "" if separate_baseline_data else "[reused the training set]",
+        )
+    )
+    return (training_data, baseline_data, gold_data, testing_data)
+
+
+def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
+    testcurve = [teststats["initialerrors"]]
+    for rulescore in teststats["rulescores"]:
+        testcurve.append(testcurve[-1] - rulescore)
+    testcurve = [1 - x / teststats["tokencount"] for x in testcurve[:take]]
+
+    traincurve = [trainstats["initialerrors"]]
+    for rulescore in trainstats["rulescores"]:
+        traincurve.append(traincurve[-1] - rulescore)
+    traincurve = [1 - x / trainstats["tokencount"] for x in traincurve[:take]]
+
+    import matplotlib.pyplot as plt
+
+    r = list(range(len(testcurve)))
+    plt.plot(r, testcurve, r, traincurve)
+    plt.axis([None, None, None, 1.0])
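+    # (note: the [None, None, None, 1.0] axis spec pins the top of the y-axis
+    # at accuracy 1.0 and lets matplotlib autoscale the other three limits)
+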
plt.savefig(learning_curve_output) + + +NN_CD_TAGGER = RegexpTagger([(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r".*", "NN")]) + +REGEXP_TAGGER = RegexpTagger( + [ + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "AT"), # articles + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] +) + + +def corpus_size(seqs): + return (len(seqs), sum(len(x) for x in seqs)) + + +if __name__ == "__main__": + demo_learning_curve() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py new file mode 100644 index 0000000000000000000000000000000000000000..8b192e75d8b410942960cbf5ea1476a42f0decf7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py @@ -0,0 +1,38 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# returns a list of errors in string format + + +def error_list(train_sents, test_sents): + """ + Returns a list of human-readable strings indicating the errors in the + given tagging of the corpus. + + :param train_sents: The correct tagging of the corpus + :type train_sents: list(tuple) + :param test_sents: The tagged corpus + :type test_sents: list(tuple) + """ + hdr = ("%25s | %s | %s\n" + "-" * 26 + "+" + "-" * 24 + "+" + "-" * 26) % ( + "left context", + "word/test->gold".center(22), + "right context", + ) + errors = [hdr] + for (train_sent, test_sent) in zip(train_sents, test_sents): + for wordnum, (word, train_pos) in enumerate(train_sent): + test_pos = test_sent[wordnum][1] + if train_pos != test_pos: + left = " ".join("%s/%s" % w for w in train_sent[:wordnum]) + right = " ".join("%s/%s" % w for w in train_sent[wordnum + 1 :]) + mid = f"{word}/{test_pos}->{train_pos}" + errors.append(f"{left[-25:]:>25} | {mid.center(22)} | {right[:25]}") + + return errors diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/feature.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/feature.py new file mode 100644 index 0000000000000000000000000000000000000000..568425918db4b4b7910ef0d216b03bd10411d287 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/feature.py @@ -0,0 +1,267 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + + +class Feature(metaclass=ABCMeta): + """ + An abstract base class for Features. A Feature is a combination of + a specific property-computing method and a list of relative positions + to apply that method to. + + The property-computing method, M{extract_property(tokens, index)}, + must be implemented by every subclass. It extracts or computes a specific + property for the token at the current index. 
Typical extract_property() + methods return features such as the token text or tag; but more involved + methods may consider the entire sequence M{tokens} and + for instance compute the length of the sentence the token belongs to. + + In addition, the subclass may have a PROPERTY_NAME, which is how + it will be printed (in Rules and Templates, etc). If not given, defaults + to the classname. + + """ + + json_tag = "nltk.tbl.Feature" + PROPERTY_NAME = None + + def __init__(self, positions, end=None): + """ + Construct a Feature which may apply at C{positions}. + + >>> # For instance, importing some concrete subclasses (Feature is abstract) + >>> from nltk.tag.brill import Word, Pos + + >>> # Feature Word, applying at one of [-2, -1] + >>> Word([-2,-1]) + Word([-2, -1]) + + >>> # Positions need not be contiguous + >>> Word([-2,-1, 1]) + Word([-2, -1, 1]) + + >>> # Contiguous ranges can alternatively be specified giving the + >>> # two endpoints (inclusive) + >>> Pos(-3, -1) + Pos([-3, -2, -1]) + + >>> # In two-arg form, start <= end is enforced + >>> Pos(2, 1) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tbl/template.py", line 306, in __init__ + raise TypeError + ValueError: illegal interval specification: (start=2, end=1) + + :type positions: list of int + :param positions: the positions at which this features should apply + :raises ValueError: illegal position specifications + + An alternative calling convention, for contiguous positions only, + is Feature(start, end): + + :type start: int + :param start: start of range where this feature should apply + :type end: int + :param end: end of range (NOTE: inclusive!) where this feature should apply + """ + self.positions = None # to avoid warnings + if end is None: + self.positions = tuple(sorted({int(i) for i in positions})) + else: # positions was actually not a list, but only the start index + try: + if positions > end: + raise TypeError + self.positions = tuple(range(positions, end + 1)) + except TypeError as e: + # let any kind of erroneous spec raise ValueError + raise ValueError( + "illegal interval specification: (start={}, end={})".format( + positions, end + ) + ) from e + + # set property name given in subclass, or otherwise name of subclass + self.PROPERTY_NAME = self.__class__.PROPERTY_NAME or self.__class__.__name__ + + def encode_json_obj(self): + return self.positions + + @classmethod + def decode_json_obj(cls, obj): + positions = obj + return cls(positions) + + def __repr__(self): + return f"{self.__class__.__name__}({list(self.positions)!r})" + + @classmethod + def expand(cls, starts, winlens, excludezero=False): + """ + Return a list of features, one for each start point in starts + and for each window length in winlen. 
If excludezero is True, + no Features containing 0 in its positions will be generated + (many tbl trainers have a special representation for the + target feature at [0]) + + For instance, importing a concrete subclass (Feature is abstract) + + >>> from nltk.tag.brill import Word + + First argument gives the possible start positions, second the + possible window lengths + + >>> Word.expand([-3,-2,-1], [1]) + [Word([-3]), Word([-2]), Word([-1])] + + >>> Word.expand([-2,-1], [1]) + [Word([-2]), Word([-1])] + + >>> Word.expand([-3,-2,-1], [1,2]) + [Word([-3]), Word([-2]), Word([-1]), Word([-3, -2]), Word([-2, -1])] + + >>> Word.expand([-2,-1], [1]) + [Word([-2]), Word([-1])] + + A third optional argument excludes all Features whose positions contain zero + + >>> Word.expand([-2,-1,0], [1,2], excludezero=False) + [Word([-2]), Word([-1]), Word([0]), Word([-2, -1]), Word([-1, 0])] + + >>> Word.expand([-2,-1,0], [1,2], excludezero=True) + [Word([-2]), Word([-1]), Word([-2, -1])] + + All window lengths must be positive + + >>> Word.expand([-2,-1], [0]) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tag/tbl/template.py", line 371, in expand + :param starts: where to start looking for Feature + ValueError: non-positive window length in [0] + + :param starts: where to start looking for Feature + :type starts: list of ints + :param winlens: window lengths where to look for Feature + :type starts: list of ints + :param excludezero: do not output any Feature with 0 in any of its positions. + :type excludezero: bool + :returns: list of Features + :raises ValueError: for non-positive window lengths + """ + if not all(x > 0 for x in winlens): + raise ValueError(f"non-positive window length in {winlens}") + xs = (starts[i : i + w] for w in winlens for i in range(len(starts) - w + 1)) + return [cls(x) for x in xs if not (excludezero and 0 in x)] + + def issuperset(self, other): + """ + Return True if this Feature always returns True when other does + + More precisely, return True if this feature refers to the same property as other; + and this Feature looks at all positions that other does (and possibly + other positions in addition). + + #For instance, importing a concrete subclass (Feature is abstract) + >>> from nltk.tag.brill import Word, Pos + + >>> Word([-3,-2,-1]).issuperset(Word([-3,-2])) + True + + >>> Word([-3,-2,-1]).issuperset(Word([-3,-2, 0])) + False + + #Feature subclasses must agree + >>> Word([-3,-2,-1]).issuperset(Pos([-3,-2])) + False + + :param other: feature with which to compare + :type other: (subclass of) Feature + :return: True if this feature is superset, otherwise False + :rtype: bool + + + """ + return self.__class__ is other.__class__ and set(self.positions) >= set( + other.positions + ) + + def intersects(self, other): + """ + Return True if the positions of this Feature intersects with those of other + + More precisely, return True if this feature refers to the same property as other; + and there is some overlap in the positions they look at. 
+ + #For instance, importing a concrete subclass (Feature is abstract) + >>> from nltk.tag.brill import Word, Pos + + >>> Word([-3,-2,-1]).intersects(Word([-3,-2])) + True + + >>> Word([-3,-2,-1]).intersects(Word([-3,-2, 0])) + True + + >>> Word([-3,-2,-1]).intersects(Word([0])) + False + + #Feature subclasses must agree + >>> Word([-3,-2,-1]).intersects(Pos([-3,-2])) + False + + :param other: feature with which to compare + :type other: (subclass of) Feature + :return: True if feature classes agree and there is some overlap in the positions they look at + :rtype: bool + """ + + return bool( + self.__class__ is other.__class__ + and set(self.positions) & set(other.positions) + ) + + # Rich comparisons for Features. With @functools.total_ordering (Python 2.7+), + # it will be enough to define __lt__ and __eq__ + def __eq__(self, other): + return self.__class__ is other.__class__ and self.positions == other.positions + + def __lt__(self, other): + return ( + self.__class__.__name__ < other.__class__.__name__ + or + # self.positions is a sorted tuple of ints + self.positions < other.positions + ) + + def __ne__(self, other): + return not (self == other) + + def __gt__(self, other): + return other < self + + def __ge__(self, other): + return not self < other + + def __le__(self, other): + return self < other or self == other + + @staticmethod + @abstractmethod + def extract_property(tokens, index): + """ + Any subclass of Feature must define static method extract_property(tokens, index) + + :param tokens: the sequence of tokens + :type tokens: list of tokens + :param index: the current index + :type index: int + :return: feature value + :rtype: any (but usually scalar) + """ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/rule.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/rule.py new file mode 100644 index 0000000000000000000000000000000000000000..7faea23bd36ddbf974de4499bb1f9106a78e4c0e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/rule.py @@ -0,0 +1,322 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + +from nltk import jsontags + + +###################################################################### +# Tag Rules +###################################################################### +class TagRule(metaclass=ABCMeta): + """ + An interface for tag transformations on a tagged corpus, as + performed by tbl taggers. Each transformation finds all tokens + in the corpus that are tagged with a specific original tag and + satisfy a specific condition, and replaces their tags with a + replacement tag. For any given transformation, the original + tag, replacement tag, and condition are fixed. Conditions may + depend on the token under consideration, as well as any other + tokens in the corpus. + + Tag rules must be comparable and hashable. + """ + + def __init__(self, original_tag, replacement_tag): + + self.original_tag = original_tag + """The tag which this TagRule may cause to be replaced.""" + + self.replacement_tag = replacement_tag + """The tag with which this TagRule may replace another tag.""" + + def apply(self, tokens, positions=None): + """ + Apply this rule at every position in positions where it + applies to the given sentence. 
I.e., for each position p + in *positions*, if *tokens[p]* is tagged with this rule's + original tag, and satisfies this rule's condition, then set + its tag to be this rule's replacement tag. + + :param tokens: The tagged sentence + :type tokens: list(tuple(str, str)) + :type positions: list(int) + :param positions: The positions where the transformation is to + be tried. If not specified, try it at all positions. + :return: The indices of tokens whose tags were changed by this + rule. + :rtype: int + """ + if positions is None: + positions = list(range(len(tokens))) + + # Determine the indices at which this rule applies. + change = [i for i in positions if self.applies(tokens, i)] + + # Make the changes. Note: this must be done in a separate + # step from finding applicable locations, since we don't want + # the rule to interact with itself. + for i in change: + tokens[i] = (tokens[i][0], self.replacement_tag) + + return change + + @abstractmethod + def applies(self, tokens, index): + """ + :return: True if the rule would change the tag of + ``tokens[index]``, False otherwise + :rtype: bool + :param tokens: A tagged sentence + :type tokens: list(str) + :param index: The index to check + :type index: int + """ + + # Rules must be comparable and hashable for the algorithm to work + def __eq__(self, other): + raise TypeError("Rules must implement __eq__()") + + def __ne__(self, other): + raise TypeError("Rules must implement __ne__()") + + def __hash__(self): + raise TypeError("Rules must implement __hash__()") + + +@jsontags.register_tag +class Rule(TagRule): + """ + A Rule checks the current corpus position for a certain set of conditions; + if they are all fulfilled, the Rule is triggered, meaning that it + will change tag A to tag B. For other tags than A, nothing happens. + + The conditions are parameters to the Rule instance. Each condition is a feature-value pair, + with a set of positions to check for the value of the corresponding feature. + Conceptually, the positions are joined by logical OR, and the feature set by logical AND. + + More formally, the Rule is then applicable to the M{n}th token iff: + + - The M{n}th token is tagged with the Rule's original tag; and + - For each (Feature(positions), M{value}) tuple: + + - The value of Feature of at least one token in {n+p for p in positions} + is M{value}. + """ + + json_tag = "nltk.tbl.Rule" + + def __init__(self, templateid, original_tag, replacement_tag, conditions): + """ + Construct a new Rule that changes a token's tag from + C{original_tag} to C{replacement_tag} if all of the properties + specified in C{conditions} hold. + + :param templateid: the template id (a zero-padded string, '001' etc, + so it will sort nicely) + :type templateid: string + + :param conditions: A list of Feature(positions), + each of which specifies that the property (computed by + Feature.extract_property()) of at least one + token in M{n} + p in positions is C{value}. 
+ :type conditions: C{iterable} of C{Feature} + + """ + TagRule.__init__(self, original_tag, replacement_tag) + self._conditions = conditions + self.templateid = templateid + + def encode_json_obj(self): + return { + "templateid": self.templateid, + "original": self.original_tag, + "replacement": self.replacement_tag, + "conditions": self._conditions, + } + + @classmethod + def decode_json_obj(cls, obj): + return cls( + obj["templateid"], + obj["original"], + obj["replacement"], + tuple(tuple(feat) for feat in obj["conditions"]), + ) + + def applies(self, tokens, index): + # Inherit docs from TagRule + + # Does the given token have this Rule's "original tag"? + if tokens[index][1] != self.original_tag: + return False + + # Check to make sure that every condition holds. + for (feature, val) in self._conditions: + + # Look for *any* token that satisfies the condition. + for pos in feature.positions: + if not (0 <= index + pos < len(tokens)): + continue + if feature.extract_property(tokens, index + pos) == val: + break + else: + # No token satisfied the condition; return false. + return False + + # Every condition checked out, so the Rule is applicable. + return True + + def __eq__(self, other): + return self is other or ( + other is not None + and other.__class__ == self.__class__ + and self.original_tag == other.original_tag + and self.replacement_tag == other.replacement_tag + and self._conditions == other._conditions + ) + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + + # Cache our hash value (justified by profiling.) + try: + return self.__hash + except AttributeError: + self.__hash = hash(repr(self)) + return self.__hash + + def __repr__(self): + # Cache the repr (justified by profiling -- this is used as + # a sort key when deterministic=True.) + try: + return self.__repr + except AttributeError: + self.__repr = "{}('{}', {}, {}, [{}])".format( + self.__class__.__name__, + self.templateid, + repr(self.original_tag), + repr(self.replacement_tag), + # list(self._conditions) would be simpler but will not generate + # the same Rule.__repr__ in python 2 and 3 and thus break some tests + ", ".join(f"({f},{repr(v)})" for (f, v) in self._conditions), + ) + + return self.__repr + + def __str__(self): + def _condition_to_logic(feature, value): + """ + Return a compact, predicate-logic styled string representation + of the given condition. + """ + return "{}:{}@[{}]".format( + feature.PROPERTY_NAME, + value, + ",".join(str(w) for w in feature.positions), + ) + + conditions = " & ".join( + [_condition_to_logic(f, v) for (f, v) in self._conditions] + ) + s = f"{self.original_tag}->{self.replacement_tag} if {conditions}" + + return s + + def format(self, fmt): + """ + Return a string representation of this rule. 
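+        (The supported format specs are "str", "repr", and "verbose";
+        anything else raises ValueError, as the examples below show.)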
+
+        >>> from nltk.tbl.rule import Rule
+        >>> from nltk.tag.brill import Pos
+
+        >>> r = Rule("23", "VB", "NN", [(Pos([-2,-1]), 'DT')])
+
+        >>> r.format("str") == str(r)
+        True
+        >>> r.format("str")
+        'VB->NN if Pos:DT@[-2,-1]'
+
+        >>> r.format("repr") == repr(r)
+        True
+        >>> r.format("repr")
+        "Rule('23', 'VB', 'NN', [(Pos([-2, -1]),'DT')])"
+
+        >>> r.format("verbose")
+        'VB -> NN if the Pos of words i-2...i-1 is "DT"'
+
+        >>> r.format("not_found")
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+          File "nltk/tbl/rule.py", line 256, in format
+            raise ValueError("unknown rule format spec: {0}".format(fmt))
+        ValueError: unknown rule format spec: not_found
+        >>>
+
+        :param fmt: format specification
+        :type fmt: str
+        :return: string representation
+        :rtype: str
+        """
+        if fmt == "str":
+            return self.__str__()
+        elif fmt == "repr":
+            return self.__repr__()
+        elif fmt == "verbose":
+            return self._verbose_format()
+        else:
+            raise ValueError(f"unknown rule format spec: {fmt}")
+
+    def _verbose_format(self):
+        """
+        Return a wordy, human-readable string representation
+        of the given rule.
+
+        Not sure how useful this is.
+        """
+
+        def condition_to_str(feature, value):
+            return 'the {} of {} is "{}"'.format(
+                feature.PROPERTY_NAME,
+                range_to_str(feature.positions),
+                value,
+            )
+
+        def range_to_str(positions):
+            if len(positions) == 1:
+                p = positions[0]
+                if p == 0:
+                    return "this word"
+                if p == -1:
+                    return "the preceding word"
+                elif p == 1:
+                    return "the following word"
+                elif p < 0:
+                    return "word i-%d" % -p
+                elif p > 0:
+                    return "word i+%d" % p
+            else:
+                # for complete compatibility with the wordy format of nltk2
+                mx = max(positions)
+                mn = min(positions)
+                if mx - mn == len(positions) - 1:
+                    return "words i%+d...i%+d" % (mn, mx)
+                else:
+                    return "words {{{}}}".format(
+                        ",".join("i%+d" % d for d in positions)
+                    )
+
+        replacement = f"{self.original_tag} -> {self.replacement_tag}"
+        conditions = (" if " if self._conditions else "") + ", and ".join(
+            condition_to_str(f, v) for (f, v) in self._conditions
+        )
+        return replacement + conditions
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tbl/template.py b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/template.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac9ed5df52f5730bd767a04a121637a5c2be01d2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/tbl/template.py
@@ -0,0 +1,325 @@
+# Natural Language Toolkit: Transformation-based learning
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Marcus Uneson
+# based on previous (nltk2) version by
+# Christopher Maloof, Edward Loper, Steven Bird
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+import itertools as it
+from abc import ABCMeta, abstractmethod
+
+from nltk.tbl.feature import Feature
+from nltk.tbl.rule import Rule
+
+
+class BrillTemplateI(metaclass=ABCMeta):
+    """
+    An interface for generating lists of transformational rules that
+    apply at given sentence positions. ``BrillTemplateI`` is used by
+    ``Brill`` training algorithms to generate candidate rules.
+    """
+
+    @abstractmethod
+    def applicable_rules(self, tokens, i, correctTag):
+        """
+        Return a list of the transformational rules that would correct
+        the ``i``-th subtoken's tag in the given token. In particular,
+        return a list of zero or more rules that would change
+        ``tokens[i][1]`` to ``correctTag``, if applied to ``token[i]``.
+ + If the ``i``-th token already has the correct tag (i.e., if + ``tagged_tokens[i][1] == correctTag``), then + ``applicable_rules()`` should return the empty list. + + :param tokens: The tagged tokens being tagged. + :type tokens: list(tuple) + :param i: The index of the token whose tag should be corrected. + :type i: int + :param correctTag: The correct tag for the ``i``-th token. + :type correctTag: any + :rtype: list(BrillRule) + """ + + @abstractmethod + def get_neighborhood(self, token, index): + """ + Returns the set of indices *i* such that + ``applicable_rules(token, i, ...)`` depends on the value of + the *index*th token of *token*. + + This method is used by the "fast" Brill tagger trainer. + + :param token: The tokens being tagged. + :type token: list(tuple) + :param index: The index whose neighborhood should be returned. + :type index: int + :rtype: set + """ + + +class Template(BrillTemplateI): + """ + A tbl Template that generates a list of L{Rule}s that apply at a given sentence + position. In particular, each C{Template} is parameterized by a list of + independent features (a combination of a specific + property to extract and a list C{L} of relative positions at which to extract + it) and generates all Rules that: + + - use the given features, each at its own independent position; and + - are applicable to the given token. + """ + + ALLTEMPLATES = [] + # record a unique id of form "001", for each template created + # _ids = it.count(0) + + def __init__(self, *features): + + """ + Construct a Template for generating Rules. + + Takes a list of Features. A C{Feature} is a combination + of a specific property and its relative positions and should be + a subclass of L{nltk.tbl.feature.Feature}. + + An alternative calling convention (kept for backwards compatibility, + but less expressive as it only permits one feature type) is + Template(Feature, (start1, end1), (start2, end2), ...) + In new code, that would be better written + Template(Feature(start1, end1), Feature(start2, end2), ...) + + For instance, importing some features + + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Word, Pos + + Create some features + + >>> wfeat1, wfeat2, pfeat = (Word([-1]), Word([1,2]), Pos([-2,-1])) + + Create a single-feature template + + >>> Template(wfeat1) + Template(Word([-1])) + + Or a two-feature one + + >>> Template(wfeat1, wfeat2) + Template(Word([-1]),Word([1, 2])) + + Or a three-feature one with two different feature types + + >>> Template(wfeat1, wfeat2, pfeat) + Template(Word([-1]),Word([1, 2]),Pos([-2, -1])) + + deprecated api: Feature subclass, followed by list of (start,end) pairs + (permits only a single Feature) + + >>> Template(Word, (-2,-1), (0,0)) + Template(Word([-2, -1]),Word([0])) + + Incorrect specification raises TypeError + + >>> Template(Word, (-2,-1), Pos, (0,0)) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tag/tbl/template.py", line 143, in __init__ + raise TypeError( + TypeError: expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ... + + :type features: list of Features + :param features: the features to build this Template on + """ + # determine the calling form: either + # Template(Feature, args1, [args2, ...)] + # Template(Feature1(args), Feature2(args), ...) 
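+        # (e.g. the deprecated Template(Word, (-2,-1), (0,0)) takes the second
+        # branch below and is normalized to Template(Word([-2,-1]), Word([0])))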
+ if all(isinstance(f, Feature) for f in features): + self._features = features + elif issubclass(features[0], Feature) and all( + isinstance(a, tuple) for a in features[1:] + ): + self._features = [features[0](*tp) for tp in features[1:]] + else: + raise TypeError( + "expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ..." + ) + self.id = f"{len(self.ALLTEMPLATES):03d}" + self.ALLTEMPLATES.append(self) + + def __repr__(self): + return "{}({})".format( + self.__class__.__name__, + ",".join([str(f) for f in self._features]), + ) + + def applicable_rules(self, tokens, index, correct_tag): + if tokens[index][1] == correct_tag: + return [] + + # For each of this Template's features, find the conditions + # that are applicable for the given token. + # Then, generate one Rule for each combination of features + # (the crossproduct of the conditions). + + applicable_conditions = self._applicable_conditions(tokens, index) + xs = list(it.product(*applicable_conditions)) + return [Rule(self.id, tokens[index][1], correct_tag, tuple(x)) for x in xs] + + def _applicable_conditions(self, tokens, index): + """ + :returns: A set of all conditions for rules + that are applicable to C{tokens[index]}. + """ + conditions = [] + + for feature in self._features: + conditions.append([]) + for pos in feature.positions: + if not (0 <= index + pos < len(tokens)): + continue + value = feature.extract_property(tokens, index + pos) + conditions[-1].append((feature, value)) + return conditions + + def get_neighborhood(self, tokens, index): + # inherit docs from BrillTemplateI + + # applicable_rules(tokens, index, ...) depends on index. + neighborhood = {index} # set literal for python 2.7+ + + # applicable_rules(tokens, i, ...) depends on index if + # i+start < index <= i+end. + + allpositions = [0] + [p for feat in self._features for p in feat.positions] + start, end = min(allpositions), max(allpositions) + s = max(0, index + (-end)) + e = min(index + (-start) + 1, len(tokens)) + for i in range(s, e): + neighborhood.add(i) + return neighborhood + + @classmethod + def expand(cls, featurelists, combinations=None, skipintersecting=True): + + """ + Factory method to mass generate Templates from a list L of lists of Features. + + #With combinations=(k1, k2), the function will in all possible ways choose k1 ... k2 + #of the sublists in L; it will output all Templates formed by the Cartesian product + #of this selection, with duplicates and other semantically equivalent + #forms removed. Default for combinations is (1, len(L)). + + The feature lists may have been specified + manually, or generated from Feature.expand(). 
+    @classmethod
+    def expand(cls, featurelists, combinations=None, skipintersecting=True):
+        """
+        Factory method to mass-generate Templates from a list L of lists of Features.
+
+        With combinations=(k1, k2), the method will choose, in all possible ways,
+        k1 ... k2 of the sublists in L; it will output all Templates formed by the
+        Cartesian product of this selection, with duplicates and other semantically
+        equivalent forms removed. The default for combinations is (1, len(L)).
+
+        The feature lists may have been specified
+        manually, or generated from Feature.expand(). For instance,
+
+        >>> from nltk.tbl.template import Template
+        >>> from nltk.tag.brill import Word, Pos
+
+        Create some features:
+
+        >>> (wd_0, wd_01) = (Word([0]), Word([0,1]))
+        >>> (pos_m2, pos_m33) = (Pos([-2]), Pos([-3,-2,-1,0,1,2,3]))
+
+        >>> list(Template.expand([[wd_0], [pos_m2]]))
+        [Template(Word([0])), Template(Pos([-2])), Template(Pos([-2]),Word([0]))]
+
+        >>> list(Template.expand([[wd_0, wd_01], [pos_m2]]))
+        [Template(Word([0])), Template(Word([0, 1])), Template(Pos([-2])), Template(Pos([-2]),Word([0])), Template(Pos([-2]),Word([0, 1]))]
+
+        Expanding the two word features against two pos features, e.g.
+        Template.expand([[wd_0, wd_01], [Pos([-2]), Pos([-1])]]), would likewise
+        return a list of eight templates:
+
+            Template(Word([0])),
+            Template(Word([0, 1])),
+            Template(Pos([-2])),
+            Template(Pos([-1])),
+            Template(Pos([-2]),Word([0])),
+            Template(Pos([-1]),Word([0])),
+            Template(Pos([-2]),Word([0, 1])),
+            Template(Pos([-1]),Word([0, 1]))
+
+        Note: with Feature.expand(), it is very easy to generate more templates
+        than your system can handle -- for instance,
+
+        >>> wordtpls = Word.expand([-2,-1,0,1], [1,2], excludezero=False)
+        >>> len(wordtpls)
+        7
+
+        >>> postpls = Pos.expand([-3,-2,-1,0,1,2], [1,2,3], excludezero=True)
+        >>> len(postpls)
+        9
+
+        Now the Cartesian product of all non-empty combinations of two wordtpls and
+        two postpls, with semantic equivalents removed:
+
+        >>> templates = list(Template.expand([wordtpls, wordtpls, postpls, postpls]))
+        >>> len(templates)
+        713
+
+        Templates where one feature is a subset of another, such as
+        Template(Word([0,1]), Word([1])), will not appear in the output.
+        By default, this non-subset constraint is tightened to disjointness:
+        Templates of the type Template(Word([0,1]), Word([1,2])) will also be
+        filtered out. With skipintersecting=False, such Templates are allowed.
+
+        WARNING: this method makes it very easy to fill all your memory when
+        training generated templates on any real-world corpus.
+
+        :param featurelists: lists of Features, whose Cartesian product will return a set of Templates
+        :type featurelists: list of (list of Features)
+        :param combinations: given n featurelists: if combinations=k, all generated Templates will have
+            k features; if combinations=(k1,k2) they will have k1..k2 features; if None, defaults to 1..n
+        :type combinations: None, int, or (int, int)
+        :param skipintersecting: if True, do not output intersecting Templates (non-disjoint positions for some feature)
+        :type skipintersecting: bool
+        :returns: generator of Templates
+        """
+
+        def nonempty_powerset(xs):  # xs is a list
+            # after the itertools powerset recipe:
+            # nonempty_powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
+
+            # find the correct range, given combinations, one of {None, k, (k1,k2)}
+            k = combinations  # for brevity
+            combrange = (
+                (1, len(xs) + 1)  # n over 1 .. n over n (all non-empty combinations)
+                if k is None
+                else (k, k + 1)  # n over k only
+                if isinstance(k, int)
+                else (k[0], k[1] + 1)
+            )  # n over k1, n over k1+1, ...,
n over k2 + return it.chain.from_iterable( + it.combinations(xs, r) for r in range(*combrange) + ) + + seentemplates = set() + for picks in nonempty_powerset(featurelists): + for pick in it.product(*picks): + if any( + i != j and x.issuperset(y) + for (i, x) in enumerate(pick) + for (j, y) in enumerate(pick) + ): + continue + if skipintersecting and any( + i != j and x.intersects(y) + for (i, x) in enumerate(pick) + for (j, y) in enumerate(pick) + ): + continue + thistemplate = cls(*sorted(pick)) + strpick = str(thistemplate) + #!!FIXME --this is hackish + if strpick in seentemplates: # already added + cls._poptemplate() + continue + seentemplates.add(strpick) + yield thistemplate + + @classmethod + def _cleartemplates(cls): + cls.ALLTEMPLATES = [] + + @classmethod + def _poptemplate(cls): + return cls.ALLTEMPLATES.pop() if cls.ALLTEMPLATES else None diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cd9cfa1dce5c6b608b0c096cc7511a600a1ddeb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a960982862d36aa44c8b417538c19fe053b5d4f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49fe1fb00dd92d519e6cf29af262eb5a96d8c2da Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed14cf2583a319d026763a02d3efbc593c0e3548 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1966d268fc4266baa2e4be343fad1a43b918eeac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b9123718031cbbbe7604bf60375f54b721dcfae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3eda77ba7bb8e0e3385d9672bf081da8c8cc213 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc965a92028a940a316e8f134ad174b42b84262a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33d6c321c7d1ae8caaf39d6a5c18f34e201c3e73 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d3236b40a812fd34809ddb5329ec9a18e46dbd1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..823cdac7666f3c41471af0bbd84536d664fabb5b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f15bc82de5c7b450fc4c6eeca1ba7f95e03d4b6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..758aceff0e9281476a4c0edfc42520429d1fa699 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..715e07d604ee216129e4ddba6174979bcfec5c2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d52efcf946eae1957e1a710d31f61f215953e32
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fca26422c2795436774c73018c9cc7c5fc4ebea6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/destructive.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/destructive.py
new file mode 100644
index 0000000000000000000000000000000000000000..4beb395dde57bf73082dfa91f65ad625d199bc31
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/destructive.py
@@ -0,0 +1,233 @@
+# Natural Language Toolkit: NLTK's very own tokenizer.
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Liling Tan
+#         Tom Aarsen <> (modifications)
+# URL:
+# For license information, see LICENSE.TXT
+
+
+import re
+import warnings
+from typing import Iterator, List, Tuple
+
+from nltk.tokenize.api import TokenizerI
+from nltk.tokenize.util import align_tokens
+
+
+class MacIntyreContractions:
+    """
+    List of contractions adapted from Robert MacIntyre's tokenizer.
+    """
+
+    CONTRACTIONS2 = [
+        r"(?i)\b(can)(?#X)(not)\b",
+        r"(?i)\b(d)(?#X)('ye)\b",
+        r"(?i)\b(gim)(?#X)(me)\b",
+        r"(?i)\b(gon)(?#X)(na)\b",
+        r"(?i)\b(got)(?#X)(ta)\b",
+        r"(?i)\b(lem)(?#X)(me)\b",
+        r"(?i)\b(more)(?#X)('n)\b",
+        r"(?i)\b(wan)(?#X)(na)(?=\s)",
+    ]
+    CONTRACTIONS3 = [r"(?i) ('t)(?#X)(is)\b", r"(?i) ('t)(?#X)(was)\b"]
+    CONTRACTIONS4 = [r"(?i)\b(whad)(dd)(ya)\b", r"(?i)\b(wha)(t)(cha)\b"]
+
+
+class NLTKWordTokenizer(TokenizerI):
+    """
+    The NLTK tokenizer that improves upon the TreebankWordTokenizer.
+
+    This is the method that is invoked by ``word_tokenize()``. It assumes that the
+    text has already been segmented into sentences, e.g. using ``sent_tokenize()``.
+
+    The tokenizer is "destructive" in that the regexes applied will munge the
+    input string beyond reconstruction. It is possible to apply
+    `TreebankWordDetokenizer.detokenize` to the tokenized outputs of
+    `NLTKWordTokenizer.tokenize`, but there is no guarantee of recovering
+    the original string.
+    """
+
+    # Starting quotes.
+    STARTING_QUOTES = [
+        (re.compile("([«“‘„]|[`]+)", re.U), r" \1 "),
+        (re.compile(r"^\""), r"``"),
+        (re.compile(r"(``)"), r" \1 "),
+        (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "),
+        (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d|n)(\w)\b", re.U), r"\1 \2"),
+    ]
+
+    # Ending quotes.
+    ENDING_QUOTES = [
+        (re.compile("([»”’])", re.U), r" \1 "),
+        (re.compile(r"''"), " '' "),
+        (re.compile(r'"'), " '' "),
+        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
+        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
+    ]
+
+    # For improvements for starting/closing quotes from TreebankWordTokenizer,
+    # see discussion on https://github.com/nltk/nltk/pull/1437
+    # Adding to TreebankWordTokenizer, nltk.word_tokenize now splits on
+    # - chevron quotes u'\xab' and u'\xbb'
+    # - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d'
+    # See https://github.com/nltk/nltk/issues/1995#issuecomment-376741608
+    # Also, behavior of splitting on clitics now follows Stanford CoreNLP
+    # - clitics covered (?!re|ve|ll|m|t|s|d)(\w)\b
+
+    # Punctuation.
+    PUNCTUATION = [
+        (re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "),
+        (re.compile(r"([:,])([^\d])"), r" \1 \2"),
+        (re.compile(r"([:,])$"), r" \1 "),
+        (
+            re.compile(r"\.{2,}", re.U),
+            r" \g<0> ",
+        ),  # See https://github.com/nltk/nltk/pull/2322
+        (re.compile(r"[;@#$%&]"), r" \g<0> "),
+        (
+            re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'),
+            r"\1 \2\3 ",
+        ),  # Handles the final period.
+        (re.compile(r"[?!]"), r" \g<0> "),
+        (re.compile(r"([^'])' "), r"\1 ' "),
+        (
+            re.compile(r"[*]", re.U),
+            r" \g<0> ",
+        ),  # See https://github.com/nltk/nltk/pull/2322
+    ]
+
+    # Pads parentheses
+    PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ")
+
+    # Optionally convert parentheses and brackets to PTB symbols.
+    CONVERT_PARENTHESES = [
+        (re.compile(r"\("), "-LRB-"),
+        (re.compile(r"\)"), "-RRB-"),
+        (re.compile(r"\["), "-LSB-"),
+        (re.compile(r"\]"), "-RSB-"),
+        (re.compile(r"\{"), "-LCB-"),
+        (re.compile(r"\}"), "-RCB-"),
+    ]
+
+    DOUBLE_DASHES = (re.compile(r"--"), r" -- ")
+
+    # List of contractions adapted from Robert MacIntyre's tokenizer.
+    _contractions = MacIntyreContractions()
+    CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2))
+    CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3))
+
+    def tokenize(
+        self, text: str, convert_parentheses: bool = False, return_str: bool = False
+    ) -> List[str]:
+        r"""Return a tokenized copy of `text`.
+
+        >>> from nltk.tokenize import NLTKWordTokenizer
+        >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.'''
+        >>> NLTKWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE
+        ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36',
+        'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two',
+        'of', 'them.', 'Thanks', '.']
+        >>> NLTKWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE
+        ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36',
+        'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two',
+        'of', 'them.', 'Thanks', '.']
+
+        :param text: A string with a sentence or sentences.
+        :type text: str
+        :param convert_parentheses: if True, replace parentheses with PTB symbols,
+            e.g. `(` to `-LRB-`. Defaults to False.
+        :type convert_parentheses: bool, optional
+        :param return_str: If True, return tokens as a space-separated string;
+            defaults to False.
+        :type return_str: bool, optional
+        :return: List of tokens from `text`.
+        :rtype: List[str]
+        """
+        if return_str:
+            warnings.warn(
+                "Parameter 'return_str' has been deprecated and should no "
+                "longer be used.",
+                category=DeprecationWarning,
+                stacklevel=2,
+            )
+
+        for regexp, substitution in self.STARTING_QUOTES:
+            text = regexp.sub(substitution, text)
+
+        for regexp, substitution in self.PUNCTUATION:
+            text = regexp.sub(substitution, text)
+
+        # Handles parentheses.
+        regexp, substitution = self.PARENS_BRACKETS
+        text = regexp.sub(substitution, text)
+        # Optionally convert parentheses
+        if convert_parentheses:
+            for regexp, substitution in self.CONVERT_PARENTHESES:
+                text = regexp.sub(substitution, text)
+
+        # Handles double dash.
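+        # (a sketch of the effect: the DOUBLE_DASHES substitution turns
+        # "well--known" into "well -- known", so the final split() yields
+        # ['well', '--', 'known'])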
+ regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. + + >>> from nltk.tokenize import NLTKWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. + :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). + if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/repp.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/repp.py new file mode 100644 index 0000000000000000000000000000000000000000..6e0740a94645f14ec6162814cdb3c92167f503bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/repp.py @@ -0,0 +1,149 @@ +# Natural Language Toolkit: Interface to the Repp Tokenizer +# +# Copyright (C) 2001-2015 NLTK Project +# Authors: Rebecca Dridan and Stephan Oepen +# Contributors: Liling Tan +# +# URL: +# For license information, see LICENSE.TXT + +import os +import re +import subprocess +import sys +import tempfile + +from nltk.data import ZipFilePathPointer +from nltk.internals import find_dir +from nltk.tokenize.api import TokenizerI + + +class ReppTokenizer(TokenizerI): + """ + A class for word tokenization using the REPP parser described in + Rebecca Dridan and Stephan Oepen (2012) Tokenization: Returning to a + Long Solved Problem - A Survey, Contrastive Experiment, Recommendations, + and Toolkit. In ACL. http://anthology.aclweb.org/P/P12/P12-2.pdf#page=406 + + >>> sents = ['Tokenization is widely regarded as a solved problem due to the high accuracy that rulebased tokenizers achieve.' 
, + ... 'But rule-based tokenizers are hard to maintain and their rules language specific.' , + ... 'We evaluated our method on three languages and obtained error rates of 0.27% (English), 0.35% (Dutch) and 0.76% (Italian) for our best models.' + ... ] + >>> tokenizer = ReppTokenizer('/home/alvas/repp/') # doctest: +SKIP + >>> for sent in sents: # doctest: +SKIP + ... tokenizer.tokenize(sent) # doctest: +SKIP + ... + (u'Tokenization', u'is', u'widely', u'regarded', u'as', u'a', u'solved', u'problem', u'due', u'to', u'the', u'high', u'accuracy', u'that', u'rulebased', u'tokenizers', u'achieve', u'.') + (u'But', u'rule-based', u'tokenizers', u'are', u'hard', u'to', u'maintain', u'and', u'their', u'rules', u'language', u'specific', u'.') + (u'We', u'evaluated', u'our', u'method', u'on', u'three', u'languages', u'and', u'obtained', u'error', u'rates', u'of', u'0.27', u'%', u'(', u'English', u')', u',', u'0.35', u'%', u'(', u'Dutch', u')', u'and', u'0.76', u'%', u'(', u'Italian', u')', u'for', u'our', u'best', u'models', u'.') + + >>> for sent in tokenizer.tokenize_sents(sents): # doctest: +SKIP + ... print(sent) # doctest: +SKIP + ... + (u'Tokenization', u'is', u'widely', u'regarded', u'as', u'a', u'solved', u'problem', u'due', u'to', u'the', u'high', u'accuracy', u'that', u'rulebased', u'tokenizers', u'achieve', u'.') + (u'But', u'rule-based', u'tokenizers', u'are', u'hard', u'to', u'maintain', u'and', u'their', u'rules', u'language', u'specific', u'.') + (u'We', u'evaluated', u'our', u'method', u'on', u'three', u'languages', u'and', u'obtained', u'error', u'rates', u'of', u'0.27', u'%', u'(', u'English', u')', u',', u'0.35', u'%', u'(', u'Dutch', u')', u'and', u'0.76', u'%', u'(', u'Italian', u')', u'for', u'our', u'best', u'models', u'.') + >>> for sent in tokenizer.tokenize_sents(sents, keep_token_positions=True): # doctest: +SKIP + ... print(sent) # doctest: +SKIP + ... + [(u'Tokenization', 0, 12), (u'is', 13, 15), (u'widely', 16, 22), (u'regarded', 23, 31), (u'as', 32, 34), (u'a', 35, 36), (u'solved', 37, 43), (u'problem', 44, 51), (u'due', 52, 55), (u'to', 56, 58), (u'the', 59, 62), (u'high', 63, 67), (u'accuracy', 68, 76), (u'that', 77, 81), (u'rulebased', 82, 91), (u'tokenizers', 92, 102), (u'achieve', 103, 110), (u'.', 110, 111)] + [(u'But', 0, 3), (u'rule-based', 4, 14), (u'tokenizers', 15, 25), (u'are', 26, 29), (u'hard', 30, 34), (u'to', 35, 37), (u'maintain', 38, 46), (u'and', 47, 50), (u'their', 51, 56), (u'rules', 57, 62), (u'language', 63, 71), (u'specific', 72, 80), (u'.', 80, 81)] + [(u'We', 0, 2), (u'evaluated', 3, 12), (u'our', 13, 16), (u'method', 17, 23), (u'on', 24, 26), (u'three', 27, 32), (u'languages', 33, 42), (u'and', 43, 46), (u'obtained', 47, 55), (u'error', 56, 61), (u'rates', 62, 67), (u'of', 68, 70), (u'0.27', 71, 75), (u'%', 75, 76), (u'(', 77, 78), (u'English', 78, 85), (u')', 85, 86), (u',', 86, 87), (u'0.35', 88, 92), (u'%', 92, 93), (u'(', 94, 95), (u'Dutch', 95, 100), (u')', 100, 101), (u'and', 102, 105), (u'0.76', 106, 110), (u'%', 110, 111), (u'(', 112, 113), (u'Italian', 113, 120), (u')', 120, 121), (u'for', 122, 125), (u'our', 126, 129), (u'best', 130, 134), (u'models', 135, 141), (u'.', 141, 142)] + """ + + def __init__(self, repp_dir, encoding="utf8"): + self.repp_dir = self.find_repptokenizer(repp_dir) + # Set a directory to store the temporary files. + self.working_dir = tempfile.gettempdir() + # Set an encoding for the input strings. + self.encoding = encoding + + def tokenize(self, sentence): + """ + Use Repp to tokenize a single sentence. 
+
+        :param sentence: A single sentence string.
+        :type sentence: str
+        :return: A tuple of tokens.
+        :rtype: tuple(str)
+        """
+        return next(self.tokenize_sents([sentence]))
+
+    def tokenize_sents(self, sentences, keep_token_positions=False):
+        """
+        Tokenize multiple sentences using Repp.
+
+        :param sentences: A list of sentence strings.
+        :type sentences: list(str)
+        :param keep_token_positions: If True, also yield the (start, end)
+            character offsets of each token.
+        :type keep_token_positions: bool
+        :return: An iterable of tuples of tokens.
+        :rtype: iter(tuple(str))
+        """
+        with tempfile.NamedTemporaryFile(
+            prefix="repp_input.", dir=self.working_dir, mode="w", delete=False
+        ) as input_file:
+            # Write sentences to temporary input file.
+            for sent in sentences:
+                input_file.write(str(sent) + "\n")
+        # Generate command to run REPP.
+        cmd = self.generate_repp_command(input_file.name)
+        # Decode the stdout and strip the trailing newline.
+        repp_output = self._execute(cmd).decode(self.encoding).strip()
+        for tokenized_sent in self.parse_repp_outputs(repp_output):
+            if not keep_token_positions:
+                # Remove token position information.
+                tokenized_sent, starts, ends = zip(*tokenized_sent)
+            yield tokenized_sent
+
+    def generate_repp_command(self, inputfilename):
+        """
+        Generate the REPP command to be run at the terminal.
+
+        :param inputfilename: path to the input file
+        :type inputfilename: str
+        """
+        cmd = [self.repp_dir + "/src/repp"]
+        cmd += ["-c", self.repp_dir + "/erg/repp.set"]
+        cmd += ["--format", "triple"]
+        cmd += [inputfilename]
+        return cmd
+
+    @staticmethod
+    def _execute(cmd):
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        return stdout
+
+    @staticmethod
+    def parse_repp_outputs(repp_output):
+        """
+        Parse the tri-tuple format that REPP emits with the
+        "--format triple" option, and return a generator over the tokenized
+        sentences.
+
+        :param repp_output: the raw output of the REPP command
+        :type repp_output: str
+        :return: an iterable of the tokenized sentences as lists of
+            (token, start, end) tuples
+        :rtype: iter(list(tuple))
+        """
+        line_regex = re.compile(r"^\((\d+), (\d+), (.+)\)$", re.MULTILINE)
+        for section in repp_output.split("\n\n"):
+            words_with_positions = [
+                (token, int(start), int(end))
+                for start, end, token in line_regex.findall(section)
+            ]
+            yield words_with_positions
+
+    def find_repptokenizer(self, repp_dirname):
+        """
+        Find the REPP tokenizer binary and its *repp.set* config file.
+        """
+        if os.path.exists(repp_dirname):  # If a full path is given.
+            _repp_dir = repp_dirname
+        else:  # Try to find path to REPP directory in environment variables.
+            _repp_dir = find_dir(repp_dirname, env_vars=("REPP_TOKENIZER",))
+        # Check for the REPP binary and erg/repp.set config file.
+        assert os.path.exists(_repp_dir + "/src/repp")
+        assert os.path.exists(_repp_dir + "/erg/repp.set")
+        return _repp_dir
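A minimal sketch (assuming only that nltk is importable): since parse_repp_outputs()
is a pure static method, the "--format triple" parsing can be exercised without a
REPP installation. The triple string below is hypothetical sample output.

    from nltk.tokenize.repp import ReppTokenizer

    out = "(0, 5, Hello)\n(6, 11, world)\n(11, 12, .)"
    print(list(ReppTokenizer.parse_repp_outputs(out)))
    # [[('Hello', 0, 5), ('world', 6, 11), ('.', 11, 12)]]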
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/simple.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..f87b60a274c8121303ff60f203e1f3b991da1547
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/simple.py
@@ -0,0 +1,137 @@
+# Natural Language Toolkit: Simple Tokenizers
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+#         Steven Bird
+# URL:
+# For license information, see LICENSE.TXT
+
+r"""
+Simple Tokenizers
+
+These tokenizers divide strings into substrings using the string
+``split()`` method.
+When tokenizing with a particular delimiter string, it is more
+efficient to call the string ``split()`` method directly.
+
+With the exception of ``line_tokenize()``, the simple tokenizers are
+*not* available as separate functions; instead, you should just use the
+string ``split()`` method directly:
+
+    >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
+    >>> s.split() # doctest: +NORMALIZE_WHITESPACE
+    ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
+    'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
+    >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE
+    ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '',
+    'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.']
+    >>> s.split('\n') # doctest: +NORMALIZE_WHITESPACE
+    ['Good muffins cost $3.88', 'in New York. Please buy me',
+    'two of them.', '', 'Thanks.']
+
+The simple tokenizers are mainly useful because they follow the
+standard ``TokenizerI`` interface, and so can be used with any code
+that expects a tokenizer. For example, these tokenizers can be used
+to specify the tokenization conventions when building a `CorpusReader`.
+
+"""
+
+from nltk.tokenize.api import StringTokenizer, TokenizerI
+from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize
+
+
+class SpaceTokenizer(StringTokenizer):
+    r"""Tokenize a string using the space character as a delimiter,
+    which is the same as ``s.split(' ')``.
+
+    >>> from nltk.tokenize import SpaceTokenizer
+    >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
+    >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE
+    ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '',
+    'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.']
+    """
+
+    _string = " "
+
+
+class TabTokenizer(StringTokenizer):
+    r"""Tokenize a string using the tab character as a delimiter,
+    the same as ``s.split('\t')``.
+
+    >>> from nltk.tokenize import TabTokenizer
+    >>> TabTokenizer().tokenize('a\tb c\n\t d')
+    ['a', 'b c\n', ' d']
+    """
+
+    _string = "\t"
+
+
+class CharTokenizer(StringTokenizer):
+    """Tokenize a string into individual characters. If this functionality
+    is ever required directly, use ``for char in string``.
+    """
+
+    def tokenize(self, s):
+        return list(s)
+
+    def span_tokenize(self, s):
+        # yields (0, 1), (1, 2), ... -- one single-character span per position
+        yield from enumerate(range(1, len(s) + 1))
+
+
+class LineTokenizer(TokenizerI):
+    r"""Tokenize a string into its lines, optionally discarding blank lines.
+    This is similar to ``s.split('\n')``.
+
+    >>> from nltk.tokenize import LineTokenizer
+    >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
+    >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE
+    ['Good muffins cost $3.88', 'in New York. Please buy me',
+    'two of them.', '', 'Thanks.']
+    >>> # same as [l for l in s.split('\n') if l.strip()]:
+    >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE
+    ['Good muffins cost $3.88', 'in New York. Please buy me',
+    'two of them.', 'Thanks.']
+
+    :param blanklines: Indicates how blank lines should be handled. Valid values are:
+
+        - ``discard``: strip blank lines out of the token list before returning it.
+          A line is considered blank if it contains only whitespace characters.
+        - ``keep``: leave all blank lines in the token list.
+        - ``discard-eof``: if the string ends with a newline, then do not generate
+          a corresponding token ``''`` after that newline.
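+
+    For example, ``discard-eof`` drops a trailing blank line but keeps
+    interior ones (a sketch, derived from the ``tokenize()`` code below):
+
+        >>> LineTokenizer(blanklines='discard-eof').tokenize('a\nb\n\n')
+        ['a', 'b']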
+ """ + + def __init__(self, blanklines="discard"): + valid_blanklines = ("discard", "keep", "discard-eof") + if blanklines not in valid_blanklines: + raise ValueError( + "Blank lines must be one of: %s" % " ".join(valid_blanklines) + ) + + self._blanklines = blanklines + + def tokenize(self, s): + lines = s.splitlines() + # If requested, strip off blank lines. + if self._blanklines == "discard": + lines = [l for l in lines if l.rstrip()] + elif self._blanklines == "discard-eof": + if lines and not lines[-1].strip(): + lines.pop() + return lines + + # discard-eof not implemented + def span_tokenize(self, s): + if self._blanklines == "keep": + yield from string_span_tokenize(s, r"\n") + else: + yield from regexp_span_tokenize(s, r"\n(\s+\n)*") + + +###################################################################### +# { Tokenization Functions +###################################################################### +# XXX: it is stated in module docs that there is no function versions + + +def line_tokenize(text, blanklines="discard"): + return LineTokenizer(blanklines).tokenize(text) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/texttiling.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/texttiling.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b770b2d08a998538d85803126e74cc13139d11 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/texttiling.py @@ -0,0 +1,475 @@ +# Natural Language Toolkit: TextTiling +# +# Copyright (C) 2001-2023 NLTK Project +# Author: George Boutsioukis +# +# URL: +# For license information, see LICENSE.TXT + +import math +import re + +try: + import numpy +except ImportError: + pass + +from nltk.tokenize.api import TokenizerI + +BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1 +LC, HC = 0, 1 +DEFAULT_SMOOTHING = [0] + + +class TextTilingTokenizer(TokenizerI): + """Tokenize a document into topical sections using the TextTiling algorithm. + This algorithm detects subtopic shifts based on the analysis of lexical + co-occurrence patterns. + + The process starts by tokenizing the text into pseudosentences of + a fixed size w. Then, depending on the method used, similarity + scores are assigned at sentence gaps. The algorithm proceeds by + detecting the peak differences between these scores and marking + them as boundaries. The boundaries are normalized to the closest + paragraph break and the segmented text is returned. + + :param w: Pseudosentence size + :type w: int + :param k: Size (in sentences) of the block used in the block comparison method + :type k: int + :param similarity_method: The method used for determining similarity scores: + `BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`. 
+ :type similarity_method: constant + :param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus) + :type stopwords: list(str) + :param smoothing_method: The method used for smoothing the score plot: + `DEFAULT_SMOOTHING` (default) + :type smoothing_method: constant + :param smoothing_width: The width of the window used by the smoothing method + :type smoothing_width: int + :param smoothing_rounds: The number of smoothing passes + :type smoothing_rounds: int + :param cutoff_policy: The policy used to determine the number of boundaries: + `HC` (default) or `LC` + :type cutoff_policy: constant + + >>> from nltk.corpus import brown + >>> tt = TextTilingTokenizer(demo_mode=True) + >>> text = brown.raw()[:4000] + >>> s, ss, d, b = tt.tokenize(text) + >>> b + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0] + """ + + def __init__( + self, + w=20, + k=10, + similarity_method=BLOCK_COMPARISON, + stopwords=None, + smoothing_method=DEFAULT_SMOOTHING, + smoothing_width=2, + smoothing_rounds=1, + cutoff_policy=HC, + demo_mode=False, + ): + + if stopwords is None: + from nltk.corpus import stopwords + + stopwords = stopwords.words("english") + self.__dict__.update(locals()) + del self.__dict__["self"] + + def tokenize(self, text): + """Return a tokenized copy of *text*, where each "token" represents + a separate topic.""" + + lowercase_text = text.lower() + paragraph_breaks = self._mark_paragraph_breaks(text) + text_length = len(lowercase_text) + + # Tokenization step starts here + + # Remove punctuation + nopunct_text = "".join( + c for c in lowercase_text if re.match(r"[a-z\-' \n\t]", c) + ) + nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text) + + tokseqs = self._divide_to_tokensequences(nopunct_text) + + # The morphological stemming step mentioned in the TextTile + # paper is not implemented. A comment in the original C + # implementation states that it offers no benefit to the + # process. It might be interesting to test the existing + # stemmers though. 
+ # words = _stem_words(words) + + # Filter stopwords + for ts in tokseqs: + ts.wrdindex_list = [ + wi for wi in ts.wrdindex_list if wi[0] not in self.stopwords + ] + + token_table = self._create_token_table(tokseqs, nopunct_par_breaks) + # End of the Tokenization step + + # Lexical score determination + if self.similarity_method == BLOCK_COMPARISON: + gap_scores = self._block_comparison(tokseqs, token_table) + elif self.similarity_method == VOCABULARY_INTRODUCTION: + raise NotImplementedError("Vocabulary introduction not implemented") + else: + raise ValueError( + f"Similarity method {self.similarity_method} not recognized" + ) + + if self.smoothing_method == DEFAULT_SMOOTHING: + smooth_scores = self._smooth_scores(gap_scores) + else: + raise ValueError(f"Smoothing method {self.smoothing_method} not recognized") + # End of Lexical score Determination + + # Boundary identification + depth_scores = self._depth_scores(smooth_scores) + segment_boundaries = self._identify_boundaries(depth_scores) + + normalized_boundaries = self._normalize_boundaries( + text, segment_boundaries, paragraph_breaks + ) + # End of Boundary Identification + segmented_text = [] + prevb = 0 + + for b in normalized_boundaries: + if b == 0: + continue + segmented_text.append(text[prevb:b]) + prevb = b + + if prevb < text_length: # append any text that may be remaining + segmented_text.append(text[prevb:]) + + if not segmented_text: + segmented_text = [text] + + if self.demo_mode: + return gap_scores, smooth_scores, depth_scores, segment_boundaries + return segmented_text + + def _block_comparison(self, tokseqs, token_table): + """Implements the block comparison method""" + + def blk_frq(tok, block): + ts_occs = filter(lambda o: o[0] in block, token_table[tok].ts_occurences) + freq = sum(tsocc[1] for tsocc in ts_occs) + return freq + + gap_scores = [] + numgaps = len(tokseqs) - 1 + + for curr_gap in range(numgaps): + score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0 + score = 0.0 + # adjust window size for boundary conditions + if curr_gap < self.k - 1: + window_size = curr_gap + 1 + elif curr_gap > numgaps - self.k: + window_size = numgaps - curr_gap + else: + window_size = self.k + + b1 = [ts.index for ts in tokseqs[curr_gap - window_size + 1 : curr_gap + 1]] + b2 = [ts.index for ts in tokseqs[curr_gap + 1 : curr_gap + window_size + 1]] + + for t in token_table: + score_dividend += blk_frq(t, b1) * blk_frq(t, b2) + score_divisor_b1 += blk_frq(t, b1) ** 2 + score_divisor_b2 += blk_frq(t, b2) ** 2 + try: + score = score_dividend / math.sqrt(score_divisor_b1 * score_divisor_b2) + except ZeroDivisionError: + pass # score += 0.0 + + gap_scores.append(score) + + return gap_scores + + def _smooth_scores(self, gap_scores): + "Wraps the smooth function from the SciPy Cookbook" + return list( + smooth(numpy.array(gap_scores[:]), window_len=self.smoothing_width + 1) + ) + + def _mark_paragraph_breaks(self, text): + """Identifies indented text or line breaks as the beginning of + paragraphs""" + MIN_PARAGRAPH = 100 + pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*") + matches = pattern.finditer(text) + + last_break = 0 + pbreaks = [0] + for pb in matches: + if pb.start() - last_break < MIN_PARAGRAPH: + continue + else: + pbreaks.append(pb.start()) + last_break = pb.start() + + return pbreaks + + def _divide_to_tokensequences(self, text): + "Divides the text into pseudosentences of fixed size" + w = self.w + wrdindex_list = [] + matches = re.finditer(r"\w+", text) + for match in matches: + 
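+            # record each word together with its character offset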
            wrdindex_list.append((match.group(), match.start()))
+        # Use integer division so that TokenSequence indices are ints.
+        return [
+            TokenSequence(i // w, wrdindex_list[i : i + w])
+            for i in range(0, len(wrdindex_list), w)
+        ]
+
+    def _create_token_table(self, token_sequences, par_breaks):
+        "Creates a table of TokenTableFields"
+        token_table = {}
+        current_par = 0
+        current_tok_seq = 0
+        pb_iter = iter(par_breaks)
+        current_par_break = next(pb_iter)
+        if current_par_break == 0:
+            try:
+                current_par_break = next(pb_iter)  # skip break at 0
+            except StopIteration as e:
+                raise ValueError(
+                    "No paragraph breaks were found (text too short, perhaps?)"
+                ) from e
+        for ts in token_sequences:
+            for word, index in ts.wrdindex_list:
+                try:
+                    while index > current_par_break:
+                        current_par_break = next(pb_iter)
+                        current_par += 1
+                except StopIteration:
+                    # hit bottom
+                    pass
+
+                if word in token_table:
+                    token_table[word].total_count += 1
+
+                    if token_table[word].last_par != current_par:
+                        token_table[word].last_par = current_par
+                        token_table[word].par_count += 1
+
+                    if token_table[word].last_tok_seq != current_tok_seq:
+                        token_table[word].last_tok_seq = current_tok_seq
+                        token_table[word].ts_occurences.append([current_tok_seq, 1])
+                    else:
+                        token_table[word].ts_occurences[-1][1] += 1
+                else:  # new word
+                    token_table[word] = TokenTableField(
+                        first_pos=index,
+                        ts_occurences=[[current_tok_seq, 1]],
+                        total_count=1,
+                        par_count=1,
+                        last_par=current_par,
+                        last_tok_seq=current_tok_seq,
+                    )
+
+            current_tok_seq += 1
+
+        return token_table
+
+    def _identify_boundaries(self, depth_scores):
+        """Identifies boundaries at the peaks of similarity score
+        differences"""
+
+        boundaries = [0 for x in depth_scores]
+
+        avg = sum(depth_scores) / len(depth_scores)
+        stdev = numpy.std(depth_scores)
+
+        if self.cutoff_policy == LC:
+            cutoff = avg - stdev
+        else:
+            cutoff = avg - stdev / 2.0
+
+        depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
+        depth_tuples.reverse()
+        hp = list(filter(lambda x: x[0] > cutoff, depth_tuples))
+
+        for dt in hp:
+            boundaries[dt[1]] = 1
+            for dt2 in hp:  # undo if there is a boundary close already
+                if (
+                    dt[1] != dt2[1]
+                    and abs(dt2[1] - dt[1]) < 4
+                    and boundaries[dt2[1]] == 1
+                ):
+                    boundaries[dt[1]] = 0
+        return boundaries
+
+    def _depth_scores(self, scores):
+        """Calculates the depth of each gap, i.e. the average difference
+        between the left and right peaks and the gap's score"""
+
+        depth_scores = [0 for x in scores]
+        # Clip boundaries: this rests on the rule of thumb (my thumb)
+        # that a section shouldn't be smaller than at least 2
+        # pseudosentences for small texts and around 5 for larger ones.
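+        # (A worked example of the clip heuristic below: with 40 gap scores,
+        # clip = min(max(40 // 10, 2), 5) = 4, so the first and last four
+        # gaps can never become boundaries.)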
+
+        clip = min(max(len(scores) // 10, 2), 5)
+        index = clip
+
+        for gapscore in scores[clip:-clip]:
+            lpeak = gapscore
+            for score in scores[index::-1]:
+                if score >= lpeak:
+                    lpeak = score
+                else:
+                    break
+            rpeak = gapscore
+            for score in scores[index:]:
+                if score >= rpeak:
+                    rpeak = score
+                else:
+                    break
+            depth_scores[index] = lpeak + rpeak - 2 * gapscore
+            index += 1
+
+        return depth_scores
+
+    def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
+        """Normalize the boundaries identified to the original text's
+        paragraph breaks"""
+
+        norm_boundaries = []
+        char_count, word_count, gaps_seen = 0, 0, 0
+        seen_word = False
+
+        for char in text:
+            char_count += 1
+            if char in " \t\n" and seen_word:
+                seen_word = False
+                word_count += 1
+            if char not in " \t\n" and not seen_word:
+                seen_word = True
+            if gaps_seen < len(boundaries) and word_count > (
+                max(gaps_seen * self.w, self.w)
+            ):
+                if boundaries[gaps_seen] == 1:
+                    # find closest paragraph break
+                    best_fit = len(text)
+                    for br in paragraph_breaks:
+                        if best_fit > abs(br - char_count):
+                            best_fit = abs(br - char_count)
+                            bestbr = br
+                        else:
+                            break
+                    if bestbr not in norm_boundaries:  # avoid duplicates
+                        norm_boundaries.append(bestbr)
+                gaps_seen += 1
+
+        return norm_boundaries
+
+
+class TokenTableField:
+    """A field in the token table holding parameters for each token,
+    used later in the process"""
+
+    def __init__(
+        self,
+        first_pos,
+        ts_occurences,
+        total_count=1,
+        par_count=1,
+        last_par=0,
+        last_tok_seq=None,
+    ):
+        self.__dict__.update(locals())
+        del self.__dict__["self"]
+
+
+class TokenSequence:
+    "A token list with its original length and its index"
+
+    def __init__(self, index, wrdindex_list, original_length=None):
+        original_length = original_length or len(wrdindex_list)
+        self.__dict__.update(locals())
+        del self.__dict__["self"]
+
+
+# Pasted from the SciPy cookbook: https://www.scipy.org/Cookbook/SignalSmooth
+def smooth(x, window_len=11, window="flat"):
+    """smooth the data using a window with requested size.
+
+    This method is based on the convolution of a scaled window with the signal.
+    The signal is prepared by introducing reflected copies of the signal
+    (with the window size) in both ends so that transient parts are minimized
+    in the beginning and end part of the output signal.
+
+    :param x: the input signal
+    :param window_len: the dimension of the smoothing window; should be an odd integer
+    :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
+        a flat window will produce a moving average smoothing.
+
+    :return: the smoothed signal
+
+    example::
+
+        t = numpy.linspace(-2, 2, 50)
+        x = numpy.sin(t) + numpy.random.randn(len(t)) * 0.1
+        y = smooth(x)
+
+    :see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
+        scipy.signal.lfilter
+
+    TODO: the window parameter could be the window itself as an array instead of a string
+    """
+
+    if x.ndim != 1:
+        raise ValueError("smooth only accepts 1 dimension arrays.")
+
+    if x.size < window_len:
+        raise ValueError("Input vector needs to be bigger than window size.")
+
+    if window_len < 3:
+        return x
+
+    if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
+        raise ValueError(
+            "Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
+        )
+
+    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
+
+    # print(len(s))
+    if window == "flat":  # moving average
+        w = numpy.ones(window_len, "d")
+    else:
+        # look up the requested numpy window function by name
+        w = eval("numpy."
+ window + "(window_len)") + + y = numpy.convolve(w / w.sum(), s, mode="same") + + return y[window_len - 1 : -window_len + 1] + + +def demo(text=None): + from matplotlib import pylab + + from nltk.corpus import brown + + tt = TextTilingTokenizer(demo_mode=True) + if text is None: + text = brown.raw()[:10000] + s, ss, d, b = tt.tokenize(text) + pylab.xlabel("Sentence Gap index") + pylab.ylabel("Gap Scores") + pylab.plot(range(len(s)), s, label="Gap Scores") + pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores") + pylab.plot(range(len(d)), d, label="Depth scores") + pylab.stem(range(len(b)), b) + pylab.legend() + pylab.show() diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..443445cfb3aa4aaefb1d387f41e48be29e1cd778 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/RECORD @@ -0,0 +1,24 @@ +numexpr-2.10.0.dist-info/AUTHORS.txt,sha256=PaKTkzR-lu5K3JSzDqN4ATMT8baQKypX8177j7WsLkg,1053 +numexpr-2.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numexpr-2.10.0.dist-info/LICENSE.txt,sha256=A_3YAlpObXDK8ncg6Rp5rY2f5CLr4YIPXOxh3OrnCf8,1193 +numexpr-2.10.0.dist-info/METADATA,sha256=flleeACi8UBaSkttO8ArKQQyf28Sb69tkca5m7nLLBk,7870 +numexpr-2.10.0.dist-info/RECORD,, +numexpr-2.10.0.dist-info/WHEEL,sha256=CzQQWV-lNyM92gr3iaBk8dvO35YDHRxgzkZ-dxumUIM,152 +numexpr-2.10.0.dist-info/top_level.txt,sha256=5R5OAhc7k6sUajqt4vp-368lWWhb23CoC6LltIMNqNA,8 +numexpr/__init__.py,sha256=BveiRb343utwo5WHQhDDXW-rzj8xempYWkpUKPP42-A,2279 +numexpr/__pycache__/__init__.cpython-310.pyc,, +numexpr/__pycache__/cpuinfo.cpython-310.pyc,, +numexpr/__pycache__/expressions.cpython-310.pyc,, +numexpr/__pycache__/necompiler.cpython-310.pyc,, +numexpr/__pycache__/utils.cpython-310.pyc,, +numexpr/__pycache__/version.cpython-310.pyc,, +numexpr/cpuinfo.py,sha256=AFBUER-10rJclvgH6nGgOj7q-HahAoj3_SxftaUnRkQ,25164 +numexpr/expressions.py,sha256=J9e_mlMRDfCQlridk48vwgnyFIhlCzA3voNLJpowheY,16138 +numexpr/interpreter.cpython-310-x86_64-linux-gnu.so,sha256=vLDGSB-1OaDSBamLitmGDzImo7gOw92BIAlwQVGZyrU,1150680 +numexpr/necompiler.py,sha256=OW2gsD94jzflN_NvIR0USKF7l2dgcHYwHlDuUWrgd_U,34845 +numexpr/tests/__init__.py,sha256=GBFxEA1-KjffdC-yum4Zj4u-f_zYVhSZIMPv-bOKoSQ,447 +numexpr/tests/__pycache__/__init__.cpython-310.pyc,, +numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc,, +numexpr/tests/test_numexpr.py,sha256=zyMW-WxvWzdYhc6C0wF7GgvCHoiqOe3tFGj-Rwc_PpI,50968 +numexpr/utils.py,sha256=lly5fvICKOR0_2aHju4CcA1A_nOacrZKUrUr9V5NUKs,7725 +numexpr/version.py,sha256=lsi9_VC5i0byRpzLsw5tH4fFBnoobWU6AJgnEFV3v_w,119 diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..9bb86cf30c63df9170e9af3dd246ce6f41270402 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/top_level.txt new file mode 100644 index 
0000000000000000000000000000000000000000..ba1904bf1533907788338997eb320918b2578528 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/top_level.txt @@ -0,0 +1 @@ +numexpr