diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/attr/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..37a208732acf774ed369811d51f1393798f22148 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/__init__.pyi @@ -0,0 +1,555 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Mapping, + Optional, + Protocol, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = Union[bool, Callable[[Any], Any]] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_ConverterType = Callable[[Any], Any] +_FilterType = Callable[["Attribute[_T]", _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] +_OnSetAttrArgType = Union[ + _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType +] +_FieldTransformer = Callable[ + [type, List["Attribute[Any]"]], List["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# We subclass this here to keep the protocol's qualified name clean. +class AttrsInstance(AttrsInstance_, Protocol): + pass + +_A = TypeVar("_A", bound=type[AttrsInstance]) + +class _Nothing(enum.Enum): + NOTHING = enum.auto() + +NOTHING = _Nothing.NOTHING + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. +if sys.version_info >= (3, 8): + from typing import Literal + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], + ) -> _T: ... + @overload + def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], + ) -> _T: ... + +else: + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., + ) -> _T: ... 
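+# A minimal sketch of the pattern these overloads enable (an illustrative
+# comment, not part of the upstream stub; `C` is a hypothetical class):
+#
+#     import attr
+#
+#     @attr.s(auto_attribs=True)
+#     class C:
+#         x: List[int] = attr.Factory(list)  # each instance gets a fresh list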
+
+class Attribute(Generic[_T]):
+    name: str
+    default: Optional[_T]
+    validator: Optional[_ValidatorType[_T]]
+    repr: _ReprArgType
+    cmp: _EqOrderType
+    eq: _EqOrderType
+    order: _EqOrderType
+    hash: Optional[bool]
+    init: bool
+    converter: Optional[_ConverterType]
+    metadata: Dict[Any, Any]
+    type: Optional[Type[_T]]
+    kw_only: bool
+    on_setattr: _OnSetAttrType
+    alias: Optional[str]
+
+    def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+#   - Pros: Handles simple cases correctly
+#   - Cons: Might produce less informative errors in the case of conflicting
+#     TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+#   - Pros: Better error messages than #1 for conflicting TypeVars
+#   - Cons: Terrible error messages for validator checks.
+#   e.g. attr.ib(type=int, validator=validate_str)
+#        -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+#   - Pros: Simple here, and we could customize the plugin with our own errors.
+#   - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+#     attr() -> Any
+#     attr(8) -> int
+#     attr(validator=<some callable>) -> Whatever the callable expects.
+# This makes this type of assignments possible:
+#     x: int = attr(8)
+#
+# This form catches explicit None or no default but with no other arguments
+# returns Any.
+@overload
+def attrib(
+    default: None = ...,
+    validator: None = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: None = ...,
+    converter: None = ...,
+    factory: None = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+) -> Any: ...

+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def attrib(
+    default: None = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: Optional[Type[_T]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+) -> _T: ...

+# This form catches an explicit default argument.
+@overload
+def attrib(
+    default: _T,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: Optional[Type[_T]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+) -> _T: ...

+# This form covers type=non-Type: e.g.
forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> Any: ... +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> Any: ... 
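+
+# Illustrative sketch (not part of the upstream stub): the overloads above let
+# `field` infer a field's type from its default, converter, or validator:
+#
+#     from attr import define, field
+#
+#     @define
+#     class User:  # hypothetical class
+#         name: str = field()
+#         age: int = field(default=0, converter=int)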
+@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., + unsafe_hash: Optional[bool] = ..., +) -> _C: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., + unsafe_hash: Optional[bool] = ..., +) -> Callable[[_C], _C]: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... 
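+
+# Both decorator call forms type-check against the overloads above (an
+# illustrative comment, not part of the upstream stub):
+#
+#     @define               # bare decorator -> first overload
+#     class A:
+#         x: int
+#
+#     @define(frozen=True)  # decorator factory -> second overload
+#     class B:
+#         x: int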
+ +mutable = define + +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... +def fields(cls: Type[AttrsInstance]) -> Any: ... +def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + attribs: Optional[List[Attribute[Any]]] = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + class_body: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + collect_by_mro: bool = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: Optional[bool] = ..., +) -> Dict[str, Any]: ... 
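+
+# Illustrative sketch (not part of the upstream stub) of the `asdict`
+# signature declared above:
+#
+#     import attr
+#
+#     @attr.s(auto_attribs=True)
+#     class Point:  # hypothetical class
+#         x: int
+#         y: int
+#
+#     attr.asdict(Point(1, 2))  # -> {"x": 1, "y": 2}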
+ +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> TypeGuard[Type[AttrsInstance]]: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... + +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7959f87549c374d0dfcca03790188fb25b02c37 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_cmp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_cmp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbc6d813882267e8ec55ea4e7362ef1e698053d9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_cmp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dfc6f2534677b76732f7e4c68ae2f03385e2658 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a48baab2d91ed6eb9313412745692bfd5f2576a5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_funcs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_funcs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bd7eea5924960a3fa14946f7c44d082c86afe81 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_funcs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_make.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_make.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ab54372b37829c77b8451a2648aee4943814ea0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_make.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_next_gen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_next_gen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b82346fabbe7b377d8eceec4e480449d179b8a00 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_next_gen.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_version_info.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_version_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..302d0bfb9e10d8d0a062fee64b438c0c2a166fd7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/_version_info.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/converters.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/converters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6ac38cb579c87a0baa77f39d4a3d535e2b0764d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/converters.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/exceptions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5814948263320a7e1528874be1e4e04a0687c7e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/exceptions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/filters.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/filters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..289b5974fafe6f3aba374e6e29b4ada45bcd6695 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/filters.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/setters.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/setters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b67b6f3a0392c2199fcb44af6ec857a8e276767 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/setters.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/validators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba0a18a4f25588a4fb7f39d7fb534c71b8d1317 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/attr/__pycache__/validators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_cmp.pyi b/env-llmeval/lib/python3.10/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f3dcdc1a754146303b28009cbff9ec9bf960e450 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Any, Callable, Optional, Type + +_CompareWithType = Callable[[Any, Any], bool] + +def cmp_using( + eq: Optional[_CompareWithType] = ..., + lt: Optional[_CompareWithType] = ..., + le: Optional[_CompareWithType] = ..., + gt: Optional[_CompareWithType] = ..., + ge: Optional[_CompareWithType] = ..., + require_same_type: bool = ..., + class_name: str = ..., +) -> Type: ... 
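+
+# Illustrative usage sketch (a comment only, not part of the upstream stub):
+# `cmp_using` builds a per-field comparator, e.g. approximate float equality:
+#
+#     import math
+#     import attr
+#
+#     @attr.s
+#     class Reading:  # hypothetical class
+#         value = attr.ib(
+#             eq=attr.cmp_using(eq=lambda a, b: math.isclose(a, b))
+#         )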
diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_compat.py b/env-llmeval/lib/python3.10/site-packages/attr/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..46b05ca453773da7f2972f023c4a4f447b44e824 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_compat.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: MIT + +import inspect +import platform +import sys +import threading + +from collections.abc import Mapping, Sequence # noqa: F401 +from typing import _GenericAlias + + +PYPY = platform.python_implementation() == "PyPy" +PY_3_8_PLUS = sys.version_info[:2] >= (3, 8) +PY_3_9_PLUS = sys.version_info[:2] >= (3, 9) +PY310 = sys.version_info[:2] >= (3, 10) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) + + +if sys.version_info < (3, 8): + try: + from typing_extensions import Protocol + except ImportError: # pragma: no cover + Protocol = object +else: + from typing import Protocol # noqa: F401 + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_config.py b/env-llmeval/lib/python3.10/site-packages/attr/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..9c245b1461abd5dc5143f69bc74c75ae50fabdc5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + msg = "'run' must be bool." 
+ raise TypeError(msg) + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_next_gen.py b/env-llmeval/lib/python3.10/site-packages/attr/_next_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..1fb9f259b53b851336c3135f06cfa377ab3240d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_next_gen.py @@ -0,0 +1,229 @@ +# SPDX-License-Identifier: MIT + +""" +These are keyword-only APIs that call `attr.s` and `attr.ib` with different +default values. +""" + + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + NOTHING, + _frozen_setattrs, + _ng_default_on_setattr, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + unsafe_hash=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + Define an *attrs* class. + + Differences to the classic `attr.s` that it uses underneath: + + - Automatically detect whether or not *auto_attribs* should be `True` (c.f. + *auto_attribs* parameter). + - Converters and validators run when attributes are set by default -- if + *frozen* is `False`. + - *slots=True* + + .. caution:: + + Usually this has only upsides and few visible effects in everyday + programming. But it *can* lead to some surprising behaviors, so please + make sure to read :term:`slotted classes`. + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - Some options that were only relevant on Python 2 or were kept around for + backwards-compatibility have been removed. + + Please note that these are all defaults and you can change them as you + wish. + + :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves + exactly like `attr.s`. If left `None`, `attr.s` will try to guess: + + 1. If any attributes are annotated and no unannotated `attrs.fields`\ s + are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.fields`\ s. + + For now, please refer to `attr.s` for the rest of the parameters. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. 
+ + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _ng_default_on_setattr + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)." + raise ValueError(msg) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Identical to `attr.ib`, except keyword-only and with some arguments + removed. + + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for `attrs.make_class`. + Please note that type checkers ignore this metadata. + .. versionadded:: 20.1.0 + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_typing_compat.pyi b/env-llmeval/lib/python3.10/site-packages/attr/_typing_compat.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ca7b71e906a28f88726bbd342fdfe636af0281e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. +MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class. + class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. 
+ class AttrsInstance_(Protocol): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_version_info.py b/env-llmeval/lib/python3.10/site-packages/attr/_version_info.py new file mode 100644 index 0000000000000000000000000000000000000000..51a1312f9759f21063caea779a62882d7f7c86ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_version_info.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/env-llmeval/lib/python3.10/site-packages/attr/converters.py b/env-llmeval/lib/python3.10/site-packages/attr/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..2bf4c902a66faeeda4cbae89d75f063df99c5039 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/converters.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. 
versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + msg = "Must pass either `default` or `factory`." + raise TypeError(msg) + + if default is not NOTHING and factory is not None: + msg = "Must pass either `default` or `factory` but not both." + raise TypeError(msg) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + msg = "`takes_self` is not supported by default_if_none." + raise ValueError(msg) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + msg = f"Cannot convert value to bool: {val}" + raise ValueError(msg) diff --git a/env-llmeval/lib/python3.10/site-packages/attr/filters.py b/env-llmeval/lib/python3.10/site-packages/attr/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..a1e40c98db853aa375ab0b24559e0559f91e6152 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/filters.py @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attr.asdict`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Include *what*. + + :param what: What to include. 
+ :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s + + :rtype: `callable` + + .. versionchanged:: 23.1.0 Accept strings with field names. + """ + cls, names, attrs = _split_what(what) + + def include_(attribute, value): + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return include_ + + +def exclude(*what): + """ + Exclude *what*. + + :param what: What to exclude. + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s. + + :rtype: `callable` + + .. versionchanged:: 23.3.0 Accept field name string as input argument + """ + cls, names, attrs = _split_what(what) + + def exclude_(attribute, value): + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return exclude_ diff --git a/env-llmeval/lib/python3.10/site-packages/attr/setters.pyi b/env-llmeval/lib/python3.10/site-packages/attr/setters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..72f7ce4761c343860d8b230dd50dcdeba10b03fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/setters.pyi @@ -0,0 +1,19 @@ +from typing import Any, NewType, NoReturn, TypeVar + +from . import Attribute, _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... + +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/env-llmeval/lib/python3.10/site-packages/attr/validators.py b/env-llmeval/lib/python3.10/site-packages/attr/validators.py new file mode 100644 index 0000000000000000000000000000000000000000..34d6b761d37857e876a7d0fd1970a758f8f71981 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/validators.py @@ -0,0 +1,681 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. +""" + + +import operator +import re + +from contextlib import contextmanager +from re import Pattern + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none +from .exceptions import NotCallableError + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "not_", + "optional", + "provides", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + :param disabled: If ``True``, disable running all validators. + :type disabled: bool + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + :return: ``True`` if validators are currently disabled. + :rtype: bool + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. 
versionadded:: 21.3.0
+    """
+    set_run_validators(False)
+    try:
+        yield
+    finally:
+        set_run_validators(True)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not isinstance(value, self.type):
+            msg = "'{name}' must be {type!r} (got {value!r} that is a {actual!r}).".format(
+                name=attr.name,
+                type=self.type,
+                actual=value.__class__,
+                value=value,
+            )
+            raise TypeError(
+                msg,
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<instance_of validator for type {self.type!r}>"
+
+
+def instance_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed using
+    `isinstance` therefore it's also valid to pass a tuple of types).
+
+    :param type: The type to check for.
+    :type type: type or tuple of type
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type `attrs.Attribute`), the expected type, and the value it
+        got.
+    """
+    return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+    pattern = attrib()
+    match_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.match_func(value):
+            msg = "'{name}' must match regex {pattern!r} ({value!r} doesn't)".format(
+                name=attr.name, pattern=self.pattern.pattern, value=value
+            )
+            raise ValueError(
+                msg,
+                attr,
+                self.pattern,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<matches_re validator for pattern {self.pattern!r}>"
+
+
+def matches_re(regex, flags=0, func=None):
+    r"""
+    A validator that raises `ValueError` if the initializer is called
+    with a string that doesn't match *regex*.
+
+    :param regex: a regex string or precompiled pattern to match against
+    :param int flags: flags that will be passed to the underlying re function
+        (default 0)
+    :param callable func: which underlying `re` function to call. Valid options
+        are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
+        means `re.fullmatch`. For performance reasons, the pattern is always
+        precompiled using `re.compile`.
+
+    .. versionadded:: 19.2.0
+    .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+    """
+    valid_funcs = (re.fullmatch, None, re.search, re.match)
+    if func not in valid_funcs:
+        msg = "'func' must be one of {}.".format(
+            ", ".join(
+                sorted(e and e.__name__ or "None" for e in set(valid_funcs))
+            )
+        )
+        raise ValueError(msg)
+
+    if isinstance(regex, Pattern):
+        if flags:
+            msg = "'flags' can only be used with a string pattern; pass flags to re.compile() instead"
+            raise TypeError(msg)
+        pattern = regex
+    else:
+        pattern = re.compile(regex, flags)
+
+    if func is re.match:
+        match_func = pattern.match
+    elif func is re.search:
+        match_func = pattern.search
+    else:
+        match_func = pattern.fullmatch
+
+    return _MatchesReValidator(pattern, match_func)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _ProvidesValidator:
+    interface = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.interface.providedBy(value):
+            msg = "'{name}' must provide {interface!r} which {value!r} doesn't.".format(
+                name=attr.name, interface=self.interface, value=value
+            )
+            raise TypeError(
+                msg,
+                attr,
+                self.interface,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<provides validator for interface {self.interface!r}>"
+
+
+def provides(interface):
+    """
+    A validator that raises a `TypeError` if the initializer is called
+    with an object that does not provide the requested *interface* (checks are
+    performed using ``interface.providedBy(value)`` (see `zope.interface
+    <https://zopeinterface.readthedocs.io/en/latest/>`_).
+
+    :param interface: The interface to check for.
+    :type interface: ``zope.interface.Interface``
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type `attrs.Attribute`), the expected interface, and the
+        value it got.
+
+    .. deprecated:: 23.1.0
+    """
+    import warnings
+
+    warnings.warn(
+        "attrs's zope-interface support is deprecated and will be removed in, "
+        "or after, April 2024.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return _ProvidesValidator(interface)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _OptionalValidator:
+    validator = attrib()
+
+    def __call__(self, inst, attr, value):
+        if value is None:
+            return
+
+        self.validator(inst, attr, value)
+
+    def __repr__(self):
+        return f"<optional validator for {self.validator!r} or None>"
+
+
+def optional(validator):
+    """
+    A validator that makes an attribute optional. An optional attribute is one
+    which can be set to ``None`` in addition to satisfying the requirements of
+    the sub-validator.
+
+    :param Callable | tuple[Callable] | list[Callable] validator: A validator
+        (or validators) that is used for non-``None`` values.
+
+    .. versionadded:: 15.1.0
+    .. versionchanged:: 17.1.0 *validator* can be a list of validators.
+    .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators.
+    """
+    if isinstance(validator, (list, tuple)):
+        return _OptionalValidator(_AndValidator(validator))
+
+    return _OptionalValidator(validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InValidator:
+    options = attrib()
+
+    def __call__(self, inst, attr, value):
+        try:
+            in_options = value in self.options
+        except TypeError:  # e.g. `1 in "abc"`
+            in_options = False
+
+        if not in_options:
+            msg = f"'{attr.name}' must be in {self.options!r} (got {value!r})"
+            raise ValueError(
+                msg,
+                attr,
+                self.options,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<in_ validator with options {self.options!r}>"
+
+
+def in_(options):
+    """
+    A validator that raises a `ValueError` if the initializer is called
+    with a value that does not belong in the options provided. The check is
+    performed using ``value in options``.
+
+    :param options: Allowed options.
+    :type options: list, tuple, `enum.Enum`, ...
+
+    :raises ValueError: With a human readable error message, the attribute (of
+        type `attrs.Attribute`), the expected options, and the value it
+        got.
+
+    .. versionadded:: 17.1.0
+    .. versionchanged:: 22.1.0
+       The ValueError was incomplete until now and only contained the human
+       readable error message. Now it contains all the information that has
+       been promised since 17.1.0.
+    """
+    return _InValidator(options)
+
+
+@attrs(repr=False, slots=False, hash=True)
+class _IsCallableValidator:
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not callable(value):
+            message = (
+                "'{name}' must be callable "
+                "(got {value!r} that is a {actual!r})."
+            )
+            raise NotCallableError(
+                msg=message.format(
+                    name=attr.name, value=value, actual=value.__class__
+                ),
+                value=value,
+            )
+
+    def __repr__(self):
+        return "<is_callable validator>"
+
+
+def is_callable():
+    """
+    A validator that raises a `attrs.exceptions.NotCallableError` if the
+    initializer is called with a value for this particular attribute
+    that is not callable.
+
+    .. versionadded:: 19.1.0
+
+    :raises attrs.exceptions.NotCallableError: With a human readable error
+        message containing the attribute (`attrs.Attribute`) name,
+        and the value it got.
+    """
+    return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepIterable:
+    member_validator = attrib(validator=is_callable())
+    iterable_validator = attrib(
+        default=None, validator=optional(is_callable())
+    )
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.iterable_validator is not None:
+            self.iterable_validator(inst, attr, value)
+
+        for member in value:
+            self.member_validator(inst, attr, member)
+
+    def __repr__(self):
+        iterable_identifier = (
+            ""
+            if self.iterable_validator is None
+            else f" {self.iterable_validator!r}"
+        )
+        return (
+            f"<deep_iterable validator for{iterable_identifier}"
+            f" iterables of {self.member_validator!r}>"
+        )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+    """
+    A validator that performs deep validation of an iterable.
+
+    :param member_validator: Validator(s) to apply to iterable members
+    :param iterable_validator: Validator to apply to iterable itself
+        (optional)
+
+    .. versionadded:: 19.1.0
+
+    :raises TypeError: if any sub-validators fail
+    """
+    if isinstance(member_validator, (list, tuple)):
+        member_validator = and_(*member_validator)
+    return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepMapping:
+    key_validator = attrib(validator=is_callable())
+    value_validator = attrib(validator=is_callable())
+    mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.mapping_validator is not None:
+            self.mapping_validator(inst, attr, value)
+
+        for key in value:
+            self.key_validator(inst, attr, key)
+            self.value_validator(inst, attr, value[key])
+
+    def __repr__(self):
+        return (
+            "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
+        ).format(key=self.key_validator, value=self.value_validator)
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+    """
+    A validator that performs deep validation of a dictionary.
+
+    :param key_validator: Validator to apply to dictionary keys
+    :param value_validator: Validator to apply to dictionary values
+    :param mapping_validator: Validator to apply to top-level mapping
+        attribute (optional)
+
+    .. versionadded:: 19.1.0
+
+    :raises TypeError: if any sub-validators fail
+    """
+    return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+    bound = attrib()
+    compare_op = attrib()
+    compare_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.compare_func(value, self.bound):
+            msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<Validator for x {self.compare_op} {self.bound}>"
+
+
+def lt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number larger or equal to *val*.
+
+    :param val: Exclusive upper bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number greater than *val*.
+
+    :param val: Inclusive upper bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number smaller than *val*.
+
+    :param val: Inclusive lower bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number smaller or equal to *val*.
+
+    :param val: Exclusive lower bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">", operator.gt)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+    max_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) > self.max_length:
+            msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<max_len validator for {self.max_length}>"
+
+
+def max_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is longer than *length*.
+
+    :param int length: Maximum length of the string or iterable
+
+    .. versionadded:: 21.3.0
+    """
+    return _MaxLengthValidator(length)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MinLengthValidator:
+    min_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) < self.min_length:
+            msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<min_len validator for {self.min_length}>"
+
+
+def min_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is shorter than *length*.
+
+    :param int length: Minimum length of the string or iterable
+
+    .. versionadded:: 22.1.0
+    """
+    return _MinLengthValidator(length)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _SubclassOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not issubclass(value, self.type):
+            msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})."
+            raise TypeError(
+                msg,
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<subclass_of validator for type {self.type!r}>"
+
+
+def _subclass_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed using
+    `issubclass` therefore it's also valid to pass a tuple of types).
+
+    :param type: The type to check for.
+    :type type: type or tuple of types
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type `attrs.Attribute`), the expected type, and the value it
+        got.
+    """
+    return _SubclassOfValidator(type)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _NotValidator:
+    validator = attrib()
+    msg = attrib(
+        converter=default_if_none(
+            "not_ validator child '{validator!r}' "
+            "did not raise a captured error"
+        )
+    )
+    exc_types = attrib(
+        validator=deep_iterable(
+            member_validator=_subclass_of(Exception),
+            iterable_validator=instance_of(tuple),
+        ),
+    )
+
+    def __call__(self, inst, attr, value):
+        try:
+            self.validator(inst, attr, value)
+        except self.exc_types:
+            pass  # suppress error to invert validity
+        else:
+            raise ValueError(
+                self.msg.format(
+                    validator=self.validator,
+                    exc_types=self.exc_types,
+                ),
+                attr,
+                self.validator,
+                value,
+                self.exc_types,
+            )
+
+    def __repr__(self):
+        return (
+            "<not_ validator wrapping {what!r}, capturing {exc_types!r}>"
+        ).format(
+            what=self.validator,
+            exc_types=self.exc_types,
+        )
+
+
+def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
+    """
+    A validator that wraps and logically 'inverts' the validator passed to it.
+    It will raise a `ValueError` if the provided validator *doesn't* raise a
+    `ValueError` or `TypeError` (by default), and will suppress the exception
+    if the provided validator *does*.
+
+    Intended to be used with existing validators to compose logic without
+    needing to create inverted variants, for example, ``not_(in_(...))``.
+
+    :param validator: A validator to be logically inverted.
+    :param msg: Message to raise if validator fails.
+        Formatted with keys ``exc_types`` and ``validator``.
+    :type msg: str
+    :param exc_types: Exception type(s) to capture.
+        Other types raised by child validators will not be intercepted and
+        pass through.
+
+    :raises ValueError: With a human readable error message,
+        the attribute (of type `attrs.Attribute`),
+        the validator that failed to raise an exception,
+        the value it got,
+        and the expected exception types.
+
+    .. versionadded:: 22.2.0
+    """
+    try:
+        exc_types = tuple(exc_types)
+    except TypeError:
+        exc_types = (exc_types,)
+    return _NotValidator(validator, msg, exc_types)
diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_login.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_login.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f4fa02535cb9b91ff3dec01c4878850317b3895
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_login.py
@@ -0,0 +1,396 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains methods to login to the Hub."""
+
+import os
+import subprocess
+from functools import partial
+from getpass import getpass
+from pathlib import Path
+from typing import Optional
+
+from . 
import constants +from .commands._cli_utils import ANSI +from .utils import ( + capture_output, + get_token, + is_google_colab, + is_notebook, + list_credential_helpers, + logging, + run_subprocess, + set_git_credential, + unset_git_credential, +) +from .utils._token import _get_token_from_environment, _get_token_from_google_colab + + +logger = logging.get_logger(__name__) + +_HF_LOGO_ASCII = """ + _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_| + _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| + _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_| + _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| + _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_| +""" + + +def login( + token: Optional[str] = None, + add_to_git_credential: bool = False, + new_session: bool = True, + write_permission: bool = False, +) -> None: + """Login the machine to access the Hub. + + The `token` is persisted in cache and set as a git credential. Once done, the machine + is logged in and the access token will be available across all `huggingface_hub` + components. If `token` is not provided, it will be prompted to the user either with + a widget (in a notebook) or via the terminal. + + To login from outside of a script, one can also use `huggingface-cli login` which is + a cli command that wraps [`login`]. + + + + [`login`] is a drop-in replacement method for [`notebook_login`] as it wraps and + extends its capabilities. + + + + + + When the token is not passed, [`login`] will automatically detect if the script runs + in a notebook or not. However, this detection might not be accurate due to the + variety of notebooks that exists nowadays. If that is the case, you can always force + the UI by using [`notebook_login`] or [`interpreter_login`]. + + + + Args: + token (`str`, *optional*): + User access token to generate from https://huggingface.co/settings/token. + add_to_git_credential (`bool`, defaults to `False`): + If `True`, token will be set as git credential. If no git credential helper + is configured, a warning will be displayed to the user. If `token` is `None`, + the value of `add_to_git_credential` is ignored and will be prompted again + to the end user. + new_session (`bool`, defaults to `True`): + If `True`, will request a token even if one is already saved on the machine. + write_permission (`bool`, defaults to `False`): + If `True`, requires a token with write permission. + Raises: + [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) + If an organization token is passed. Only personal account tokens are valid + to login. + [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) + If token is invalid. + [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError) + If running in a notebook but `ipywidgets` is not installed. + """ + if token is not None: + if not add_to_git_credential: + print( + "Token has not been saved to git credential helper. Pass" + " `add_to_git_credential=True` if you want to set the git" + " credential as well." + ) + _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) + elif is_notebook(): + notebook_login(new_session=new_session, write_permission=write_permission) + else: + interpreter_login(new_session=new_session, write_permission=write_permission) + + +def logout() -> None: + """Logout the machine from the Hub. + + Token is deleted from the machine and removed from git credential. 
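+
+    Example (illustrative):
+
+    ```py
+    >>> from huggingface_hub import logout
+    >>> logout()
+    Successfully logged out.
+    ```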
+ """ + if get_token() is None: + print("Not logged in!") + return + + # Delete token from git credentials + unset_git_credential() + + # Delete token file + try: + Path(constants.HF_TOKEN_PATH).unlink() + except FileNotFoundError: + pass + + # Check if still logged in + if _get_token_from_google_colab() is not None: + raise EnvironmentError( + "You are automatically logged in using a Google Colab secret.\n" + "To log out, you must unset the `HF_TOKEN` secret in your Colab settings." + ) + if _get_token_from_environment() is not None: + raise EnvironmentError( + "Token has been deleted from your machine but you are still logged in.\n" + "To log out, you must clear out both `HF_TOKEN` and `HUGGING_FACE_HUB_TOKEN` environment variables." + ) + + print("Successfully logged out.") + + +### +# Interpreter-based login (text) +### + + +def interpreter_login(new_session: bool = True, write_permission: bool = False) -> None: + """ + Displays a prompt to login to the HF website and store the token. + + This is equivalent to [`login`] without passing a token when not run in a notebook. + [`interpreter_login`] is useful if you want to force the use of the terminal prompt + instead of a notebook widget. + + For more details, see [`login`]. + + Args: + new_session (`bool`, defaults to `True`): + If `True`, will request a token even if one is already saved on the machine. + write_permission (`bool`, defaults to `False`): + If `True`, requires a token with write permission. + + """ + if not new_session and _current_token_okay(write_permission=write_permission): + print("User is already logged in.") + return + + from .commands.delete_cache import _ask_for_confirmation_no_tui + + print(_HF_LOGO_ASCII) + if get_token() is not None: + print( + " A token is already saved on your machine. Run `huggingface-cli" + " whoami` to get more information or `huggingface-cli logout` if you want" + " to log out." + ) + print(" Setting a new token will erase the existing one.") + + print(" To login, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens .") + if os.name == "nt": + print("Token can be pasted using 'Right-Click'.") + token = getpass("Enter your token (input will not be visible): ") + add_to_git_credential = _ask_for_confirmation_no_tui("Add token as git credential?") + + _login(token=token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) + + +### +# Notebook-based login (widget) +### + +NOTEBOOK_LOGIN_PASSWORD_HTML = """

<center> <img
+src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
+alt='Hugging Face'> <br> Immediately click login after typing your password or
+it might be stored in plain text in this notebook file. </center>"""
+
+
+NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
+src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
+alt='Hugging Face'> <br> Copy a token from <a
+href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
+tokens page</a> and paste it below. <br> Immediately click login after copying
+your token or it might be stored in plain text in this notebook file. </center>
""" + + +NOTEBOOK_LOGIN_TOKEN_HTML_END = """ +Pro Tip: If you don't already have one, you can create a dedicated +'notebooks' token with 'write' access, that you can then easily reuse for all +notebooks. """ + + +def notebook_login(new_session: bool = True, write_permission: bool = False) -> None: + """ + Displays a widget to login to the HF website and store the token. + + This is equivalent to [`login`] without passing a token when run in a notebook. + [`notebook_login`] is useful if you want to force the use of the notebook widget + instead of a prompt in the terminal. + + For more details, see [`login`]. + + Args: + new_session (`bool`, defaults to `True`): + If `True`, will request a token even if one is already saved on the machine. + write_permission (`bool`, defaults to `False`): + If `True`, requires a token with write permission. + """ + try: + import ipywidgets.widgets as widgets # type: ignore + from IPython.display import display # type: ignore + except ImportError: + raise ImportError( + "The `notebook_login` function can only be used in a notebook (Jupyter or" + " Colab) and you need the `ipywidgets` module: `pip install ipywidgets`." + ) + if not new_session and _current_token_okay(write_permission=write_permission): + print("User is already logged in.") + return + + box_layout = widgets.Layout(display="flex", flex_flow="column", align_items="center", width="50%") + + token_widget = widgets.Password(description="Token:") + git_checkbox_widget = widgets.Checkbox(value=True, description="Add token as git credential?") + token_finish_button = widgets.Button(description="Login") + + login_token_widget = widgets.VBox( + [ + widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START), + token_widget, + git_checkbox_widget, + token_finish_button, + widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END), + ], + layout=box_layout, + ) + display(login_token_widget) + + # On click events + def login_token_event(t, write_permission: bool = False): + """ + Event handler for the login button. + + Args: + write_permission (`bool`, defaults to `False`): + If `True`, requires a token with write permission. + """ + token = token_widget.value + add_to_git_credential = git_checkbox_widget.value + # Erase token and clear value to make sure it's not saved in the notebook. + token_widget.value = "" + # Hide inputs + login_token_widget.children = [widgets.Label("Connecting...")] + try: + with capture_output() as captured: + _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) + message = captured.getvalue() + except Exception as error: + message = str(error) + # Print result (success message or error) + login_token_widget.children = [widgets.Label(line) for line in message.split("\n") if line.strip()] + + token_finish_button.on_click(partial(login_token_event, write_permission=write_permission)) + + +### +# Login private helpers +### + + +def _login(token: str, add_to_git_credential: bool, write_permission: bool = False) -> None: + from .hf_api import get_token_permission # avoid circular import + + if token.startswith("api_org"): + raise ValueError("You must use your personal account token, not an organization token.") + + permission = get_token_permission(token) + if permission is None: + raise ValueError("Invalid token passed!") + elif write_permission and permission != "write": + raise ValueError( + "Token is valid but is 'read-only' and a 'write' token is required.\nPlease provide a new token with" + " correct permission." 
+ ) + print(f"Token is valid (permission: {permission}).") + + if add_to_git_credential: + if _is_git_credential_helper_configured(): + set_git_credential(token) + print( + "Your token has been saved in your configured git credential helpers" + + f" ({','.join(list_credential_helpers())})." + ) + else: + print("Token has not been saved to git credential helper.") + + # Save token + path = Path(constants.HF_TOKEN_PATH) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(token) + print(f"Your token has been saved to {constants.HF_TOKEN_PATH}") + print("Login successful") + + +def _current_token_okay(write_permission: bool = False): + """Check if the current token is valid. + + Args: + write_permission (`bool`, defaults to `False`): + If `True`, requires a token with write permission. + + Returns: + `bool`: `True` if the current token is valid, `False` otherwise. + """ + from .hf_api import get_token_permission # avoid circular import + + permission = get_token_permission() + if permission is None or (write_permission and permission != "write"): + return False + return True + + +def _is_git_credential_helper_configured() -> bool: + """Check if a git credential helper is configured. + + Warns user if not the case (except for Google Colab where "store" is set by default + by `huggingface_hub`). + """ + helpers = list_credential_helpers() + if len(helpers) > 0: + return True # Do not warn: at least 1 helper is set + + # Only in Google Colab to avoid the warning message + # See https://github.com/huggingface/huggingface_hub/issues/1043#issuecomment-1247010710 + if is_google_colab(): + _set_store_as_git_credential_helper_globally() + return True # Do not warn: "store" is used by default in Google Colab + + # Otherwise, warn user + print( + ANSI.red( + "Cannot authenticate through git-credential as no helper is defined on your" + " machine.\nYou might have to re-authenticate when pushing to the Hugging" + " Face Hub.\nRun the following command in your terminal in case you want to" + " set the 'store' credential helper as default.\n\ngit config --global" + " credential.helper store\n\nRead" + " https://git-scm.com/book/en/v2/Git-Tools-Credential-Storage for more" + " details." + ) + ) + return False + + +def _set_store_as_git_credential_helper_globally() -> None: + """Set globally the credential.helper to `store`. + + To be used only in Google Colab as we assume the user doesn't care about the git + credential config. It is the only particular case where we don't want to display the + warning message in [`notebook_login()`]. + + Related: + - https://github.com/huggingface/huggingface_hub/issues/1043 + - https://github.com/huggingface/huggingface_hub/issues/1051 + - https://git-scm.com/docs/git-credential-store + """ + try: + run_subprocess("git config --global credential.helper store") + except subprocess.CalledProcessError as exc: + raise EnvironmentError(exc.stderr) diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_tensorboard_logger.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_tensorboard_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..09f6aa5dbdb48049baaec12751318a0a8a87e7ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_tensorboard_logger.py @@ -0,0 +1,169 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains a logger to push training logs to the Hub, using Tensorboard.""" + +from pathlib import Path +from typing import TYPE_CHECKING, List, Optional, Union + +from huggingface_hub._commit_scheduler import CommitScheduler + +from .utils import experimental, is_tensorboard_available + + +if is_tensorboard_available(): + from tensorboardX import SummaryWriter + + # TODO: clarify: should we import from torch.utils.tensorboard ? + +else: + SummaryWriter = object # Dummy class to avoid failing at import. Will raise on instance creation. + +if TYPE_CHECKING: + from tensorboardX import SummaryWriter + + +class HFSummaryWriter(SummaryWriter): + """ + Wrapper around the tensorboard's `SummaryWriter` to push training logs to the Hub. + + Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate + thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection + issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every` + minutes (default to every 5 minutes). + + + + `HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice. + + + + Args: + repo_id (`str`): + The id of the repo to which the logs will be pushed. + logdir (`str`, *optional*): + The directory where the logs will be written. If not specified, a local directory will be created by the + underlying `SummaryWriter` object. + commit_every (`int` or `float`, *optional*): + The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes. + squash_history (`bool`, *optional*): + Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is + useful to avoid degraded performances on the repo when it grows too large. + repo_type (`str`, *optional*): + The type of the repo to which the logs will be pushed. Defaults to "model". + repo_revision (`str`, *optional*): + The revision of the repo to which the logs will be pushed. Defaults to "main". + repo_private (`bool`, *optional*): + Whether to create a private repo or not. Defaults to False. This argument is ignored if the repo already + exists. + path_in_repo (`str`, *optional*): + The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/". + repo_allow_patterns (`List[str]` or `str`, *optional*): + A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. Check out the + [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details. + repo_ignore_patterns (`List[str]` or `str`, *optional*): + A list of patterns to exclude in the upload. Check out the + [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details. + token (`str`, *optional*): + Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more + details + kwargs: + Additional keyword arguments passed to `SummaryWriter`. 
+ + Examples: + ```py + >>> from huggingface_hub import HFSummaryWriter + + # Logs are automatically pushed every 15 minutes + >>> logger = HFSummaryWriter(repo_id="test_hf_logger", commit_every=15) + >>> logger.add_scalar("a", 1) + >>> logger.add_scalar("b", 2) + ... + + # You can also trigger a push manually + >>> logger.scheduler.trigger() + ``` + + ```py + >>> from huggingface_hub import HFSummaryWriter + + # Logs are automatically pushed every 5 minutes (default) + when exiting the context manager + >>> with HFSummaryWriter(repo_id="test_hf_logger") as logger: + ... logger.add_scalar("a", 1) + ... logger.add_scalar("b", 2) + ``` + """ + + @experimental + def __new__(cls, *args, **kwargs) -> "HFSummaryWriter": + if not is_tensorboard_available(): + raise ImportError( + "You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade" + " tensorboardX` first." + ) + return super().__new__(cls) + + def __init__( + self, + repo_id: str, + *, + logdir: Optional[str] = None, + commit_every: Union[int, float] = 5, + squash_history: bool = False, + repo_type: Optional[str] = None, + repo_revision: Optional[str] = None, + repo_private: bool = False, + path_in_repo: Optional[str] = "tensorboard", + repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*", + repo_ignore_patterns: Optional[Union[List[str], str]] = None, + token: Optional[str] = None, + **kwargs, + ): + # Initialize SummaryWriter + super().__init__(logdir=logdir, **kwargs) + + # Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it. + if not isinstance(self.logdir, str): + raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.") + + # Append logdir name to `path_in_repo` + if path_in_repo is None or path_in_repo == "": + path_in_repo = Path(self.logdir).name + else: + path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name + + # Initialize scheduler + self.scheduler = CommitScheduler( + folder_path=self.logdir, + path_in_repo=path_in_repo, + repo_id=repo_id, + repo_type=repo_type, + revision=repo_revision, + private=repo_private, + token=token, + allow_patterns=repo_allow_patterns, + ignore_patterns=repo_ignore_patterns, + every=commit_every, + squash_history=squash_history, + ) + + # Exposing some high-level info at root level + self.repo_id = self.scheduler.repo_id + self.repo_type = self.scheduler.repo_type + self.repo_revision = self.scheduler.revision + + def __exit__(self, exc_type, exc_val, exc_tb): + """Push to hub in a non-blocking way when exiting the logger's context manager.""" + super().__exit__(exc_type, exc_val, exc_tb) + future = self.scheduler.trigger() + future.result() diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_webhooks_server.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_webhooks_server.py new file mode 100644 index 0000000000000000000000000000000000000000..fefb038848221691c1b49df9f7a6c921295f6a71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/_webhooks_server.py @@ -0,0 +1,380 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily.""" + +import atexit +import inspect +import os +from functools import wraps +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional + +from .utils import experimental, is_gradio_available +from .utils._deprecation import _deprecate_method + + +if TYPE_CHECKING: + import gradio as gr + + +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse + + +_global_app: Optional["WebhooksServer"] = None +_is_local = os.getenv("SYSTEM") != "spaces" + + +@experimental +class WebhooksServer: + """ + The [`WebhooksServer`] class lets you create an instance of a Gradio app that can receive Huggingface webhooks. + These webhooks can be registered using the [`~WebhooksServer.add_webhook`] decorator. Webhook endpoints are added to + the app as a POST endpoint to the FastAPI router. Once all the webhooks are registered, the `run` method has to be + called to start the app. + + It is recommended to accept [`WebhookPayload`] as the first argument of the webhook function. It is a Pydantic + model that contains all the information about the webhook event. The data will be parsed automatically for you. + + Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to setup your + WebhooksServer and deploy it on a Space. + + + + `WebhooksServer` is experimental. Its API is subject to change in the future. + + + + + + You must have `gradio` installed to use `WebhooksServer` (`pip install --upgrade gradio`). + + + + Args: + ui (`gradio.Blocks`, optional): + A Gradio UI instance to be used as the Space landing page. If `None`, a UI displaying instructions + about the configured webhooks is created. + webhook_secret (`str`, optional): + A secret key to verify incoming webhook requests. You can set this value to any secret you want as long as + you also configure it in your [webhooks settings panel](https://huggingface.co/settings/webhooks). You + can also set this value as the `WEBHOOK_SECRET` environment variable. If no secret is provided, the + webhook endpoints are opened without any security. + + Example: + + ```python + import gradio as gr + from huggingface_hub import WebhooksServer, WebhookPayload + + with gr.Blocks() as ui: + ... + + app = WebhooksServer(ui=ui, webhook_secret="my_secret_key") + + @app.add_webhook("/say_hello") + async def hello(payload: WebhookPayload): + return {"message": "hello"} + + app.run() + ``` + """ + + def __new__(cls, *args, **kwargs) -> "WebhooksServer": + if not is_gradio_available(): + raise ImportError( + "You must have `gradio` installed to use `WebhooksServer`. Please run `pip install --upgrade gradio`" + " first." 
+ ) + return super().__new__(cls) + + def __init__( + self, + ui: Optional["gr.Blocks"] = None, + webhook_secret: Optional[str] = None, + ) -> None: + self._ui = ui + + self.webhook_secret = webhook_secret or os.getenv("WEBHOOK_SECRET") + self.registered_webhooks: Dict[str, Callable] = {} + _warn_on_empty_secret(self.webhook_secret) + + def add_webhook(self, path: Optional[str] = None) -> Callable: + """ + Decorator to add a webhook to the [`WebhooksServer`] server. + + Args: + path (`str`, optional): + The URL path to register the webhook function. If not provided, the function name will be used as the + path. In any case, all webhooks are registered under `/webhooks`. + + Raises: + ValueError: If the provided path is already registered as a webhook. + + Example: + ```python + from huggingface_hub import WebhooksServer, WebhookPayload + + app = WebhooksServer() + + @app.add_webhook + async def trigger_training(payload: WebhookPayload): + if payload.repo.type == "dataset" and payload.event.action == "update": + # Trigger a training job if a dataset is updated + ... + + app.run() + ``` + """ + # Usage: directly as decorator. Example: `@app.add_webhook` + if callable(path): + # If path is a function, it means it was used as a decorator without arguments + return self.add_webhook()(path) + + # Usage: provide a path. Example: `@app.add_webhook(...)` + @wraps(FastAPI.post) + def _inner_post(*args, **kwargs): + func = args[0] + abs_path = f"/webhooks/{(path or func.__name__).strip('/')}" + if abs_path in self.registered_webhooks: + raise ValueError(f"Webhook {abs_path} already exists.") + self.registered_webhooks[abs_path] = func + + return _inner_post + + def launch(self, prevent_thread_lock: bool = False, **launch_kwargs: Any) -> None: + """Launch the Gradio app and register webhooks to the underlying FastAPI server. + + Input parameters are forwarded to Gradio when launching the app. + """ + ui = self._ui or self._get_default_ui() + + # Start Gradio App + # - as non-blocking so that webhooks can be added afterwards + # - as shared if launch locally (to debug webhooks) + launch_kwargs.setdefault("share", _is_local) + self.fastapi_app, _, _ = ui.launch(prevent_thread_lock=True, **launch_kwargs) + + # Register webhooks to FastAPI app + for path, func in self.registered_webhooks.items(): + # Add secret check if required + if self.webhook_secret is not None: + func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret) + + # Add route to FastAPI app + self.fastapi_app.post(path)(func) + + # Print instructions and block main thread + url = (ui.share_url or ui.local_url).strip("/") + message = "\nWebhooks are correctly setup and ready to use:" + message += "\n" + "\n".join(f" - POST {url}{webhook}" for webhook in self.registered_webhooks) + message += "\nGo to https://huggingface.co/settings/webhooks to setup your webhooks." + print(message) + + if not prevent_thread_lock: + ui.block_thread() + + @_deprecate_method(version="0.23", message="Use `WebhooksServer.launch` instead.") + def run(self) -> None: + return self.launch() + + def _get_default_ui(self) -> "gr.Blocks": + """Default UI if not provided (lists webhooks and provides basic instructions).""" + import gradio as gr + + with gr.Blocks() as ui: + gr.Markdown("# This is an app to process 🤗 Webhooks") + gr.Markdown( + "Webhooks are a foundation for MLOps-related features. 
They allow you to listen for new changes on"
+                " specific repos or to all repos belonging to a particular set of users/organizations (not just your"
+                " repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to learn"
+                " more about webhooks on the Hugging Face Hub."
+            )
+            gr.Markdown(
+                f"{len(self.registered_webhooks)} webhook(s) are registered:"
+                + "\n\n"
+                + "\n ".join(
+                    f"- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})"
+                    for webhook_path, webhook in self.registered_webhooks.items()
+                )
+            )
+            gr.Markdown(
+                "Go to https://huggingface.co/settings/webhooks to set up your webhooks."
+                + "\nYour app is running locally. Please look at the logs to check the full URL you need to set."
+                if _is_local
+                else (
+                    "\nThis app is running on a Space. You can find the corresponding URL in the options menu"
+                    " (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'."
+                )
+            )
+        return ui
+
+
+@experimental
+def webhook_endpoint(path: Optional[str] = None) -> Callable:
+    """Decorator to start a [`WebhooksServer`] and register the decorated function as a webhook endpoint.
+
+    This is a helper to get started quickly. If you need more flexibility (custom landing page or webhook secret),
+    you can use [`WebhooksServer`] directly. You can register multiple webhook endpoints (to the same server) by using
+    this decorator multiple times.
+
+    Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
+    server and deploy it on a Space.
+
+    <Tip warning={true}>
+
+    `webhook_endpoint` is experimental. Its API is subject to change in the future.
+
+    </Tip>
+
+    <Tip warning={true}>
+
+    You must have `gradio` installed to use `webhook_endpoint` (`pip install --upgrade gradio`).
+
+    </Tip>
+
+    Args:
+        path (`str`, optional):
+            The URL path to register the webhook function. If not provided, the function name will be used as the path.
+            In any case, all webhooks are registered under `/webhooks`.
+
+    Examples:
+        The default usage is to register a function as a webhook endpoint. The function name will be used as the path.
+        The server will be started automatically at exit (i.e. at the end of the script).
+
+        ```python
+        from huggingface_hub import webhook_endpoint, WebhookPayload
+
+        @webhook_endpoint
+        async def trigger_training(payload: WebhookPayload):
+            if payload.repo.type == "dataset" and payload.event.action == "update":
+                # Trigger a training job if a dataset is updated
+                ...
+
+        # Server is automatically started at the end of the script.
+        ```
+
+        Advanced usage: register a function as a webhook endpoint and start the server manually. This is useful if you
+        are running it in a notebook.
+
+        ```python
+        from huggingface_hub import webhook_endpoint, WebhookPayload
+
+        @webhook_endpoint
+        async def trigger_training(payload: WebhookPayload):
+            if payload.repo.type == "dataset" and payload.event.action == "update":
+                # Trigger a training job if a dataset is updated
+                ...
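+
+        # (the decorated function is returned unchanged; the decorator only
+        # attaches a `.run()` helper and registers the server to start at exit)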
+ + # Start the server manually + trigger_training.run() + ``` + """ + if callable(path): + # If path is a function, it means it was used as a decorator without arguments + return webhook_endpoint()(path) + + @wraps(WebhooksServer.add_webhook) + def _inner(func: Callable) -> Callable: + app = _get_global_app() + app.add_webhook(path)(func) + if len(app.registered_webhooks) == 1: + # Register `app.run` to run at exit (only once) + atexit.register(app.run) + + @wraps(app.run) + def _run_now(): + # Run the app directly (without waiting atexit) + atexit.unregister(app.run) + app.run() + + func.run = _run_now # type: ignore + return func + + return _inner + + +def _get_global_app() -> WebhooksServer: + global _global_app + if _global_app is None: + _global_app = WebhooksServer() + return _global_app + + +def _warn_on_empty_secret(webhook_secret: Optional[str]) -> None: + if webhook_secret is None: + print("Webhook secret is not defined. This means your webhook endpoints will be open to everyone.") + print( + "To add a secret, set `WEBHOOK_SECRET` as environment variable or pass it at initialization: " + "\n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`" + ) + print( + "For more details about webhook secrets, please refer to" + " https://huggingface.co/docs/hub/webhooks#webhook-secret." + ) + else: + print("Webhook secret is correctly defined.") + + +def _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str: + """Returns the anchor to a given webhook in the docs (experimental)""" + return "/docs#/default/" + webhook_name + webhook_path.replace("/", "_") + "_post" + + +def _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable: + """Wraps a webhook function to check the webhook secret before calling the function. + + This is a hacky way to add the `request` parameter to the function signature. Since FastAPI based itself on route + parameters to inject the values to the function, we need to hack the function signature to retrieve the `Request` + object (and hence the headers). A far cleaner solution would be to use a middleware. However, since + `fastapi==0.90.1`, a middleware cannot be added once the app has started. And since the FastAPI app is started by + Gradio internals (and not by us), we cannot add a middleware. + + This method is called only when a secret has been defined by the user. If a request is sent without the + "x-webhook-secret", the function will return a 401 error (unauthorized). If the header is sent but is incorrect, + the function will return a 403 error (forbidden). + + Inspired by https://stackoverflow.com/a/33112180. 
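+
+    A minimal sketch of the effect (illustrative only; FastAPI's `TestClient`
+    requires the `httpx` package):
+
+    ```python
+    from fastapi import FastAPI
+    from fastapi.testclient import TestClient
+
+    app = FastAPI()
+    wrapped = _wrap_webhook_to_check_secret(lambda: {"ok": True}, webhook_secret="s3cret")
+    app.post("/webhooks/ping")(wrapped)
+
+    client = TestClient(app)
+    client.post("/webhooks/ping")                                          # 401, header not set
+    client.post("/webhooks/ping", headers={"x-webhook-secret": "wrong"})   # 403, invalid secret
+    client.post("/webhooks/ping", headers={"x-webhook-secret": "s3cret"})  # 200
+    ```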
+ """ + initial_sig = inspect.signature(func) + + @wraps(func) + async def _protected_func(request: Request, **kwargs): + request_secret = request.headers.get("x-webhook-secret") + if request_secret is None: + return JSONResponse({"error": "x-webhook-secret header not set."}, status_code=401) + if request_secret != webhook_secret: + return JSONResponse({"error": "Invalid webhook secret."}, status_code=403) + + # Inject `request` in kwargs if required + if "request" in initial_sig.parameters: + kwargs["request"] = request + + # Handle both sync and async routes + if inspect.iscoroutinefunction(func): + return await func(**kwargs) + else: + return func(**kwargs) + + # Update signature to include request + if "request" not in initial_sig.parameters: + _protected_func.__signature__ = initial_sig.replace( # type: ignore + parameters=( + inspect.Parameter(name="request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request), + ) + + tuple(initial_sig.parameters.values()) + ) + + # Return protected route + return _protected_func diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/fastai_utils.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/fastai_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e586e8663c39e8d5bab3f57f667dbd878514e59d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/fastai_utils.py @@ -0,0 +1,425 @@ +import json +import os +from pathlib import Path +from pickle import DEFAULT_PROTOCOL, PicklingError +from typing import Any, Dict, List, Optional, Union + +from packaging import version + +from huggingface_hub import snapshot_download +from huggingface_hub.constants import CONFIG_NAME +from huggingface_hub.hf_api import HfApi +from huggingface_hub.utils import ( + SoftTemporaryDirectory, + get_fastai_version, + get_fastcore_version, + get_python_version, +) + +from .utils import logging, validate_hf_hub_args +from .utils._runtime import _PY_VERSION # noqa: F401 # for backward compatibility... + + +logger = logging.get_logger(__name__) + + +def _check_fastai_fastcore_versions( + fastai_min_version: str = "2.4", + fastcore_min_version: str = "1.3.27", +): + """ + Checks that the installed fastai and fastcore versions are compatible for pickle serialization. + + Args: + fastai_min_version (`str`, *optional*): + The minimum fastai version supported. + fastcore_min_version (`str`, *optional*): + The minimum fastcore version supported. + + + Raises the following error: + + - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError) + if the fastai or fastcore libraries are not available or are of an invalid version. + + + """ + + if (get_fastcore_version() or get_fastai_version()) == "N/A": + raise ImportError( + f"fastai>={fastai_min_version} and fastcore>={fastcore_min_version} are" + f" required. Currently using fastai=={get_fastai_version()} and" + f" fastcore=={get_fastcore_version()}." + ) + + current_fastai_version = version.Version(get_fastai_version()) + current_fastcore_version = version.Version(get_fastcore_version()) + + if current_fastai_version < version.Version(fastai_min_version): + raise ImportError( + "`push_to_hub_fastai` and `from_pretrained_fastai` require a" + f" fastai>={fastai_min_version} version, but you are using fastai version" + f" {get_fastai_version()} which is incompatible. Upgrade with `pip install" + " fastai==2.5.6`." 
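+            # (any fastai release >= the `fastai_min_version` argument passes this
+            # check; 2.5.6 is just one known-good version to suggest pinning)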
+ ) + + if current_fastcore_version < version.Version(fastcore_min_version): + raise ImportError( + "`push_to_hub_fastai` and `from_pretrained_fastai` require a" + f" fastcore>={fastcore_min_version} version, but you are using fastcore" + f" version {get_fastcore_version()} which is incompatible. Upgrade with" + " `pip install fastcore==1.3.27`." + ) + + +def _check_fastai_fastcore_pyproject_versions( + storage_folder: str, + fastai_min_version: str = "2.4", + fastcore_min_version: str = "1.3.27", +): + """ + Checks that the `pyproject.toml` file in the directory `storage_folder` has fastai and fastcore versions + that are compatible with `from_pretrained_fastai` and `push_to_hub_fastai`. If `pyproject.toml` does not exist + or does not contain versions for fastai and fastcore, then it logs a warning. + + Args: + storage_folder (`str`): + Folder to look for the `pyproject.toml` file. + fastai_min_version (`str`, *optional*): + The minimum fastai version supported. + fastcore_min_version (`str`, *optional*): + The minimum fastcore version supported. + + + Raises the following errors: + + - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError) + if the `toml` module is not installed. + - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError) + if the `pyproject.toml` indicates a lower than minimum supported version of fastai or fastcore. + + + """ + + try: + import toml + except ModuleNotFoundError: + raise ImportError( + "`push_to_hub_fastai` and `from_pretrained_fastai` require the toml module." + " Install it with `pip install toml`." + ) + + # Checks that a `pyproject.toml`, with `build-system` and `requires` sections, exists in the repository. If so, get a list of required packages. + if not os.path.isfile(f"{storage_folder}/pyproject.toml"): + logger.warning( + "There is no `pyproject.toml` in the repository that contains the fastai" + " `Learner`. The `pyproject.toml` would allow us to verify that your fastai" + " and fastcore versions are compatible with those of the model you want to" + " load." + ) + return + pyproject_toml = toml.load(f"{storage_folder}/pyproject.toml") + + if "build-system" not in pyproject_toml.keys(): + logger.warning( + "There is no `build-system` section in the pyproject.toml of the repository" + " that contains the fastai `Learner`. The `build-system` would allow us to" + " verify that your fastai and fastcore versions are compatible with those" + " of the model you want to load." + ) + return + build_system_toml = pyproject_toml["build-system"] + + if "requires" not in build_system_toml.keys(): + logger.warning( + "There is no `requires` section in the pyproject.toml of the repository" + " that contains the fastai `Learner`. The `requires` would allow us to" + " verify that your fastai and fastcore versions are compatible with those" + " of the model you want to load." + ) + return + package_versions = build_system_toml["requires"] + + # Extracts contains fastai and fastcore versions from `pyproject.toml` if available. + # If the package is specified but not the version (e.g. "fastai" instead of "fastai=2.4"), the default versions are the highest. 
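+    # (a bare "fastai" entry yields an empty version string below, so the
+    # version comparison is skipped and any installed release is accepted)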
+ fastai_packages = [pck for pck in package_versions if pck.startswith("fastai")] + if len(fastai_packages) == 0: + logger.warning("The repository does not have a fastai version specified in the `pyproject.toml`.") + # fastai_version is an empty string if not specified + else: + fastai_version = str(fastai_packages[0]).partition("=")[2] + if fastai_version != "" and version.Version(fastai_version) < version.Version(fastai_min_version): + raise ImportError( + "`from_pretrained_fastai` requires" + f" fastai>={fastai_min_version} version but the model to load uses" + f" {fastai_version} which is incompatible." + ) + + fastcore_packages = [pck for pck in package_versions if pck.startswith("fastcore")] + if len(fastcore_packages) == 0: + logger.warning("The repository does not have a fastcore version specified in the `pyproject.toml`.") + # fastcore_version is an empty string if not specified + else: + fastcore_version = str(fastcore_packages[0]).partition("=")[2] + if fastcore_version != "" and version.Version(fastcore_version) < version.Version(fastcore_min_version): + raise ImportError( + "`from_pretrained_fastai` requires" + f" fastcore>={fastcore_min_version} version, but you are using fastcore" + f" version {fastcore_version} which is incompatible." + ) + + +README_TEMPLATE = """--- +tags: +- fastai +--- + +# Amazing! + +🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! + +# Some next steps +1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! + +2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). + +3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! + +Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. + + +--- + + +# Model card + +## Model description +More information needed + +## Intended uses & limitations +More information needed + +## Training and evaluation data +More information needed +""" + +PYPROJECT_TEMPLATE = f"""[build-system] +requires = ["setuptools>=40.8.0", "wheel", "python={get_python_version()}", "fastai={get_fastai_version()}", "fastcore={get_fastcore_version()}"] +build-backend = "setuptools.build_meta:__legacy__" +""" + + +def _create_model_card(repo_dir: Path): + """ + Creates a model card for the repository. + + Args: + repo_dir (`Path`): + Directory where model card is created. + """ + readme_path = repo_dir / "README.md" + + if not readme_path.exists(): + with readme_path.open("w", encoding="utf-8") as f: + f.write(README_TEMPLATE) + + +def _create_model_pyproject(repo_dir: Path): + """ + Creates a `pyproject.toml` for the repository. + + Args: + repo_dir (`Path`): + Directory where `pyproject.toml` is created. + """ + pyproject_path = repo_dir / "pyproject.toml" + + if not pyproject_path.exists(): + with pyproject_path.open("w", encoding="utf-8") as f: + f.write(PYPROJECT_TEMPLATE) + + +def _save_pretrained_fastai( + learner, + save_directory: Union[str, Path], + config: Optional[Dict[str, Any]] = None, +): + """ + Saves a fastai learner to `save_directory` in pickle format using the default pickle protocol for the version of python used. + + Args: + learner (`Learner`): + The `fastai.Learner` you'd like to save. + save_directory (`str` or `Path`): + Specific directory in which you want to save the fastai learner. + config (`dict`, *optional*): + Configuration object. Will be uploaded as a .json file. 
Example: 'https://huggingface.co/espejelomar/fastai-pet-breeds-classification/blob/main/config.json'. + + + + Raises the following error: + + - [`RuntimeError`](https://docs.python.org/3/library/exceptions.html#RuntimeError) + if the config file provided is not a dictionary. + + + """ + _check_fastai_fastcore_versions() + + os.makedirs(save_directory, exist_ok=True) + + # if the user provides config then we update it with the fastai and fastcore versions in CONFIG_TEMPLATE. + if config is not None: + if not isinstance(config, dict): + raise RuntimeError(f"Provided config should be a dict. Got: '{type(config)}'") + path = os.path.join(save_directory, CONFIG_NAME) + with open(path, "w") as f: + json.dump(config, f) + + _create_model_card(Path(save_directory)) + _create_model_pyproject(Path(save_directory)) + + # learner.export saves the model in `self.path`. + learner.path = Path(save_directory) + os.makedirs(save_directory, exist_ok=True) + try: + learner.export( + fname="model.pkl", + pickle_protocol=DEFAULT_PROTOCOL, + ) + except PicklingError: + raise PicklingError( + "You are using a lambda function, i.e., an anonymous function. `pickle`" + " cannot pickle function objects and requires that all functions have" + " names. One possible solution is to name the function." + ) + + +@validate_hf_hub_args +def from_pretrained_fastai( + repo_id: str, + revision: Optional[str] = None, +): + """ + Load pretrained fastai model from the Hub or from a local directory. + + Args: + repo_id (`str`): + The location where the pickled fastai.Learner is. It can be either of the two: + - Hosted on the Hugging Face Hub. E.g.: 'espejelomar/fatai-pet-breeds-classification' or 'distilgpt2'. + You can add a `revision` by appending `@` at the end of `repo_id`. E.g.: `dbmdz/bert-base-german-cased@main`. + Revision is the specific model version to use. Since we use a git-based system for storing models and other + artifacts on the Hugging Face Hub, it can be a branch name, a tag name, or a commit id. + - Hosted locally. `repo_id` would be a directory containing the pickle and a pyproject.toml + indicating the fastai and fastcore versions used to build the `fastai.Learner`. E.g.: `./my_model_directory/`. + revision (`str`, *optional*): + Revision at which the repo's files are downloaded. See documentation of `snapshot_download`. + + Returns: + The `fastai.Learner` model in the `repo_id` repo. + """ + _check_fastai_fastcore_versions() + + # Load the `repo_id` repo. + # `snapshot_download` returns the folder where the model was stored. 
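+    # (typically "<cache_dir>/models--<user>--<repo>/snapshots/<commit-sha>", per the Hub cache layout)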
+ # `cache_dir` will be the default '/root/.cache/huggingface/hub' + if not os.path.isdir(repo_id): + storage_folder = snapshot_download( + repo_id=repo_id, + revision=revision, + library_name="fastai", + library_version=get_fastai_version(), + ) + else: + storage_folder = repo_id + + _check_fastai_fastcore_pyproject_versions(storage_folder) + + from fastai.learner import load_learner # type: ignore + + return load_learner(os.path.join(storage_folder, "model.pkl")) + + +@validate_hf_hub_args +def push_to_hub_fastai( + learner, + *, + repo_id: str, + commit_message: str = "Push FastAI model using huggingface_hub.", + private: bool = False, + token: Optional[str] = None, + config: Optional[dict] = None, + branch: Optional[str] = None, + create_pr: Optional[bool] = None, + allow_patterns: Optional[Union[List[str], str]] = None, + ignore_patterns: Optional[Union[List[str], str]] = None, + delete_patterns: Optional[Union[List[str], str]] = None, + api_endpoint: Optional[str] = None, +): + """ + Upload learner checkpoint files to the Hub. + + Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use + `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more + details. + + Args: + learner (`Learner`): + The `fastai.Learner' you'd like to push to the Hub. + repo_id (`str`): + The repository id for your model in Hub in the format of "namespace/repo_name". The namespace can be your individual account or an organization to which you have write access (for example, 'stanfordnlp/stanza-de'). + commit_message (`str`, *optional*): + Message to commit while pushing. Will default to :obj:`"add model"`. + private (`bool`, *optional*, defaults to `False`): + Whether or not the repository created should be private. + token (`str`, *optional*): + The Hugging Face account token to use as HTTP bearer authorization for remote files. If :obj:`None`, the token will be asked by a prompt. + config (`dict`, *optional*): + Configuration object to be saved alongside the model weights. + branch (`str`, *optional*): + The git branch on which to push the model. This defaults to + the default branch as specified in your repository, which + defaults to `"main"`. + create_pr (`boolean`, *optional*): + Whether or not to create a Pull Request from `branch` with that commit. + Defaults to `False`. + api_endpoint (`str`, *optional*): + The API endpoint to use when pushing the model to the hub. + allow_patterns (`List[str]` or `str`, *optional*): + If provided, only files matching at least one pattern are pushed. + ignore_patterns (`List[str]` or `str`, *optional*): + If provided, files matching any of the patterns are not pushed. + delete_patterns (`List[str]` or `str`, *optional*): + If provided, remote files matching any of the patterns will be deleted from the repo. + + Returns: + The url of the commit of your model in the given repository. + + + + Raises the following error: + + - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) + if the user is not log on to the Hugging Face Hub. 
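+
+    Example (an illustrative sketch, assuming a trained fastai `learn` object
+    and a valid token):
+
+    ```python
+    from huggingface_hub import push_to_hub_fastai
+
+    push_to_hub_fastai(learner=learn, repo_id="username/my-fastai-model")
+    ```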
+ + + """ + _check_fastai_fastcore_versions() + api = HfApi(endpoint=api_endpoint) + repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id + + # Push the files to the repo in a single commit + with SoftTemporaryDirectory() as tmp: + saved_path = Path(tmp) / repo_id + _save_pretrained_fastai(learner, saved_path, config=config) + return api.upload_folder( + repo_id=repo_id, + token=token, + folder_path=saved_path, + commit_message=commit_message, + revision=branch, + create_pr=create_pr, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + delete_patterns=delete_patterns, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/hub_mixin.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/hub_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..eb42ef4e23b31148d90d6533f3debc91d6ce84d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/hub_mixin.py @@ -0,0 +1,704 @@ +import inspect +import json +import os +from dataclasses import asdict, dataclass, is_dataclass +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, Union, get_args + +from .constants import CONFIG_NAME, PYTORCH_WEIGHTS_NAME, SAFETENSORS_SINGLE_FILE +from .file_download import hf_hub_download +from .hf_api import HfApi +from .repocard import ModelCard, ModelCardData +from .utils import ( + EntryNotFoundError, + HfHubHTTPError, + SoftTemporaryDirectory, + is_jsonable, + is_safetensors_available, + is_torch_available, + logging, + validate_hf_hub_args, +) +from .utils._deprecation import _deprecate_arguments + + +if TYPE_CHECKING: + from _typeshed import DataclassInstance + +if is_torch_available(): + import torch # type: ignore + +if is_safetensors_available(): + from safetensors.torch import load_model as load_model_as_safetensor + from safetensors.torch import save_model as save_model_as_safetensor + + +logger = logging.get_logger(__name__) + +# Generic variable that is either ModelHubMixin or a subclass thereof +T = TypeVar("T", bound="ModelHubMixin") + +DEFAULT_MODEL_CARD = """ +--- +# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/model-cards +{{ card_data }} +--- + +This model has been pushed to the Hub using **{{ library_name }}**: +- Repo: {{ repo_url | default("[More Information Needed]", true) }} +- Docs: {{ docs_url | default("[More Information Needed]", true) }} +""" + + +@dataclass +class MixinInfo: + library_name: Optional[str] = None + tags: Optional[List[str]] = None + repo_url: Optional[str] = None + docs_url: Optional[str] = None + + +class ModelHubMixin: + """ + A generic mixin to integrate ANY machine learning framework with the Hub. + + To integrate your framework, your model class must inherit from this class. Custom logic for saving/loading models + have to be overwritten in [`_from_pretrained`] and [`_save_pretrained`]. [`PyTorchModelHubMixin`] is a good example + of mixin integration with the Hub. Check out our [integration guide](../guides/integrations) for more instructions. + + When inheriting from [`ModelHubMixin`], you can define class-level attributes. These attributes are not passed to + `__init__` but to the class definition itself. This is useful to define metadata about the library integrating + [`ModelHubMixin`]. 
+ + Args: + library_name (`str`, *optional*): + Name of the library integrating ModelHubMixin. Used to generate model card. + tags (`List[str]`, *optional*): + Tags to be added to the model card. Used to generate model card. + repo_url (`str`, *optional*): + URL of the library repository. Used to generate model card. + docs_url (`str`, *optional*): + URL of the library documentation. Used to generate model card. + + Example: + + ```python + >>> from huggingface_hub import ModelHubMixin + + # Inherit from ModelHubMixin + >>> class MyCustomModel( + ... ModelHubMixin, + ... library_name="my-library", + ... tags=["x-custom-tag"], + ... repo_url="https://github.com/huggingface/my-cool-library", + ... docs_url="https://huggingface.co/docs/my-cool-library", + ... # ^ optional metadata to generate model card + ... ): + ... def __init__(self, size: int = 512, device: str = "cpu"): + ... # define how to initialize your model + ... super().__init__() + ... ... + ... + ... def _save_pretrained(self, save_directory: Path) -> None: + ... # define how to serialize your model + ... ... + ... + ... @classmethod + ... def from_pretrained( + ... cls: Type[T], + ... pretrained_model_name_or_path: Union[str, Path], + ... *, + ... force_download: bool = False, + ... resume_download: bool = False, + ... proxies: Optional[Dict] = None, + ... token: Optional[Union[str, bool]] = None, + ... cache_dir: Optional[Union[str, Path]] = None, + ... local_files_only: bool = False, + ... revision: Optional[str] = None, + ... **model_kwargs, + ... ) -> T: + ... # define how to deserialize your model + ... ... + + >>> model = MyCustomModel(size=256, device="gpu") + + # Save model weights to local directory + >>> model.save_pretrained("my-awesome-model") + + # Push model weights to the Hub + >>> model.push_to_hub("my-awesome-model") + + # Download and initialize weights from the Hub + >>> reloaded_model = MyCustomModel.from_pretrained("username/my-awesome-model") + >>> reloaded_model._hub_mixin_config + {"size": 256, "device": "gpu"} + + # Model card has been correctly populated + >>> from huggingface_hub import ModelCard + >>> card = ModelCard.load("username/my-awesome-model") + >>> card.data.tags + ["x-custom-tag", "pytorch_model_hub_mixin", "model_hub_mixin"] + >>> card.data.library_name + "my-library" + ``` + """ + + _hub_mixin_config: Optional[Union[dict, "DataclassInstance"]] = None + # ^ optional config attribute automatically set in `from_pretrained` + _hub_mixin_info: MixinInfo + # ^ information about the library integrating ModelHubMixin (used to generate model card) + _hub_mixin_init_parameters: Dict[str, inspect.Parameter] + _hub_mixin_jsonable_default_values: Dict[str, Any] + _hub_mixin_inject_config: bool + # ^ internal values to handle config + + def __init_subclass__( + cls, + *, + library_name: Optional[str] = None, + tags: Optional[List[str]] = None, + repo_url: Optional[str] = None, + docs_url: Optional[str] = None, + ) -> None: + """Inspect __init__ signature only once when subclassing + handle modelcard.""" + super().__init_subclass__() + + # Will be reused when creating modelcard + tags = tags or [] + tags.append("model_hub_mixin") + cls._hub_mixin_info = MixinInfo( + library_name=library_name, + tags=tags, + repo_url=repo_url, + docs_url=docs_url, + ) + + # Inspect __init__ signature to handle config + cls._hub_mixin_init_parameters = dict(inspect.signature(cls.__init__).parameters) + cls._hub_mixin_jsonable_default_values = { + param.name: param.default + for param in 
cls._hub_mixin_init_parameters.values() + if param.default is not inspect.Parameter.empty and is_jsonable(param.default) + } + cls._hub_mixin_inject_config = "config" in inspect.signature(cls._from_pretrained).parameters + + def __new__(cls, *args, **kwargs) -> "ModelHubMixin": + """Create a new instance of the class and handle config. + + 3 cases: + - If `self._hub_mixin_config` is already set, do nothing. + - If `config` is passed as a dataclass, set it as `self._hub_mixin_config`. + - Otherwise, build `self._hub_mixin_config` from default values and passed values. + """ + instance = super().__new__(cls) + + # If `config` is already set, return early + if instance._hub_mixin_config is not None: + return instance + + # Infer passed values + passed_values = { + **{ + key: value + for key, value in zip( + # [1:] to skip `self` parameter + list(cls._hub_mixin_init_parameters)[1:], + args, + ) + }, + **kwargs, + } + + # If config passed as dataclass => set it and return early + if is_dataclass(passed_values.get("config")): + instance._hub_mixin_config = passed_values["config"] + return instance + + # Otherwise, build config from default + passed values + init_config = { + # default values + **cls._hub_mixin_jsonable_default_values, + # passed values + **{key: value for key, value in passed_values.items() if is_jsonable(value)}, + } + init_config.pop("config", {}) + + # Populate `init_config` with provided config + provided_config = passed_values.get("config") + if isinstance(provided_config, dict): + init_config.update(provided_config) + + # Set `config` attribute and return + if init_config != {}: + instance._hub_mixin_config = init_config + return instance + + def save_pretrained( + self, + save_directory: Union[str, Path], + *, + config: Optional[Union[dict, "DataclassInstance"]] = None, + repo_id: Optional[str] = None, + push_to_hub: bool = False, + **push_to_hub_kwargs, + ) -> Optional[str]: + """ + Save weights in local directory. + + Args: + save_directory (`str` or `Path`): + Path to directory in which the model weights and configuration will be saved. + config (`dict` or `DataclassInstance`, *optional*): + Model configuration specified as a key/value dictionary or a dataclass instance. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Huggingface Hub after saving it. + repo_id (`str`, *optional*): + ID of your repository on the Hub. Used only if `push_to_hub=True`. Will default to the folder name if + not provided. + kwargs: + Additional key word arguments passed along to the [`~ModelHubMixin.push_to_hub`] method. + """ + save_directory = Path(save_directory) + save_directory.mkdir(parents=True, exist_ok=True) + + # Remove config.json if already exists. After `_save_pretrained` we don't want to overwrite config.json + # as it might have been saved by the custom `_save_pretrained` already. However we do want to overwrite + # an existing config.json if it was not saved by `_save_pretrained`. 
+ config_path = save_directory / CONFIG_NAME + config_path.unlink(missing_ok=True) + + # save model weights/files (framework-specific) + self._save_pretrained(save_directory) + + # save config (if provided and if not serialized yet in `_save_pretrained`) + if config is None: + config = self._hub_mixin_config + if config is not None: + if is_dataclass(config): + config = asdict(config) # type: ignore[arg-type] + if not config_path.exists(): + config_str = json.dumps(config, sort_keys=True, indent=2) + config_path.write_text(config_str) + + # save model card + model_card_path = save_directory / "README.md" + if not model_card_path.exists(): # do not overwrite if already exists + self.generate_model_card().save(save_directory / "README.md") + + # push to the Hub if required + if push_to_hub: + kwargs = push_to_hub_kwargs.copy() # soft-copy to avoid mutating input + if config is not None: # kwarg for `push_to_hub` + kwargs["config"] = config + if repo_id is None: + repo_id = save_directory.name # Defaults to `save_directory` name + return self.push_to_hub(repo_id=repo_id, **kwargs) + return None + + def _save_pretrained(self, save_directory: Path) -> None: + """ + Overwrite this method in subclass to define how to save your model. + Check out our [integration guide](../guides/integrations) for instructions. + + Args: + save_directory (`str` or `Path`): + Path to directory in which the model weights and configuration will be saved. + """ + raise NotImplementedError + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls: Type[T], + pretrained_model_name_or_path: Union[str, Path], + *, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict] = None, + token: Optional[Union[str, bool]] = None, + cache_dir: Optional[Union[str, Path]] = None, + local_files_only: bool = False, + revision: Optional[str] = None, + **model_kwargs, + ) -> T: + """ + Download a model from the Huggingface Hub and instantiate it. + + Args: + pretrained_model_name_or_path (`str`, `Path`): + - Either the `model_id` (string) of a model hosted on the Hub, e.g. `bigscience/bloom`. + - Or a path to a `directory` containing model weights saved using + [`~transformers.PreTrainedModel.save_pretrained`], e.g., `../path/to/my_model_directory/`. + revision (`str`, *optional*): + Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. + Defaults to the latest commit on `main` branch. + force_download (`bool`, *optional*, defaults to `False`): + Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding + the existing cache. + resume_download (`bool`, *optional*, defaults to `False`): + Whether to delete incompletely received files. Will attempt to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on every request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + cache_dir (`str`, `Path`, *optional*): + Path to the folder where cached files are stored. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, avoid downloading the file and return the path to the local cached file if it exists. 
+ model_kwargs (`Dict`, *optional*): + Additional kwargs to pass to the model during initialization. + """ + model_id = str(pretrained_model_name_or_path) + config_file: Optional[str] = None + if os.path.isdir(model_id): + if CONFIG_NAME in os.listdir(model_id): + config_file = os.path.join(model_id, CONFIG_NAME) + else: + logger.warning(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}") + else: + try: + config_file = hf_hub_download( + repo_id=model_id, + filename=CONFIG_NAME, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + except HfHubHTTPError as e: + logger.info(f"{CONFIG_NAME} not found on the HuggingFace Hub: {str(e)}") + + # Read config + config = None + if config_file is not None: + with open(config_file, "r", encoding="utf-8") as f: + config = json.load(f) + + # Populate model_kwargs from config + for param in cls._hub_mixin_init_parameters.values(): + if param.name not in model_kwargs and param.name in config: + model_kwargs[param.name] = config[param.name] + + # Check if `config` argument was passed at init + if "config" in cls._hub_mixin_init_parameters: + # Check if `config` argument is a dataclass + config_annotation = cls._hub_mixin_init_parameters["config"].annotation + if config_annotation is inspect.Parameter.empty: + pass # no annotation + elif is_dataclass(config_annotation): + config = _load_dataclass(config_annotation, config) + else: + # if Optional/Union annotation => check if a dataclass is in the Union + for _sub_annotation in get_args(config_annotation): + if is_dataclass(_sub_annotation): + config = _load_dataclass(_sub_annotation, config) + break + + # Forward config to model initialization + model_kwargs["config"] = config + + # Inject config if `**kwargs` are expected + if is_dataclass(cls): + for key in cls.__dataclass_fields__: + if key not in model_kwargs and key in config: + model_kwargs[key] = config[key] + elif any(param.kind == inspect.Parameter.VAR_KEYWORD for param in cls._hub_mixin_init_parameters.values()): + for key, value in config.items(): + if key not in model_kwargs: + model_kwargs[key] = value + + # Finally, also inject if `_from_pretrained` expects it + if cls._hub_mixin_inject_config: + model_kwargs["config"] = config + + instance = cls._from_pretrained( + model_id=str(model_id), + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + **model_kwargs, + ) + + # Implicitly set the config as instance attribute if not already set by the class + # This way `config` will be available when calling `save_pretrained` or `push_to_hub`. + if config is not None and (getattr(instance, "_hub_mixin_config", None) in (None, {})): + instance._hub_mixin_config = config + + return instance + + @classmethod + def _from_pretrained( + cls: Type[T], + *, + model_id: str, + revision: Optional[str], + cache_dir: Optional[Union[str, Path]], + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Optional[Union[str, bool]], + **model_kwargs, + ) -> T: + """Overwrite this method in subclass to define how to load your model from pretrained. + + Use [`hf_hub_download`] or [`snapshot_download`] to download files from the Hub before loading them. Most + args taken as input can be directly passed to those 2 methods. 
If needed, you can add more arguments to this + method using "model_kwargs". For example [`PyTorchModelHubMixin._from_pretrained`] takes as input a `map_location` + parameter to set on which device the model should be loaded. + + Check out our [integration guide](../guides/integrations) for more instructions. + + Args: + model_id (`str`): + ID of the model to load from the Huggingface Hub (e.g. `bigscience/bloom`). + revision (`str`, *optional*): + Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. Defaults to the + latest commit on `main` branch. + force_download (`bool`, *optional*, defaults to `False`): + Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding + the existing cache. + resume_download (`bool`, *optional*, defaults to `False`): + Whether to delete incompletely received files. Will attempt to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint (e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`). + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + cache_dir (`str`, `Path`, *optional*): + Path to the folder where cached files are stored. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, avoid downloading the file and return the path to the local cached file if it exists. + model_kwargs: + Additional keyword arguments passed along to the [`~ModelHubMixin._from_pretrained`] method. + """ + raise NotImplementedError + + @_deprecate_arguments( + version="0.23.0", + deprecated_args=["api_endpoint"], + custom_message="Use `HF_ENDPOINT` environment variable instead.", + ) + @validate_hf_hub_args + def push_to_hub( + self, + repo_id: str, + *, + config: Optional[Union[dict, "DataclassInstance"]] = None, + commit_message: str = "Push model using huggingface_hub.", + private: bool = False, + token: Optional[str] = None, + branch: Optional[str] = None, + create_pr: Optional[bool] = None, + allow_patterns: Optional[Union[List[str], str]] = None, + ignore_patterns: Optional[Union[List[str], str]] = None, + delete_patterns: Optional[Union[List[str], str]] = None, + # TODO: remove once deprecated + api_endpoint: Optional[str] = None, + ) -> str: + """ + Upload model checkpoint to the Hub. + + Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use + `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more + details. + + Args: + repo_id (`str`): + ID of the repository to push to (example: `"username/my-model"`). + config (`dict` or `DataclassInstance`, *optional*): + Model configuration specified as a key/value dictionary or a dataclass instance. + commit_message (`str`, *optional*): + Message to commit while pushing. + private (`bool`, *optional*, defaults to `False`): + Whether the repository created should be private. + api_endpoint (`str`, *optional*): + The API endpoint to use when pushing the model to the hub. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + branch (`str`, *optional*): + The git branch on which to push the model. This defaults to `"main"`. 
+ create_pr (`boolean`, *optional*): + Whether or not to create a Pull Request from `branch` with that commit. Defaults to `False`. + allow_patterns (`List[str]` or `str`, *optional*): + If provided, only files matching at least one pattern are pushed. + ignore_patterns (`List[str]` or `str`, *optional*): + If provided, files matching any of the patterns are not pushed. + delete_patterns (`List[str]` or `str`, *optional*): + If provided, remote files matching any of the patterns will be deleted from the repo. + + Returns: + The url of the commit of your model in the given repository. + """ + api = HfApi(endpoint=api_endpoint, token=token) + repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id + + # Push the files to the repo in a single commit + with SoftTemporaryDirectory() as tmp: + saved_path = Path(tmp) / repo_id + self.save_pretrained(saved_path, config=config) + return api.upload_folder( + repo_id=repo_id, + repo_type="model", + folder_path=saved_path, + commit_message=commit_message, + revision=branch, + create_pr=create_pr, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + delete_patterns=delete_patterns, + ) + + def generate_model_card(self, *args, **kwargs) -> ModelCard: + card = ModelCard.from_template( + card_data=ModelCardData(**asdict(self._hub_mixin_info)), + template_str=DEFAULT_MODEL_CARD, + ) + return card + + +class PyTorchModelHubMixin(ModelHubMixin): + """ + Implementation of [`ModelHubMixin`] to provide model Hub upload/download capabilities to PyTorch models. The model + is set in evaluation mode by default using `model.eval()` (dropout modules are deactivated). To train the model, + you should first set it back in training mode with `model.train()`. + + Example: + + ```python + >>> import torch + >>> import torch.nn as nn + >>> from huggingface_hub import PyTorchModelHubMixin + + >>> class MyModel( + ... nn.Module, + ... PyTorchModelHubMixin, + ... library_name="keras-nlp", + ... repo_url="https://github.com/keras-team/keras-nlp", + ... docs_url="https://keras.io/keras_nlp/", + ... # ^ optional metadata to generate model card + ... ): + ... def __init__(self, hidden_size: int = 512, vocab_size: int = 30000, output_size: int = 4): + ... super().__init__() + ... self.param = nn.Parameter(torch.rand(hidden_size, vocab_size)) + ... self.linear = nn.Linear(output_size, vocab_size) + + ... def forward(self, x): + ... 
return self.linear(x + self.param) + >>> model = MyModel(hidden_size=256) + + # Save model weights to local directory + >>> model.save_pretrained("my-awesome-model") + + # Push model weights to the Hub + >>> model.push_to_hub("my-awesome-model") + + # Download and initialize weights from the Hub + >>> model = MyModel.from_pretrained("username/my-awesome-model") + >>> model.hidden_size + 256 + ``` + """ + + def __init_subclass__(cls, *args, tags: Optional[List[str]] = None, **kwargs) -> None: + tags = tags or [] + tags.append("pytorch_model_hub_mixin") + kwargs["tags"] = tags + return super().__init_subclass__(*args, **kwargs) + + def _save_pretrained(self, save_directory: Path) -> None: + """Save weights from a Pytorch model to a local directory.""" + model_to_save = self.module if hasattr(self, "module") else self # type: ignore + save_model_as_safetensor(model_to_save, str(save_directory / SAFETENSORS_SINGLE_FILE)) + + @classmethod + def _from_pretrained( + cls, + *, + model_id: str, + revision: Optional[str], + cache_dir: Optional[Union[str, Path]], + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Union[str, bool, None], + map_location: str = "cpu", + strict: bool = False, + **model_kwargs, + ): + """Load Pytorch pretrained weights and return the loaded model.""" + model = cls(**model_kwargs) + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE) + return cls._load_as_safetensor(model, model_file, map_location, strict) + else: + try: + model_file = hf_hub_download( + repo_id=model_id, + filename=SAFETENSORS_SINGLE_FILE, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + return cls._load_as_safetensor(model, model_file, map_location, strict) + except EntryNotFoundError: + model_file = hf_hub_download( + repo_id=model_id, + filename=PYTORCH_WEIGHTS_NAME, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + return cls._load_as_pickle(model, model_file, map_location, strict) + + @classmethod + def _load_as_pickle(cls, model: T, model_file: str, map_location: str, strict: bool) -> T: + state_dict = torch.load(model_file, map_location=torch.device(map_location)) + model.load_state_dict(state_dict, strict=strict) # type: ignore + model.eval() # type: ignore + return model + + @classmethod + def _load_as_safetensor(cls, model: T, model_file: str, map_location: str, strict: bool) -> T: + load_model_as_safetensor(model, model_file, strict=strict) # type: ignore [arg-type] + if map_location != "cpu": + # TODO: remove this once https://github.com/huggingface/safetensors/pull/449 is merged. + logger.warning( + "Loading model weights on other devices than 'cpu' is not supported natively." + " This means that the model is loaded on 'cpu' first and then copied to the device." + " This leads to a slower loading time." + " Support for loading directly on other devices is planned to be added in future releases." + " See https://github.com/huggingface/huggingface_hub/pull/2086 for more details." 
+ ) + model.to(map_location) # type: ignore [attr-defined] + return model + + +def _load_dataclass(datacls: Type["DataclassInstance"], data: dict) -> "DataclassInstance": + """Load a dataclass instance from a dictionary. + + Fields not expected by the dataclass are ignored. + """ + return datacls(**{k: v for k, v in data.items() if k in datacls.__dataclass_fields__}) diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/keras_mixin.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/keras_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..5584ce84a250b18d3620089e6f6240a37bb47baa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/keras_mixin.py @@ -0,0 +1,502 @@ +import collections.abc as collections +import json +import os +import warnings +from functools import wraps +from pathlib import Path +from shutil import copytree +from typing import Any, Dict, List, Optional, Union + +from huggingface_hub import ModelHubMixin, snapshot_download +from huggingface_hub.utils import ( + get_tf_version, + is_graphviz_available, + is_pydot_available, + is_tf_available, + yaml_dump, +) + +from .constants import CONFIG_NAME +from .hf_api import HfApi +from .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args +from .utils._typing import CallableT + + +logger = logging.get_logger(__name__) + +keras = None +if is_tf_available(): + # Depending on which version of TensorFlow is installed, we need to import + # keras from the correct location. + # See https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1. + # Note: saving a keras model only works with Keras<3.0. + try: + import tf_keras as keras # type: ignore + except ImportError: + import tensorflow as tf # type: ignore + + keras = tf.keras + + +def _requires_keras_2_model(fn: CallableT) -> CallableT: + # Wrapper to raise if user tries to save a Keras 3.x model + @wraps(fn) + def _inner(model, *args, **kwargs): + if not hasattr(model, "history"): # hacky way to check if model is Keras 2.x + raise NotImplementedError( + f"Cannot use '{fn.__name__}': Keras 3.x is not supported." + " Please save models manually and upload them using `upload_folder` or `huggingface-cli upload`." + ) + return fn(model, *args, **kwargs) + + return _inner # type: ignore [return-value] + + +def _flatten_dict(dictionary, parent_key=""): + """Flatten a nested dictionary. + Reference: https://stackoverflow.com/a/6027615/10319735 + + Args: + dictionary (`dict`): + The nested dictionary to be flattened. + parent_key (`str`): + The parent key to be prefixed to the children keys. + Necessary for recursing over the nested dictionary. + + Returns: + The flattened dictionary. 
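+
+    Example (a minimal illustration; the nested keys here are hypothetical):
+
+    ```python
+    >>> _flatten_dict({"optimizer": {"name": "Adam", "learning_rate": 0.001}})
+    {'optimizer.name': 'Adam', 'optimizer.learning_rate': 0.001}
+    ```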
+ """ + items = [] + for key, value in dictionary.items(): + new_key = f"{parent_key}.{key}" if parent_key else key + if isinstance(value, collections.MutableMapping): + items.extend( + _flatten_dict( + value, + new_key, + ).items() + ) + else: + items.append((new_key, value)) + return dict(items) + + +def _create_hyperparameter_table(model): + """Parse hyperparameter dictionary into a markdown table.""" + table = None + if model.optimizer is not None: + optimizer_params = model.optimizer.get_config() + # flatten the configuration + optimizer_params = _flatten_dict(optimizer_params) + optimizer_params["training_precision"] = keras.mixed_precision.global_policy().name + table = "| Hyperparameters | Value |\n| :-- | :-- |\n" + for key, value in optimizer_params.items(): + table += f"| {key} | {value} |\n" + return table + + +def _plot_network(model, save_directory): + keras.utils.plot_model( + model, + to_file=f"{save_directory}/model.png", + show_shapes=False, + show_dtype=False, + show_layer_names=True, + rankdir="TB", + expand_nested=False, + dpi=96, + layer_range=None, + ) + + +def _create_model_card( + model, + repo_dir: Path, + plot_model: bool = True, + metadata: Optional[dict] = None, +): + """ + Creates a model card for the repository. + + Do not overwrite an existing README.md file. + """ + readme_path = repo_dir / "README.md" + if readme_path.exists(): + return + + hyperparameters = _create_hyperparameter_table(model) + if plot_model and is_graphviz_available() and is_pydot_available(): + _plot_network(model, repo_dir) + if metadata is None: + metadata = {} + metadata["library_name"] = "keras" + model_card: str = "---\n" + model_card += yaml_dump(metadata, default_flow_style=False) + model_card += "---\n" + model_card += "\n## Model description\n\nMore information needed\n" + model_card += "\n## Intended uses & limitations\n\nMore information needed\n" + model_card += "\n## Training and evaluation data\n\nMore information needed\n" + if hyperparameters is not None: + model_card += "\n## Training procedure\n" + model_card += "\n### Training hyperparameters\n" + model_card += "\nThe following hyperparameters were used during training:\n\n" + model_card += hyperparameters + model_card += "\n" + if plot_model and os.path.exists(f"{repo_dir}/model.png"): + model_card += "\n ## Model Plot\n" + model_card += "\n
" + model_card += "\nView Model Plot\n" + path_to_plot = "./model.png" + model_card += f"\n![Model Image]({path_to_plot})\n" + model_card += "\n
" + + readme_path.write_text(model_card) + + +@_requires_keras_2_model +def save_pretrained_keras( + model, + save_directory: Union[str, Path], + config: Optional[Dict[str, Any]] = None, + include_optimizer: bool = False, + plot_model: bool = True, + tags: Optional[Union[list, str]] = None, + **model_save_kwargs, +): + """ + Saves a Keras model to save_directory in SavedModel format. Use this if + you're using the Functional or Sequential APIs. + + Args: + model (`Keras.Model`): + The [Keras + model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) + you'd like to save. The model must be compiled and built. + save_directory (`str` or `Path`): + Specify directory in which you want to save the Keras model. + config (`dict`, *optional*): + Configuration object to be saved alongside the model weights. + include_optimizer(`bool`, *optional*, defaults to `False`): + Whether or not to include optimizer in serialization. + plot_model (`bool`, *optional*, defaults to `True`): + Setting this to `True` will plot the model and put it in the model + card. Requires graphviz and pydot to be installed. + tags (Union[`str`,`list`], *optional*): + List of tags that are related to model or string of a single tag. See example tags + [here](https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1). + model_save_kwargs(`dict`, *optional*): + model_save_kwargs will be passed to + [`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model). + """ + if keras is None: + raise ImportError("Called a Tensorflow-specific function but could not import it.") + + if not model.built: + raise ValueError("Model should be built before trying to save") + + save_directory = Path(save_directory) + save_directory.mkdir(parents=True, exist_ok=True) + + # saving config + if config: + if not isinstance(config, dict): + raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'") + + with (save_directory / CONFIG_NAME).open("w") as f: + json.dump(config, f) + + metadata = {} + if isinstance(tags, list): + metadata["tags"] = tags + elif isinstance(tags, str): + metadata["tags"] = [tags] + + task_name = model_save_kwargs.pop("task_name", None) + if task_name is not None: + warnings.warn( + "`task_name` input argument is deprecated. Pass `tags` instead.", + FutureWarning, + ) + if "tags" in metadata: + metadata["tags"].append(task_name) + else: + metadata["tags"] = [task_name] + + if model.history is not None: + if model.history.history != {}: + path = save_directory / "history.json" + if path.exists(): + warnings.warn( + "`history.json` file already exists, it will be overwritten by the history of this version.", + UserWarning, + ) + with path.open("w", encoding="utf-8") as f: + json.dump(model.history.history, f, indent=2, sort_keys=True) + + _create_model_card(model, save_directory, plot_model, metadata) + keras.models.save_model(model, save_directory, include_optimizer=include_optimizer, **model_save_kwargs) + + +def from_pretrained_keras(*args, **kwargs) -> "KerasModelHubMixin": + r""" + Instantiate a pretrained Keras model from a pre-trained model from the Hub. + The model is expected to be in `SavedModel` format. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + - A string, the `model id` of a pretrained model hosted inside a + model repo on huggingface.co. 
Valid model ids can be located
+              at the root-level, like `bert-base-uncased`, or namespaced
+              under a user or organization name, like
+              `dbmdz/bert-base-german-cased`.
+            - You can add `revision` by appending `@` at the end of model_id
+              like this: `dbmdz/bert-base-german-cased@main`. Revision
+              is the specific model version to use. It can be a branch name,
+              a tag name, or a commit id, since we use a git-based system
+              for storing models and other artifacts on huggingface.co, so
+              `revision` can be any identifier allowed by git.
+            - A path to a `directory` containing model weights saved using
+              [`~transformers.PreTrainedModel.save_pretrained`], e.g.,
+              `./my_model_directory/`.
+            - `None` if you are both providing the configuration and state
+              dictionary (resp. with keyword arguments `config` and
+              `state_dict`).
+        force_download (`bool`, *optional*, defaults to `False`):
+            Whether to force the (re-)download of the model weights and
+            configuration files, overriding the cached versions if they exist.
+        resume_download (`bool`, *optional*, defaults to `False`):
+            Whether to delete incompletely received files. Will attempt to
+            resume the download if such a file exists.
+        proxies (`Dict[str, str]`, *optional*):
+            A dictionary of proxy servers to use by protocol or endpoint, e.g.,
+            `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The
+            proxies are used on each request.
+        token (`str` or `bool`, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If
+            `True`, will use the token generated when running `transformers-cli
+            login` (stored in `~/.huggingface`).
+        cache_dir (`Union[str, os.PathLike]`, *optional*):
+            Path to a directory in which a downloaded pretrained model
+            configuration should be cached if the standard cache should not be
+            used.
+        local_files_only (`bool`, *optional*, defaults to `False`):
+            Whether to only look at local files (i.e., do not try to download
+            the model).
+        model_kwargs (`Dict`, *optional*):
+            model_kwargs will be passed to the model during initialization.
+
+    <Tip>
+
+    Passing `token=True` is required when you want to use a private
+    model.
+
+    </Tip>
+    """
+    return KerasModelHubMixin.from_pretrained(*args, **kwargs)
+
+
+@validate_hf_hub_args
+@_requires_keras_2_model
+def push_to_hub_keras(
+    model,
+    repo_id: str,
+    *,
+    config: Optional[dict] = None,
+    commit_message: str = "Push Keras model using huggingface_hub.",
+    private: bool = False,
+    api_endpoint: Optional[str] = None,
+    token: Optional[str] = None,
+    branch: Optional[str] = None,
+    create_pr: Optional[bool] = None,
+    allow_patterns: Optional[Union[List[str], str]] = None,
+    ignore_patterns: Optional[Union[List[str], str]] = None,
+    delete_patterns: Optional[Union[List[str], str]] = None,
+    log_dir: Optional[str] = None,
+    include_optimizer: bool = False,
+    tags: Optional[Union[list, str]] = None,
+    plot_model: bool = True,
+    **model_save_kwargs,
+):
+    """
+    Upload model checkpoint to the Hub.
+
+    Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use
+    `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more
+    details.
+
+    Args:
+        model (`Keras.Model`):
+            The [Keras model](`https://www.tensorflow.org/api_docs/python/tf/keras/Model`) you'd like to push to the
+            Hub. The model must be compiled and built.
+        repo_id (`str`):
+            ID of the repository to push to (example: `"username/my-model"`).
+        commit_message (`str`, *optional*, defaults to "Push Keras model using huggingface_hub."):
+            Message to commit while pushing.
+        private (`bool`, *optional*, defaults to `False`):
+            Whether the repository created should be private.
+        api_endpoint (`str`, *optional*):
+            The API endpoint to use when pushing the model to the hub.
+        token (`str`, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If
+            not set, will use the token set when logging in with
+            `huggingface-cli login` (stored in `~/.huggingface`).
+        branch (`str`, *optional*):
+            The git branch on which to push the model. This defaults to
+            the default branch as specified in your repository, which
+            defaults to `"main"`.
+        create_pr (`boolean`, *optional*):
+            Whether or not to create a Pull Request from `branch` with that commit.
+            Defaults to `False`.
+        config (`dict`, *optional*):
+            Configuration object to be saved alongside the model weights.
+        allow_patterns (`List[str]` or `str`, *optional*):
+            If provided, only files matching at least one pattern are pushed.
+        ignore_patterns (`List[str]` or `str`, *optional*):
+            If provided, files matching any of the patterns are not pushed.
+        delete_patterns (`List[str]` or `str`, *optional*):
+            If provided, remote files matching any of the patterns will be deleted from the repo.
+        log_dir (`str`, *optional*):
+            TensorBoard logging directory to be pushed. The Hub automatically
+            hosts and displays a TensorBoard instance if log files are included
+            in the repository.
+        include_optimizer (`bool`, *optional*, defaults to `False`):
+            Whether or not to include optimizer during serialization.
+        tags (Union[`list`, `str`], *optional*):
+            List of tags that are related to model or string of a single tag. See example tags
+            [here](https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1).
+        plot_model (`bool`, *optional*, defaults to `True`):
+            Setting this to `True` will plot the model and put it in the model
+            card. Requires graphviz and pydot to be installed.
+        model_save_kwargs(`dict`, *optional*):
+            model_save_kwargs will be passed to
+            [`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).
+
+    Returns:
+        The url of the commit of your model in the given repository.
+    """
+    api = HfApi(endpoint=api_endpoint)
+    repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id
+
+    # Push the files to the repo in a single commit
+    with SoftTemporaryDirectory() as tmp:
+        saved_path = Path(tmp) / repo_id
+        save_pretrained_keras(
+            model,
+            saved_path,
+            config=config,
+            include_optimizer=include_optimizer,
+            tags=tags,
+            plot_model=plot_model,
+            **model_save_kwargs,
+        )
+
+        # If `log_dir` provided, delete remote logs and upload new ones
+        if log_dir is not None:
+            delete_patterns = (
+                []
+                if delete_patterns is None
+                else (
+                    [delete_patterns]  # convert `delete_patterns` to a list
+                    if isinstance(delete_patterns, str)
+                    else delete_patterns
+                )
+            )
+            delete_patterns.append("logs/*")
+            copytree(log_dir, saved_path / "logs")
+
+        return api.upload_folder(
+            repo_type="model",
+            repo_id=repo_id,
+            folder_path=saved_path,
+            commit_message=commit_message,
+            token=token,
+            revision=branch,
+            create_pr=create_pr,
+            allow_patterns=allow_patterns,
+            ignore_patterns=ignore_patterns,
+            delete_patterns=delete_patterns,
+        )
+
+
+class KerasModelHubMixin(ModelHubMixin):
+    """
+    Implementation of [`ModelHubMixin`] to provide model Hub upload/download
+    capabilities to Keras models.
+ + + ```python + >>> import tensorflow as tf + >>> from huggingface_hub import KerasModelHubMixin + + + >>> class MyModel(tf.keras.Model, KerasModelHubMixin): + ... def __init__(self, **kwargs): + ... super().__init__() + ... self.config = kwargs.pop("config", None) + ... self.dummy_inputs = ... + ... self.layer = ... + + ... def call(self, *args): + ... return ... + + + >>> # Initialize and compile the model as you normally would + >>> model = MyModel() + >>> model.compile(...) + >>> # Build the graph by training it or passing dummy inputs + >>> _ = model(model.dummy_inputs) + >>> # Save model weights to local directory + >>> model.save_pretrained("my-awesome-model") + >>> # Push model weights to the Hub + >>> model.push_to_hub("my-awesome-model") + >>> # Download and initialize weights from the Hub + >>> model = MyModel.from_pretrained("username/super-cool-model") + ``` + """ + + def _save_pretrained(self, save_directory): + save_pretrained_keras(self, save_directory) + + @classmethod + def _from_pretrained( + cls, + model_id, + revision, + cache_dir, + force_download, + proxies, + resume_download, + local_files_only, + token, + config: Optional[Dict[str, Any]] = None, + **model_kwargs, + ): + """Here we just call [`from_pretrained_keras`] function so both the mixin and + functional APIs stay in sync. + + TODO - Some args above aren't used since we are calling + snapshot_download instead of hf_hub_download. + """ + if keras is None: + raise ImportError("Called a TensorFlow-specific function but could not import it.") + + # Root is either a local filepath matching model_id or a cached snapshot + if not os.path.isdir(model_id): + storage_folder = snapshot_download( + repo_id=model_id, + revision=revision, + cache_dir=cache_dir, + library_name="keras", + library_version=get_tf_version(), + ) + else: + storage_folder = model_id + + # TODO: change this in a future PR. We are not returning a KerasModelHubMixin instance here... + model = keras.models.load_model(storage_folder) + + # For now, we add a new attribute, config, to store the config loaded from the hub/a local dir. + model.config = config + + return model diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/repocard_data.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/repocard_data.py new file mode 100644 index 0000000000000000000000000000000000000000..c85c450cdbad00f1e2850fab431b3b22862a8539 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/repocard_data.py @@ -0,0 +1,729 @@ +import copy +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +from huggingface_hub.utils import logging, yaml_dump + + +logger = logging.get_logger(__name__) + + +@dataclass +class EvalResult: + """ + Flattened representation of individual evaluation results found in model-index of Model Cards. + + For more information on the model-index spec, see https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1. + + Args: + task_type (`str`): + The task identifier. Example: "image-classification". + dataset_type (`str`): + The dataset identifier. Example: "common_voice". Use dataset id from https://hf.co/datasets. + dataset_name (`str`): + A pretty name for the dataset. Example: "Common Voice (French)". + metric_type (`str`): + The metric identifier. Example: "wer". Use metric id from https://hf.co/metrics. + metric_value (`Any`): + The metric value. Example: 0.9 or "20.0 ± 1.2". 
+ task_name (`str`, *optional*): + A pretty name for the task. Example: "Speech Recognition". + dataset_config (`str`, *optional*): + The name of the dataset configuration used in `load_dataset()`. + Example: fr in `load_dataset("common_voice", "fr")`. See the `datasets` docs for more info: + https://hf.co/docs/datasets/package_reference/loading_methods#datasets.load_dataset.name + dataset_split (`str`, *optional*): + The split used in `load_dataset()`. Example: "test". + dataset_revision (`str`, *optional*): + The revision (AKA Git Sha) of the dataset used in `load_dataset()`. + Example: 5503434ddd753f426f4b38109466949a1217c2bb + dataset_args (`Dict[str, Any]`, *optional*): + The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}` + metric_name (`str`, *optional*): + A pretty name for the metric. Example: "Test WER". + metric_config (`str`, *optional*): + The name of the metric configuration used in `load_metric()`. + Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`. + See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations + metric_args (`Dict[str, Any]`, *optional*): + The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4 + verified (`bool`, *optional*): + Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set. + verify_token (`str`, *optional*): + A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. + source_name (`str`, *optional*): + The name of the source of the evaluation result. Example: "Open LLM Leaderboard". + source_url (`str`, *optional*): + The URL of the source of the evaluation result. Example: "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard". + """ + + # Required + + # The task identifier + # Example: automatic-speech-recognition + task_type: str + + # The dataset identifier + # Example: common_voice. Use dataset id from https://hf.co/datasets + dataset_type: str + + # A pretty name for the dataset. + # Example: Common Voice (French) + dataset_name: str + + # The metric identifier + # Example: wer. Use metric id from https://hf.co/metrics + metric_type: str + + # Value of the metric. + # Example: 20.0 or "20.0 ± 1.2" + metric_value: Any + + # Optional + + # A pretty name for the task. + # Example: Speech Recognition + task_name: Optional[str] = None + + # The name of the dataset configuration used in `load_dataset()`. + # Example: fr in `load_dataset("common_voice", "fr")`. + # See the `datasets` docs for more info: + # https://huggingface.co/docs/datasets/package_reference/loading_methods#datasets.load_dataset.name + dataset_config: Optional[str] = None + + # The split used in `load_dataset()`. + # Example: test + dataset_split: Optional[str] = None + + # The revision (AKA Git Sha) of the dataset used in `load_dataset()`. + # Example: 5503434ddd753f426f4b38109466949a1217c2bb + dataset_revision: Optional[str] = None + + # The arguments passed during `Metric.compute()`. + # Example for `bleu`: max_order: 4 + dataset_args: Optional[Dict[str, Any]] = None + + # A pretty name for the metric. + # Example: Test WER + metric_name: Optional[str] = None + + # The name of the metric configuration used in `load_metric()`. 
+ # Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`. + # See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations + metric_config: Optional[str] = None + + # The arguments passed during `Metric.compute()`. + # Example for `bleu`: max_order: 4 + metric_args: Optional[Dict[str, Any]] = None + + # Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set. + verified: Optional[bool] = None + + # A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. + verify_token: Optional[str] = None + + # The name of the source of the evaluation result. + # Example: Open LLM Leaderboard + source_name: Optional[str] = None + + # The URL of the source of the evaluation result. + # Example: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard + source_url: Optional[str] = None + + @property + def unique_identifier(self) -> tuple: + """Returns a tuple that uniquely identifies this evaluation.""" + return ( + self.task_type, + self.dataset_type, + self.dataset_config, + self.dataset_split, + self.dataset_revision, + ) + + def is_equal_except_value(self, other: "EvalResult") -> bool: + """ + Return True if `self` and `other` describe exactly the same metric but with a + different value. + """ + for key, _ in self.__dict__.items(): + if key == "metric_value": + continue + # For metrics computed by Hugging Face's evaluation service, `verify_token` is derived from `metric_value`, + # so we exclude it here in the comparison. + if key != "verify_token" and getattr(self, key) != getattr(other, key): + return False + return True + + def __post_init__(self) -> None: + if self.source_name is not None and self.source_url is None: + raise ValueError("If `source_name` is provided, `source_url` must also be provided.") + + +@dataclass +class CardData: + """Structure containing metadata from a RepoCard. + + [`CardData`] is the parent class of [`ModelCardData`] and [`DatasetCardData`]. + + Metadata can be exported as a dictionary or YAML. Export can be customized to alter the representation of the data + (example: flatten evaluation results). `CardData` behaves as a dictionary (can get, pop, set values) but do not + inherit from `dict` to allow this export step. + """ + + def __init__(self, ignore_metadata_errors: bool = False, **kwargs): + self.__dict__.update(kwargs) + + def to_dict(self) -> Dict[str, Any]: + """Converts CardData to a dict. + + Returns: + `dict`: CardData represented as a dictionary ready to be dumped to a YAML + block for inclusion in a README.md file. + """ + + data_dict = copy.deepcopy(self.__dict__) + self._to_dict(data_dict) + return _remove_none(data_dict) + + def _to_dict(self, data_dict): + """Use this method in child classes to alter the dict representation of the data. Alter the dict in-place. + + Args: + data_dict (`dict`): The raw dict representation of the card data. + """ + pass + + def to_yaml(self, line_break=None) -> str: + """Dumps CardData to a YAML block for inclusion in a README.md file. + + Args: + line_break (str, *optional*): + The line break to use when dumping to yaml. + + Returns: + `str`: CardData represented as a YAML block. 
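+
+        Example (a minimal sketch; the `license` and `tags` values are hypothetical):
+
+        ```python
+        >>> from huggingface_hub import ModelCardData
+        >>> print(ModelCardData(license="mit", tags=["vision"]).to_yaml())
+        license: mit
+        tags:
+        - vision
+        ```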
+ """ + return yaml_dump(self.to_dict(), sort_keys=False, line_break=line_break).strip() + + def __repr__(self): + return repr(self.__dict__) + + def __str__(self): + return self.to_yaml() + + def get(self, key: str, default: Any = None) -> Any: + """Get value for a given metadata key.""" + return self.__dict__.get(key, default) + + def pop(self, key: str, default: Any = None) -> Any: + """Pop value for a given metadata key.""" + return self.__dict__.pop(key, default) + + def __getitem__(self, key: str) -> Any: + """Get value for a given metadata key.""" + return self.__dict__[key] + + def __setitem__(self, key: str, value: Any) -> None: + """Set value for a given metadata key.""" + self.__dict__[key] = value + + def __contains__(self, key: str) -> bool: + """Check if a given metadata key is set.""" + return key in self.__dict__ + + def __len__(self) -> int: + """Return the number of metadata keys set.""" + return len(self.__dict__) + + +class ModelCardData(CardData): + """Model Card Metadata that is used by Hugging Face Hub when included at the top of your README.md + + Args: + language (`Union[str, List[str]]`, *optional*): + Language of model's training data or metadata. It must be an ISO 639-1, 639-2 or + 639-3 code (two/three letters), or a special value like "code", "multilingual". Defaults to `None`. + license (`str`, *optional*): + License of this model. Example: apache-2.0 or any license from + https://huggingface.co/docs/hub/repositories-licenses. Defaults to None. + library_name (`str`, *optional*): + Name of library used by this model. Example: keras or any library from + https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries.ts. + Defaults to None. + tags (`List[str]`, *optional*): + List of tags to add to your model that can be used when filtering on the Hugging + Face Hub. Defaults to None. + base_model (`str` or `List[str]`, *optional*): + The identifier of the base model from which the model derives. This is applicable for example if your model is a + fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs + if your model derives from multiple models). Defaults to None. + datasets (`List[str]`, *optional*): + List of datasets that were used to train this model. Should be a dataset ID + found on https://hf.co/datasets. Defaults to None. + metrics (`List[str]`, *optional*): + List of metrics used to evaluate this model. Should be a metric name that can be found + at https://hf.co/metrics. Example: 'accuracy'. Defaults to None. + eval_results (`Union[List[EvalResult], EvalResult]`, *optional*): + List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided, + `model_name` is used to as a name on PapersWithCode's leaderboards. Defaults to `None`. + model_name (`str`, *optional*): + A name for this model. It is used along with + `eval_results` to construct the `model-index` within the card's metadata. The name + you supply here is what will be used on PapersWithCode's leaderboards. If None is provided + then the repo name is used as a default. Defaults to None. + ignore_metadata_errors (`str`): + If True, errors while parsing the metadata section will be ignored. Some information might be lost during + the process. Use it at your own risk. + kwargs (`dict`, *optional*): + Additional metadata that will be added to the model card. Defaults to None. + + Example: + ```python + >>> from huggingface_hub import ModelCardData + >>> card_data = ModelCardData( + ... 
language="en",
+    ...     license="mit",
+    ...     library_name="timm",
+    ...     tags=['image-classification', 'resnet'],
+    ... )
+    >>> card_data.to_dict()
+    {'language': 'en', 'license': 'mit', 'library_name': 'timm', 'tags': ['image-classification', 'resnet']}
+
+    ```
+    """
+
+    def __init__(
+        self,
+        *,
+        language: Optional[Union[str, List[str]]] = None,
+        license: Optional[str] = None,
+        library_name: Optional[str] = None,
+        tags: Optional[List[str]] = None,
+        base_model: Optional[Union[str, List[str]]] = None,
+        datasets: Optional[List[str]] = None,
+        metrics: Optional[List[str]] = None,
+        eval_results: Optional[List[EvalResult]] = None,
+        model_name: Optional[str] = None,
+        ignore_metadata_errors: bool = False,
+        **kwargs,
+    ):
+        self.language = language
+        self.license = license
+        self.library_name = library_name
+        self.tags = _to_unique_list(tags)
+        self.base_model = base_model
+        self.datasets = datasets
+        self.metrics = metrics
+        self.eval_results = eval_results
+        self.model_name = model_name
+
+        model_index = kwargs.pop("model-index", None)
+        if model_index:
+            try:
+                model_name, eval_results = model_index_to_eval_results(model_index)
+                self.model_name = model_name
+                self.eval_results = eval_results
+            except (KeyError, TypeError) as error:
+                if ignore_metadata_errors:
+                    logger.warning("Invalid model-index. Not loading eval results into CardData.")
+                else:
+                    raise ValueError(
+                        f"Invalid `model_index` in metadata cannot be parsed: {error.__class__} {error}. Pass"
+                        " `ignore_metadata_errors=True` to ignore this error while loading a Model Card. Warning:"
+                        " some information will be lost. Use it at your own risk."
+                    )
+
+        super().__init__(**kwargs)
+
+        if self.eval_results:
+            if type(self.eval_results) == EvalResult:
+                self.eval_results = [self.eval_results]
+            if self.model_name is None:
+                raise ValueError("Passing `eval_results` requires `model_name` to be set.")
+
+    def _to_dict(self, data_dict):
+        """Format the internal data dict. In this case, we convert eval results to a valid model index"""
+        if self.eval_results is not None:
+            data_dict["model-index"] = eval_results_to_model_index(self.model_name, self.eval_results)
+            del data_dict["eval_results"], data_dict["model_name"]
+
+
+class DatasetCardData(CardData):
+    """Dataset Card Metadata that is used by Hugging Face Hub when included at the top of your README.md
+
+    Args:
+        language (`List[str]`, *optional*):
+            Language of dataset's data or metadata. It must be an ISO 639-1, 639-2 or
+            639-3 code (two/three letters), or a special value like "code", "multilingual".
+        license (`Union[str, List[str]]`, *optional*):
+            License(s) of this dataset. Example: apache-2.0 or any license from
+            https://huggingface.co/docs/hub/repositories-licenses.
+        annotations_creators (`Union[str, List[str]]`, *optional*):
+            How the annotations for the dataset were created.
+            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'no-annotation', 'other'.
+        language_creators (`Union[str, List[str]]`, *optional*):
+            How the text-based data in the dataset was created.
+            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'other'.
+        multilinguality (`Union[str, List[str]]`, *optional*):
+            Whether the dataset is multilingual.
+            Options are: 'monolingual', 'multilingual', 'translation', 'other'.
+        size_categories (`Union[str, List[str]]`, *optional*):
+            The number of examples in the dataset. Options are: 'n<1K', '1K<n<10K', '10K<n<100K',
+            '100K<n<1M', '1M<n<10M', '10M<n<100M', '100M<n<1B', '1B<n<10B', '10B<n<100B', '100B<n<1T',
+            'n>1T', and 'other'.
+ source_datasets (`List[str]]`, *optional*): + Indicates whether the dataset is an original dataset or extended from another existing dataset. + Options are: 'original' and 'extended'. + task_categories (`Union[str, List[str]]`, *optional*): + What categories of task does the dataset support? + task_ids (`Union[str, List[str]]`, *optional*): + What specific tasks does the dataset support? + paperswithcode_id (`str`, *optional*): + ID of the dataset on PapersWithCode. + pretty_name (`str`, *optional*): + A more human-readable name for the dataset. (ex. "Cats vs. Dogs") + train_eval_index (`Dict`, *optional*): + A dictionary that describes the necessary spec for doing evaluation on the Hub. + If not provided, it will be gathered from the 'train-eval-index' key of the kwargs. + config_names (`Union[str, List[str]]`, *optional*): + A list of the available dataset configs for the dataset. + """ + + def __init__( + self, + *, + language: Optional[Union[str, List[str]]] = None, + license: Optional[Union[str, List[str]]] = None, + annotations_creators: Optional[Union[str, List[str]]] = None, + language_creators: Optional[Union[str, List[str]]] = None, + multilinguality: Optional[Union[str, List[str]]] = None, + size_categories: Optional[Union[str, List[str]]] = None, + source_datasets: Optional[List[str]] = None, + task_categories: Optional[Union[str, List[str]]] = None, + task_ids: Optional[Union[str, List[str]]] = None, + paperswithcode_id: Optional[str] = None, + pretty_name: Optional[str] = None, + train_eval_index: Optional[Dict] = None, + config_names: Optional[Union[str, List[str]]] = None, + ignore_metadata_errors: bool = False, + **kwargs, + ): + self.annotations_creators = annotations_creators + self.language_creators = language_creators + self.language = language + self.license = license + self.multilinguality = multilinguality + self.size_categories = size_categories + self.source_datasets = source_datasets + self.task_categories = task_categories + self.task_ids = task_ids + self.paperswithcode_id = paperswithcode_id + self.pretty_name = pretty_name + self.config_names = config_names + + # TODO - maybe handle this similarly to EvalResult? + self.train_eval_index = train_eval_index or kwargs.pop("train-eval-index", None) + super().__init__(**kwargs) + + def _to_dict(self, data_dict): + data_dict["train-eval-index"] = data_dict.pop("train_eval_index") + + +class SpaceCardData(CardData): + """Space Card Metadata that is used by Hugging Face Hub when included at the top of your README.md + + To get an exhaustive reference of Spaces configuration, please visit https://huggingface.co/docs/hub/spaces-config-reference#spaces-configuration-reference. + + Args: + title (`str`, *optional*) + Title of the Space. + sdk (`str`, *optional*) + SDK of the Space (one of `gradio`, `streamlit`, `docker`, or `static`). + sdk_version (`str`, *optional*) + Version of the used SDK (if Gradio/Streamlit sdk). + python_version (`str`, *optional*) + Python version used in the Space (if Gradio/Streamlit sdk). + app_file (`str`, *optional*) + Path to your main application file (which contains either gradio or streamlit Python code, or static html code). + Path is relative to the root of the repository. + app_port (`str`, *optional*) + Port on which your application is running. Used only if sdk is `docker`. + license (`str`, *optional*) + License of this model. Example: apache-2.0 or any license from + https://huggingface.co/docs/hub/repositories-licenses. 
+        duplicated_from (`str`, *optional*)
+            ID of the original Space if this is a duplicated Space.
+        models (`List[str]`, *optional*)
+            List of models related to this Space. Should be a model ID found on https://hf.co/models.
+        datasets (`List[str]`, *optional*)
+            List of datasets related to this Space. Should be a dataset ID found on https://hf.co/datasets.
+        tags (`List[str]`, *optional*)
+            List of tags to add to your Space that can be used when filtering on the Hub.
+        ignore_metadata_errors (`bool`):
+            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
+            the process. Use it at your own risk.
+        kwargs (`dict`, *optional*):
+            Additional metadata that will be added to the space card.
+
+    Example:
+        ```python
+        >>> from huggingface_hub import SpaceCardData
+        >>> card_data = SpaceCardData(
+        ...     title="Dreambooth Training",
+        ...     license="mit",
+        ...     sdk="gradio",
+        ...     duplicated_from="multimodalart/dreambooth-training"
+        ... )
+        >>> card_data.to_dict()
+        {'title': 'Dreambooth Training', 'sdk': 'gradio', 'license': 'mit', 'duplicated_from': 'multimodalart/dreambooth-training'}
+        ```
+    """
+
+    def __init__(
+        self,
+        *,
+        title: Optional[str] = None,
+        sdk: Optional[str] = None,
+        sdk_version: Optional[str] = None,
+        python_version: Optional[str] = None,
+        app_file: Optional[str] = None,
+        app_port: Optional[int] = None,
+        license: Optional[str] = None,
+        duplicated_from: Optional[str] = None,
+        models: Optional[List[str]] = None,
+        datasets: Optional[List[str]] = None,
+        tags: Optional[List[str]] = None,
+        ignore_metadata_errors: bool = False,
+        **kwargs,
+    ):
+        self.title = title
+        self.sdk = sdk
+        self.sdk_version = sdk_version
+        self.python_version = python_version
+        self.app_file = app_file
+        self.app_port = app_port
+        self.license = license
+        self.duplicated_from = duplicated_from
+        self.models = models
+        self.datasets = datasets
+        self.tags = _to_unique_list(tags)
+        super().__init__(**kwargs)
+
+
+def model_index_to_eval_results(model_index: List[Dict[str, Any]]) -> Tuple[str, List[EvalResult]]:
+    """Takes in a model index and returns the model name and a list of `huggingface_hub.EvalResult` objects.
+
+    A detailed spec of the model index can be found here:
+    https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
+
+    Args:
+        model_index (`List[Dict[str, Any]]`):
+            A model index data structure, likely coming from a README.md file on the
+            Hugging Face Hub.
+
+    Returns:
+        model_name (`str`):
+            The name of the model as found in the model index. This is used as the
+            identifier for the model on leaderboards like PapersWithCode.
+        eval_results (`List[EvalResult]`):
+            A list of `huggingface_hub.EvalResult` objects containing the metrics
+            reported in the provided model_index.
+
+    Example:
+        ```python
+        >>> from huggingface_hub.repocard_data import model_index_to_eval_results
+        >>> # Define a minimal model index
+        >>> model_index = [
+        ...     {
+        ...         "name": "my-cool-model",
+        ...         "results": [
+        ...             {
+        ...                 "task": {
+        ...                     "type": "image-classification"
+        ...                 },
+        ...                 "dataset": {
+        ...                     "type": "beans",
+        ...                     "name": "Beans"
+        ...                 },
+        ...                 "metrics": [
+        ...                     {
+        ...                         "type": "accuracy",
+        ...                         "value": 0.9
+        ...                     }
+        ...                 ]
+        ...             }
+        ...         ]
+        ...     }
+        ... ]
+        >>> model_name, eval_results = model_index_to_eval_results(model_index)
+        >>> model_name
+        'my-cool-model'
+        >>> eval_results[0].task_type
+        'image-classification'
+        >>> eval_results[0].metric_type
+        'accuracy'
+
+        ```
+    """
+
+    eval_results = []
+    for elem in model_index:
+        name = elem["name"]
+        results = elem["results"]
+        for result in results:
+            task_type = result["task"]["type"]
+            task_name = result["task"].get("name")
+            dataset_type = result["dataset"]["type"]
+            dataset_name = result["dataset"]["name"]
+            dataset_config = result["dataset"].get("config")
+            dataset_split = result["dataset"].get("split")
+            dataset_revision = result["dataset"].get("revision")
+            dataset_args = result["dataset"].get("args")
+            source_name = result.get("source", {}).get("name")
+            source_url = result.get("source", {}).get("url")
+
+            for metric in result["metrics"]:
+                metric_type = metric["type"]
+                metric_value = metric["value"]
+                metric_name = metric.get("name")
+                metric_args = metric.get("args")
+                metric_config = metric.get("config")
+                verified = metric.get("verified")
+                verify_token = metric.get("verifyToken")
+
+                eval_result = EvalResult(
+                    task_type=task_type,  # Required
+                    dataset_type=dataset_type,  # Required
+                    dataset_name=dataset_name,  # Required
+                    metric_type=metric_type,  # Required
+                    metric_value=metric_value,  # Required
+                    task_name=task_name,
+                    dataset_config=dataset_config,
+                    dataset_split=dataset_split,
+                    dataset_revision=dataset_revision,
+                    dataset_args=dataset_args,
+                    metric_name=metric_name,
+                    metric_args=metric_args,
+                    metric_config=metric_config,
+                    verified=verified,
+                    verify_token=verify_token,
+                    source_name=source_name,
+                    source_url=source_url,
+                )
+                eval_results.append(eval_result)
+    return name, eval_results
+
+
+def _remove_none(obj):
+    """
+    Recursively remove `None` values from a dict, list, tuple, or set. Borrowed from: https://stackoverflow.com/a/20558778
+    """
+    if isinstance(obj, (list, tuple, set)):
+        return type(obj)(_remove_none(x) for x in obj if x is not None)
+    elif isinstance(obj, dict):
+        return type(obj)((_remove_none(k), _remove_none(v)) for k, v in obj.items() if k is not None and v is not None)
+    else:
+        return obj
+
+
+def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult]) -> List[Dict[str, Any]]:
+    """Takes in a given model name and a list of `huggingface_hub.EvalResult` objects and returns a
+    valid model-index that will be compatible with the format expected by the
+    Hugging Face Hub.
+
+    Args:
+        model_name (`str`):
+            Name of the model (ex. "my-cool-model"). This is used as the identifier
+            for the model on leaderboards like PapersWithCode.
+        eval_results (`List[EvalResult]`):
+            List of `huggingface_hub.EvalResult` objects containing the metrics to be
+            reported in the model-index.
+
+    Returns:
+        model_index (`List[Dict[str, Any]]`): The eval_results converted to a model-index.
+
+    Example:
+        ```python
+        >>> from huggingface_hub.repocard_data import eval_results_to_model_index, EvalResult
+        >>> # Define minimal eval_results
+        >>> eval_results = [
+        ...     EvalResult(
+        ...         task_type="image-classification",  # Required
+        ...         dataset_type="beans",  # Required
+        ...         dataset_name="Beans",  # Required
+        ...         metric_type="accuracy",  # Required
+        ...         metric_value=0.9,  # Required
+        ...     )
+        ... ]
+        >>> eval_results_to_model_index("my-cool-model", eval_results)
+        [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]
+
+        ```
+    """
+
+    # Metrics are reported on a unique task-and-dataset basis.
+    # Here, we make a map of those pairs and the associated EvalResults.
+    task_and_ds_types_map: Dict[Any, List[EvalResult]] = defaultdict(list)
+    for eval_result in eval_results:
+        task_and_ds_types_map[eval_result.unique_identifier].append(eval_result)
+
+    # Use the map from above to generate the model index data.
+    model_index_data = []
+    for results in task_and_ds_types_map.values():
+        # All items from `results` share the same metadata
+        sample_result = results[0]
+        data = {
+            "task": {
+                "type": sample_result.task_type,
+                "name": sample_result.task_name,
+            },
+            "dataset": {
+                "name": sample_result.dataset_name,
+                "type": sample_result.dataset_type,
+                "config": sample_result.dataset_config,
+                "split": sample_result.dataset_split,
+                "revision": sample_result.dataset_revision,
+                "args": sample_result.dataset_args,
+            },
+            "metrics": [
+                {
+                    "type": result.metric_type,
+                    "value": result.metric_value,
+                    "name": result.metric_name,
+                    "config": result.metric_config,
+                    "args": result.metric_args,
+                    "verified": result.verified,
+                    "verifyToken": result.verify_token,
+                }
+                for result in results
+            ],
+        }
+        if sample_result.source_url is not None:
+            source = {
+                "url": sample_result.source_url,
+            }
+            if sample_result.source_name is not None:
+                source["name"] = sample_result.source_name
+            data["source"] = source
+        model_index_data.append(data)
+
+    # TODO - Check if there are cases where this list is longer than one?
+    # Finally, the model index itself is a list of dicts.
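+    # Added illustration: for the docstring's single EvalResult above,
+    # `model_index_data` at this point is a one-element list shaped like
+    # [{'task': {...}, 'dataset': {...}, 'metrics': [{'type': 'accuracy', 'value': 0.9, ...}]}],
+    # and `_remove_none` below strips every key whose value is None.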
+ model_index = [ + { + "name": model_name, + "results": model_index_data, + } + ] + return _remove_none(model_index) + + +def _to_unique_list(tags: Optional[List[str]]) -> Optional[List[str]]: + if tags is None: + return tags + unique_tags = [] # make tags unique + keep order explicitly + for tag in tags: + if tag not in unique_tags: + unique_tags.append(tag) + return unique_tags diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/__init__.py b/env-llmeval/lib/python3.10/site-packages/more_itertools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aff94a9abd02da42c8c012ea973ac8e812a52284 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/more_itertools/__init__.py @@ -0,0 +1,6 @@ +"""More routines for operating on iterables, beyond itertools""" + +from .more import * # noqa +from .recipes import * # noqa + +__version__ = '10.2.0' diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/more_itertools/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..96f6e36c7f4ac9ea0aebdcd9e11b8d1ff092d2ef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/more_itertools/__init__.pyi @@ -0,0 +1,2 @@ +from .more import * +from .recipes import * diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d406895130f2510691b7112427b5df5e22649fd8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36ab7ffa196836c8b010ab290569623ab28784b6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/more.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..016d7c39f5278fc72cdca50f92207570a3e97889 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/more_itertools/__pycache__/recipes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/more.py b/env-llmeval/lib/python3.10/site-packages/more_itertools/more.py new file mode 100644 index 0000000000000000000000000000000000000000..dd711a47634861f8106eda506bd6d2e521bcbb8b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/more_itertools/more.py @@ -0,0 +1,4656 @@ +import warnings + +from collections import Counter, defaultdict, deque, abc +from collections.abc import Sequence +from functools import cached_property, partial, reduce, wraps +from heapq import heapify, heapreplace, heappop +from itertools import ( + chain, + compress, + count, + cycle, + dropwhile, + groupby, + islice, + repeat, + starmap, + takewhile, + tee, + zip_longest, + product, +) +from math import exp, factorial, floor, log, perm, comb +from queue import Empty, Queue +from random import random, randrange, uniform +from operator import itemgetter, mul, sub, gt, 
lt, ge, le
+from sys import hexversion, maxsize
+from time import monotonic
+
+from .recipes import (
+    _marker,
+    _zip_equal,
+    UnequalIterablesError,
+    consume,
+    flatten,
+    pairwise,
+    powerset,
+    take,
+    unique_everseen,
+    all_equal,
+    batched,
+)
+
+__all__ = [
+    'AbortThread',
+    'SequenceView',
+    'UnequalIterablesError',
+    'adjacent',
+    'all_unique',
+    'always_iterable',
+    'always_reversible',
+    'bucket',
+    'callback_iter',
+    'chunked',
+    'chunked_even',
+    'circular_shifts',
+    'collapse',
+    'combination_index',
+    'combination_with_replacement_index',
+    'consecutive_groups',
+    'constrained_batches',
+    'consumer',
+    'count_cycle',
+    'countable',
+    'difference',
+    'distinct_combinations',
+    'distinct_permutations',
+    'distribute',
+    'divide',
+    'duplicates_everseen',
+    'duplicates_justseen',
+    'classify_unique',
+    'exactly_n',
+    'filter_except',
+    'filter_map',
+    'first',
+    'gray_product',
+    'groupby_transform',
+    'ichunked',
+    'iequals',
+    'ilen',
+    'interleave',
+    'interleave_evenly',
+    'interleave_longest',
+    'intersperse',
+    'is_sorted',
+    'islice_extended',
+    'iterate',
+    'iter_suppress',
+    'last',
+    'locate',
+    'longest_common_prefix',
+    'lstrip',
+    'make_decorator',
+    'map_except',
+    'map_if',
+    'map_reduce',
+    'mark_ends',
+    'minmax',
+    'nth_or_last',
+    'nth_permutation',
+    'nth_product',
+    'nth_combination_with_replacement',
+    'numeric_range',
+    'one',
+    'only',
+    'outer_product',
+    'padded',
+    'partial_product',
+    'partitions',
+    'peekable',
+    'permutation_index',
+    'product_index',
+    'raise_',
+    'repeat_each',
+    'repeat_last',
+    'replace',
+    'rlocate',
+    'rstrip',
+    'run_length',
+    'sample',
+    'seekable',
+    'set_partitions',
+    'side_effect',
+    'sliced',
+    'sort_together',
+    'split_after',
+    'split_at',
+    'split_before',
+    'split_into',
+    'split_when',
+    'spy',
+    'stagger',
+    'strip',
+    'strictly_n',
+    'substrings',
+    'substrings_indexes',
+    'takewhile_inclusive',
+    'time_limited',
+    'unique_in_window',
+    'unique_to_each',
+    'unzip',
+    'value_chain',
+    'windowed',
+    'windowed_complete',
+    'with_iter',
+    'zip_broadcast',
+    'zip_equal',
+    'zip_offset',
+]
+
+
+def chunked(iterable, n, strict=False):
+    """Break *iterable* into lists of length *n*:
+
+    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
+    [[1, 2, 3], [4, 5, 6]]
+
+    By default, the last yielded list will have fewer than *n* elements
+    if the length of *iterable* is not divisible by *n*:
+
+    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
+    [[1, 2, 3], [4, 5, 6], [7, 8]]
+
+    To use a fill-in value instead, see the :func:`grouper` recipe.
+
+    If the length of *iterable* is not divisible by *n* and *strict* is
+    ``True``, then ``ValueError`` will be raised before the last
+    list is yielded.
+
+    """
+    iterator = iter(partial(take, n, iter(iterable)), [])
+    if strict:
+        if n is None:
+            raise ValueError('n must not be None when using strict mode.')
+
+        def ret():
+            for chunk in iterator:
+                if len(chunk) != n:
+                    raise ValueError('iterable is not divisible by n.')
+                yield chunk
+
+        return iter(ret())
+    else:
+        return iterator
+
+
+def first(iterable, default=_marker):
+    """Return the first item of *iterable*, or *default* if *iterable* is
+    empty.
+
+    >>> first([0, 1, 2, 3])
+    0
+    >>> first([], 'some default')
+    'some default'
+
+    If *default* is not provided and there are no items in the iterable,
+    raise ``ValueError``.
+
+    :func:`first` is useful when you have a generator of expensive-to-retrieve
+    values and want any arbitrary one. It is marginally shorter than
+    ``next(iter(iterable), default)``.
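+
+    A further added illustration, taking the first hit from a lazily
+    filtered stream:
+
+    >>> matches = (n for n in range(10) if n % 3 == 2)
+    >>> first(matches, None)
+    2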
+ + """ + for item in iterable: + return item + if default is _marker: + raise ValueError( + 'first() was called on an empty iterable, and no ' + 'default value was provided.' + ) + return default + + +def last(iterable, default=_marker): + """Return the last item of *iterable*, or *default* if *iterable* is + empty. + + >>> last([0, 1, 2, 3]) + 3 + >>> last([], 'some default') + 'some default' + + If *default* is not provided and there are no items in the iterable, + raise ``ValueError``. + """ + try: + if isinstance(iterable, Sequence): + return iterable[-1] + # Work around https://bugs.python.org/issue38525 + elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): + return next(reversed(iterable)) + else: + return deque(iterable, maxlen=1)[-1] + except (IndexError, TypeError, StopIteration): + if default is _marker: + raise ValueError( + 'last() was called on an empty iterable, and no default was ' + 'provided.' + ) + return default + + +def nth_or_last(iterable, n, default=_marker): + """Return the nth or the last item of *iterable*, + or *default* if *iterable* is empty. + + >>> nth_or_last([0, 1, 2, 3], 2) + 2 + >>> nth_or_last([0, 1], 2) + 1 + >>> nth_or_last([], 0, 'some default') + 'some default' + + If *default* is not provided and there are no items in the iterable, + raise ``ValueError``. + """ + return last(islice(iterable, n + 1), default=default) + + +class peekable: + """Wrap an iterator to allow lookahead and prepending elements. + + Call :meth:`peek` on the result to get the value that will be returned + by :func:`next`. This won't advance the iterator: + + >>> p = peekable(['a', 'b']) + >>> p.peek() + 'a' + >>> next(p) + 'a' + + Pass :meth:`peek` a default value to return that instead of raising + ``StopIteration`` when the iterator is exhausted. + + >>> p = peekable([]) + >>> p.peek('hi') + 'hi' + + peekables also offer a :meth:`prepend` method, which "inserts" items + at the head of the iterable: + + >>> p = peekable([1, 2, 3]) + >>> p.prepend(10, 11, 12) + >>> next(p) + 10 + >>> p.peek() + 11 + >>> list(p) + [11, 12, 1, 2, 3] + + peekables can be indexed. Index 0 is the item that will be returned by + :func:`next`, index 1 is the item after that, and so on: + The values up to the given index will be cached. + + >>> p = peekable(['a', 'b', 'c', 'd']) + >>> p[0] + 'a' + >>> p[1] + 'b' + >>> next(p) + 'a' + + Negative indexes are supported, but be aware that they will cache the + remaining items in the source iterator, which may require significant + storage. + + To check whether a peekable is exhausted, check its truth value: + + >>> p = peekable(['a', 'b']) + >>> if p: # peekable has items + ... list(p) + ['a', 'b'] + >>> if not p: # peekable is exhausted + ... list(p) + [] + + """ + + def __init__(self, iterable): + self._it = iter(iterable) + self._cache = deque() + + def __iter__(self): + return self + + def __bool__(self): + try: + self.peek() + except StopIteration: + return False + return True + + def peek(self, default=_marker): + """Return the item that will be next returned from ``next()``. + + Return ``default`` if there are no items left. If ``default`` is not + provided, raise ``StopIteration``. + + """ + if not self._cache: + try: + self._cache.append(next(self._it)) + except StopIteration: + if default is _marker: + raise + return default + return self._cache[0] + + def prepend(self, *items): + """Stack up items to be the next ones returned from ``next()`` or + ``self.peek()``. 
The items will be returned in + first in, first out order:: + + >>> p = peekable([1, 2, 3]) + >>> p.prepend(10, 11, 12) + >>> next(p) + 10 + >>> list(p) + [11, 12, 1, 2, 3] + + It is possible, by prepending items, to "resurrect" a peekable that + previously raised ``StopIteration``. + + >>> p = peekable([]) + >>> next(p) + Traceback (most recent call last): + ... + StopIteration + >>> p.prepend(1) + >>> next(p) + 1 + >>> next(p) + Traceback (most recent call last): + ... + StopIteration + + """ + self._cache.extendleft(reversed(items)) + + def __next__(self): + if self._cache: + return self._cache.popleft() + + return next(self._it) + + def _get_slice(self, index): + # Normalize the slice's arguments + step = 1 if (index.step is None) else index.step + if step > 0: + start = 0 if (index.start is None) else index.start + stop = maxsize if (index.stop is None) else index.stop + elif step < 0: + start = -1 if (index.start is None) else index.start + stop = (-maxsize - 1) if (index.stop is None) else index.stop + else: + raise ValueError('slice step cannot be zero') + + # If either the start or stop index is negative, we'll need to cache + # the rest of the iterable in order to slice from the right side. + if (start < 0) or (stop < 0): + self._cache.extend(self._it) + # Otherwise we'll need to find the rightmost index and cache to that + # point. + else: + n = min(max(start, stop) + 1, maxsize) + cache_len = len(self._cache) + if n >= cache_len: + self._cache.extend(islice(self._it, n - cache_len)) + + return list(self._cache)[index] + + def __getitem__(self, index): + if isinstance(index, slice): + return self._get_slice(index) + + cache_len = len(self._cache) + if index < 0: + self._cache.extend(self._it) + elif index >= cache_len: + self._cache.extend(islice(self._it, index + 1 - cache_len)) + + return self._cache[index] + + +def consumer(func): + """Decorator that automatically advances a PEP-342-style "reverse iterator" + to its first yield point so you don't have to call ``next()`` on it + manually. + + >>> @consumer + ... def tally(): + ... i = 0 + ... while True: + ... print('Thing number %s is %s.' % (i, (yield))) + ... i += 1 + ... + >>> t = tally() + >>> t.send('red') + Thing number 0 is red. + >>> t.send('fish') + Thing number 1 is fish. + + Without the decorator, you would have to call ``next(t)`` before + ``t.send()`` could be used. + + """ + + @wraps(func) + def wrapper(*args, **kwargs): + gen = func(*args, **kwargs) + next(gen) + return gen + + return wrapper + + +def ilen(iterable): + """Return the number of items in *iterable*. + + >>> ilen(x for x in range(1000000) if x % 3 == 0) + 333334 + + This consumes the iterable, so handle with care. + + """ + # This approach was selected because benchmarks showed it's likely the + # fastest of the known implementations at the time of writing. + # See GitHub tracker: #236, #230. + counter = count() + deque(zip(iterable, counter), maxlen=0) + return next(counter) + + +def iterate(func, start): + """Return ``start``, ``func(start)``, ``func(func(start))``, ... + + >>> from itertools import islice + >>> list(islice(iterate(lambda x: 2*x, 1), 10)) + [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + """ + while True: + yield start + try: + start = func(start) + except StopIteration: + break + + +def with_iter(context_manager): + """Wrap an iterable in a ``with`` statement, so it closes once exhausted. 
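+
+    A self-contained sketch with ``io.StringIO`` standing in for a real
+    file (added illustration; any context manager that returns an iterable
+    behaves the same way):
+
+    >>> from io import StringIO
+    >>> buf = StringIO('line')
+    >>> list(with_iter(buf))
+    ['line']
+    >>> buf.closed
+    True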
+
+    For example, this will close the file when the iterator is exhausted::
+
+        upper_lines = (line.upper() for line in with_iter(open('foo')))
+
+    Any context manager which returns an iterable is a candidate for
+    ``with_iter``.
+
+    """
+    with context_manager as iterable:
+        yield from iterable
+
+
+def one(iterable, too_short=None, too_long=None):
+    """Return the first item from *iterable*, which is expected to contain only
+    that item. Raise an exception if *iterable* is empty or has more than one
+    item.
+
+    :func:`one` is useful for ensuring that an iterable contains only one item.
+    For example, it can be used to retrieve the result of a database query
+    that is expected to return a single row.
+
+    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
+    different exception with the *too_short* keyword:
+
+    >>> it = []
+    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: too few items in iterable (expected 1)
+    >>> too_short = IndexError('too few items')
+    >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    IndexError: too few items
+
+    Similarly, if *iterable* contains more than one item, ``ValueError`` will
+    be raised. You may specify a different exception with the *too_long*
+    keyword:
+
+    >>> it = ['too', 'many']
+    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: Expected exactly one item in iterable, but got 'too',
+    'many', and perhaps more.
+    >>> too_long = RuntimeError
+    >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    RuntimeError
+
+    Note that :func:`one` attempts to advance *iterable* twice to ensure there
+    is only one item. See :func:`spy` or :func:`peekable` to check iterable
+    contents less destructively.
+
+    """
+    it = iter(iterable)
+
+    try:
+        first_value = next(it)
+    except StopIteration as e:
+        raise (
+            too_short or ValueError('too few items in iterable (expected 1)')
+        ) from e
+
+    try:
+        second_value = next(it)
+    except StopIteration:
+        pass
+    else:
+        msg = (
+            'Expected exactly one item in iterable, but got {!r}, {!r}, '
+            'and perhaps more.'.format(first_value, second_value)
+        )
+        raise too_long or ValueError(msg)
+
+    return first_value
+
+
+def raise_(exception, *args):
+    raise exception(*args)
+
+
+def strictly_n(iterable, n, too_short=None, too_long=None):
+    """Validate that *iterable* has exactly *n* items and return them if
+    it does. If it has fewer than *n* items, call function *too_short*
+    with the number of items seen. If it has more than *n* items, call
+    function *too_long* with ``n + 1``.
+
+    >>> iterable = ['a', 'b', 'c', 'd']
+    >>> n = 4
+    >>> list(strictly_n(iterable, n))
+    ['a', 'b', 'c', 'd']
+
+    Note that the returned iterable must be consumed in order for the check to
+    be made.
+
+    By default, *too_short* and *too_long* are functions that raise
+    ``ValueError``.
+
+    >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: Too few items in iterable (got 2)
+
+    >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: Too many items in iterable (got at least 3)
+
+    You can instead supply functions that do something else.
+    *too_short* will be called with the number of items in *iterable*.
+    *too_long* will be called with `n + 1`.
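+
+    A small added sketch of a non-raising callback (``record`` is a
+    hypothetical helper, shown only to make the passed value visible):
+
+    >>> def record(count):
+    ...     print('callback got', count)
+    >>> list(strictly_n('ab', 3, too_short=record))
+    callback got 2
+    ['a', 'b']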
+
+    >>> def too_short(item_count):
+    ...     raise RuntimeError
+    >>> it = strictly_n('abcd', 6, too_short=too_short)
+    >>> list(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    RuntimeError
+
+    >>> def too_long(item_count):
+    ...     print('The boss is going to hear about this')
+    >>> it = strictly_n('abcdef', 4, too_long=too_long)
+    >>> list(it)
+    The boss is going to hear about this
+    ['a', 'b', 'c', 'd']
+
+    """
+    if too_short is None:
+        too_short = lambda item_count: raise_(
+            ValueError,
+            'Too few items in iterable (got {})'.format(item_count),
+        )
+
+    if too_long is None:
+        too_long = lambda item_count: raise_(
+            ValueError,
+            'Too many items in iterable (got at least {})'.format(item_count),
+        )
+
+    it = iter(iterable)
+    for i in range(n):
+        try:
+            item = next(it)
+        except StopIteration:
+            too_short(i)
+            return
+        else:
+            yield item
+
+    try:
+        next(it)
+    except StopIteration:
+        pass
+    else:
+        too_long(n + 1)
+
+
+def distinct_permutations(iterable, r=None):
+    """Yield successive distinct permutations of the elements in *iterable*.
+
+    >>> sorted(distinct_permutations([1, 0, 1]))
+    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
+
+    Equivalent to ``set(permutations(iterable))``, except duplicates are not
+    generated and thrown away. For larger input sequences this is much more
+    efficient.
+
+    Duplicate permutations arise when there are duplicated elements in the
+    input iterable. The number of items returned is
+    `n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
+    items input, `k` is the number of distinct items, and each `x_i` is the
+    count of a distinct item in the input sequence.
+
+    If *r* is given, only the *r*-length permutations are yielded.
+
+    >>> sorted(distinct_permutations([1, 0, 1], r=2))
+    [(0, 1), (1, 0), (1, 1)]
+    >>> sorted(distinct_permutations(range(3), r=2))
+    [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+
+    """
+
+    # Algorithm: https://w.wiki/Qai
+    def _full(A):
+        while True:
+            # Yield the permutation we have
+            yield tuple(A)
+
+            # Find the largest index i such that A[i] < A[i + 1]
+            for i in range(size - 2, -1, -1):
+                if A[i] < A[i + 1]:
+                    break
+            # If no such index exists, this permutation is the last one
+            else:
+                return
+
+            # Find the largest index j greater than i such that A[i] < A[j]
+            for j in range(size - 1, i, -1):
+                if A[i] < A[j]:
+                    break
+
+            # Swap the value of A[i] with that of A[j], then reverse the
+            # sequence from A[i + 1] to form the new permutation
+            A[i], A[j] = A[j], A[i]
+            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]
+
+    # Algorithm: modified from the above
+    def _partial(A, r):
+        # Split A into the first r items and the remaining items
+        head, tail = A[:r], A[r:]
+        right_head_indexes = range(r - 1, -1, -1)
+        left_tail_indexes = range(len(tail))
+
+        while True:
+            # Yield the permutation we have
+            yield tuple(head)
+
+            # Starting from the right, find the first index of the head with
+            # value smaller than the maximum value of the tail - call it i.
+            pivot = tail[-1]
+            for i in right_head_indexes:
+                if head[i] < pivot:
+                    break
+                pivot = head[i]
+            else:
+                return
+
+            # Starting from the left, find the first value of the tail
+            # with a value greater than head[i] and swap.
+            for j in left_tail_indexes:
+                if tail[j] > head[i]:
+                    head[i], tail[j] = tail[j], head[i]
+                    break
+            # If we didn't find one, start from the right and find the first
+            # index of the head with a value greater than head[i] and swap.
+ else: + for j in right_head_indexes: + if head[j] > head[i]: + head[i], head[j] = head[j], head[i] + break + + # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] + tail += head[: i - r : -1] # head[i + 1:][::-1] + i += 1 + head[i:], tail[:] = tail[: r - i], tail[r - i :] + + items = sorted(iterable) + + size = len(items) + if r is None: + r = size + + if 0 < r <= size: + return _full(items) if (r == size) else _partial(items, r) + + return iter(() if r else ((),)) + + +def intersperse(e, iterable, n=1): + """Intersperse filler element *e* among the items in *iterable*, leaving + *n* items between each filler element. + + >>> list(intersperse('!', [1, 2, 3, 4, 5])) + [1, '!', 2, '!', 3, '!', 4, '!', 5] + + >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) + [1, 2, None, 3, 4, None, 5] + + """ + if n == 0: + raise ValueError('n must be > 0') + elif n == 1: + # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2... + # islice(..., 1, None) -> x_0, e, x_1, e, x_2... + return islice(interleave(repeat(e), iterable), 1, None) + else: + # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... + # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... + # flatten(...) -> x_0, x_1, e, x_2, x_3... + filler = repeat([e]) + chunks = chunked(iterable, n) + return flatten(islice(interleave(filler, chunks), 1, None)) + + +def unique_to_each(*iterables): + """Return the elements from each of the input iterables that aren't in the + other input iterables. + + For example, suppose you have a set of packages, each with a set of + dependencies:: + + {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} + + If you remove one package, which dependencies can also be removed? + + If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not + associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for + ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: + + >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) + [['A'], ['C'], ['D']] + + If there are duplicates in one input iterable that aren't in the others + they will be duplicated in the output. Input order is preserved:: + + >>> unique_to_each("mississippi", "missouri") + [['p', 'p'], ['o', 'u', 'r']] + + It is assumed that the elements of each iterable are hashable. + + """ + pool = [list(it) for it in iterables] + counts = Counter(chain.from_iterable(map(set, pool))) + uniques = {element for element in counts if counts[element] == 1} + return [list(filter(uniques.__contains__, it)) for it in pool] + + +def windowed(seq, n, fillvalue=None, step=1): + """Return a sliding window of width *n* over the given iterable. 
+ + >>> all_windows = windowed([1, 2, 3, 4, 5], 3) + >>> list(all_windows) + [(1, 2, 3), (2, 3, 4), (3, 4, 5)] + + When the window is larger than the iterable, *fillvalue* is used in place + of missing values: + + >>> list(windowed([1, 2, 3], 4)) + [(1, 2, 3, None)] + + Each window will advance in increments of *step*: + + >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) + [(1, 2, 3), (3, 4, 5), (5, 6, '!')] + + To slide into the iterable's items, use :func:`chain` to add filler items + to the left: + + >>> iterable = [1, 2, 3, 4] + >>> n = 3 + >>> padding = [None] * (n - 1) + >>> list(windowed(chain(padding, iterable), 3)) + [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] + """ + if n < 0: + raise ValueError('n must be >= 0') + if n == 0: + yield tuple() + return + if step < 1: + raise ValueError('step must be >= 1') + + window = deque(maxlen=n) + i = n + for _ in map(window.append, seq): + i -= 1 + if not i: + i = step + yield tuple(window) + + size = len(window) + if size == 0: + return + elif size < n: + yield tuple(chain(window, repeat(fillvalue, n - size))) + elif 0 < i < min(step, n): + window += (fillvalue,) * i + yield tuple(window) + + +def substrings(iterable): + """Yield all of the substrings of *iterable*. + + >>> [''.join(s) for s in substrings('more')] + ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] + + Note that non-string iterables can also be subdivided. + + >>> list(substrings([0, 1, 2])) + [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] + + """ + # The length-1 substrings + seq = [] + for item in iter(iterable): + seq.append(item) + yield (item,) + seq = tuple(seq) + item_count = len(seq) + + # And the rest + for n in range(2, item_count + 1): + for i in range(item_count - n + 1): + yield seq[i : i + n] + + +def substrings_indexes(seq, reverse=False): + """Yield all substrings and their positions in *seq* + + The items yielded will be a tuple of the form ``(substr, i, j)``, where + ``substr == seq[i:j]``. + + This function only works for iterables that support slicing, such as + ``str`` objects. + + >>> for item in substrings_indexes('more'): + ... print(item) + ('m', 0, 1) + ('o', 1, 2) + ('r', 2, 3) + ('e', 3, 4) + ('mo', 0, 2) + ('or', 1, 3) + ('re', 2, 4) + ('mor', 0, 3) + ('ore', 1, 4) + ('more', 0, 4) + + Set *reverse* to ``True`` to yield the same items in the opposite order. + + + """ + r = range(1, len(seq) + 1) + if reverse: + r = reversed(r) + return ( + (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) + ) + + +class bucket: + """Wrap *iterable* and return an object that buckets the iterable into + child iterables based on a *key* function. + + >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] + >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character + >>> sorted(list(s)) # Get the keys + ['a', 'b', 'c'] + >>> a_iterable = s['a'] + >>> next(a_iterable) + 'a1' + >>> next(a_iterable) + 'a2' + >>> list(s['b']) + ['b1', 'b2', 'b3'] + + The original iterable will be advanced and its items will be cached until + they are used by the child iterables. This may require significant storage. + + By default, attempting to select a bucket to which no items belong will + exhaust the iterable and cache all values. + If you specify a *validator* function, selected buckets will instead be + checked against it. 
+ + >>> from itertools import count + >>> it = count(1, 2) # Infinite sequence of odd numbers + >>> key = lambda x: x % 10 # Bucket by last digit + >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only + >>> s = bucket(it, key=key, validator=validator) + >>> 2 in s + False + >>> list(s[2]) + [] + + """ + + def __init__(self, iterable, key, validator=None): + self._it = iter(iterable) + self._key = key + self._cache = defaultdict(deque) + self._validator = validator or (lambda x: True) + + def __contains__(self, value): + if not self._validator(value): + return False + + try: + item = next(self[value]) + except StopIteration: + return False + else: + self._cache[value].appendleft(item) + + return True + + def _get_values(self, value): + """ + Helper to yield items from the parent iterator that match *value*. + Items that don't match are stored in the local cache as they + are encountered. + """ + while True: + # If we've cached some items that match the target value, emit + # the first one and evict it from the cache. + if self._cache[value]: + yield self._cache[value].popleft() + # Otherwise we need to advance the parent iterator to search for + # a matching item, caching the rest. + else: + while True: + try: + item = next(self._it) + except StopIteration: + return + item_value = self._key(item) + if item_value == value: + yield item + break + elif self._validator(item_value): + self._cache[item_value].append(item) + + def __iter__(self): + for item in self._it: + item_value = self._key(item) + if self._validator(item_value): + self._cache[item_value].append(item) + + yield from self._cache.keys() + + def __getitem__(self, value): + if not self._validator(value): + return iter(()) + + return self._get_values(value) + + +def spy(iterable, n=1): + """Return a 2-tuple with a list containing the first *n* elements of + *iterable*, and an iterator with the same items as *iterable*. + This allows you to "look ahead" at the items in the iterable without + advancing it. + + There is one item in the list by default: + + >>> iterable = 'abcdefg' + >>> head, iterable = spy(iterable) + >>> head + ['a'] + >>> list(iterable) + ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + + You may use unpacking to retrieve items instead of lists: + + >>> (head,), iterable = spy('abcdefg') + >>> head + 'a' + >>> (first, second), iterable = spy('abcdefg', 2) + >>> first + 'a' + >>> second + 'b' + + The number of items requested can be larger than the number of items in + the iterable: + + >>> iterable = [1, 2, 3, 4, 5] + >>> head, iterable = spy(iterable, 10) + >>> head + [1, 2, 3, 4, 5] + >>> list(iterable) + [1, 2, 3, 4, 5] + + """ + it = iter(iterable) + head = take(n, it) + + return head.copy(), chain(head, it) + + +def interleave(*iterables): + """Return a new iterable yielding from each iterable in turn, + until the shortest is exhausted. + + >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) + [1, 4, 6, 2, 5, 7] + + For a version that doesn't terminate after the shortest iterable is + exhausted, see :func:`interleave_longest`. + + """ + return chain.from_iterable(zip(*iterables)) + + +def interleave_longest(*iterables): + """Return a new iterable yielding from each iterable in turn, + skipping any that are exhausted. + + >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) + [1, 4, 6, 2, 5, 7, 3, 8] + + This function produces the same output as :func:`roundrobin`, but may + perform better for some inputs (in particular when the number of iterables + is large). 
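+
+    An extra illustrative case (added example), where one input is already
+    empty:
+
+    >>> list(interleave_longest([1, 2], [], ['a']))
+    [1, 'a', 2]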
+ + """ + i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) + return (x for x in i if x is not _marker) + + +def interleave_evenly(iterables, lengths=None): + """ + Interleave multiple iterables so that their elements are evenly distributed + throughout the output sequence. + + >>> iterables = [1, 2, 3, 4, 5], ['a', 'b'] + >>> list(interleave_evenly(iterables)) + [1, 2, 'a', 3, 4, 'b', 5] + + >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]] + >>> list(interleave_evenly(iterables)) + [1, 6, 4, 2, 7, 3, 8, 5] + + This function requires iterables of known length. Iterables without + ``__len__()`` can be used by manually specifying lengths with *lengths*: + + >>> from itertools import combinations, repeat + >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']] + >>> lengths = [4 * (4 - 1) // 2, 3] + >>> list(interleave_evenly(iterables, lengths=lengths)) + [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c'] + + Based on Bresenham's algorithm. + """ + if lengths is None: + try: + lengths = [len(it) for it in iterables] + except TypeError: + raise ValueError( + 'Iterable lengths could not be determined automatically. ' + 'Specify them with the lengths keyword.' + ) + elif len(iterables) != len(lengths): + raise ValueError('Mismatching number of iterables and lengths.') + + dims = len(lengths) + + # sort iterables by length, descending + lengths_permute = sorted( + range(dims), key=lambda i: lengths[i], reverse=True + ) + lengths_desc = [lengths[i] for i in lengths_permute] + iters_desc = [iter(iterables[i]) for i in lengths_permute] + + # the longest iterable is the primary one (Bresenham: the longest + # distance along an axis) + delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:] + iter_primary, iters_secondary = iters_desc[0], iters_desc[1:] + errors = [delta_primary // dims] * len(deltas_secondary) + + to_yield = sum(lengths) + while to_yield: + yield next(iter_primary) + to_yield -= 1 + # update errors for each secondary iterable + errors = [e - delta for e, delta in zip(errors, deltas_secondary)] + + # those iterables for which the error is negative are yielded + # ("diagonal step" in Bresenham) + for i, e in enumerate(errors): + if e < 0: + yield next(iters_secondary[i]) + to_yield -= 1 + errors[i] += delta_primary + + +def collapse(iterable, base_type=None, levels=None): + """Flatten an iterable with multiple levels of nesting (e.g., a list of + lists of tuples) into non-iterable types. + + >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] + >>> list(collapse(iterable)) + [1, 2, 3, 4, 5, 6] + + Binary and text strings are not considered iterable and + will not be collapsed. 
+
+    To avoid collapsing other types, specify *base_type*:
+
+    >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
+    >>> list(collapse(iterable, base_type=tuple))
+    ['ab', ('cd', 'ef'), 'gh', 'ij']
+
+    Specify *levels* to stop flattening after a certain level:
+
+    >>> iterable = [('a', ['b']), ('c', ['d'])]
+    >>> list(collapse(iterable))  # Fully flattened
+    ['a', 'b', 'c', 'd']
+    >>> list(collapse(iterable, levels=1))  # Only one level flattened
+    ['a', ['b'], 'c', ['d']]
+
+    """
+
+    def walk(node, level):
+        if (
+            ((levels is not None) and (level > levels))
+            or isinstance(node, (str, bytes))
+            or ((base_type is not None) and isinstance(node, base_type))
+        ):
+            yield node
+            return
+
+        try:
+            tree = iter(node)
+        except TypeError:
+            yield node
+            return
+        else:
+            for child in tree:
+                yield from walk(child, level + 1)
+
+    yield from walk(iterable, 0)
+
+
+def side_effect(func, iterable, chunk_size=None, before=None, after=None):
+    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
+    of items) before yielding the item.
+
+    `func` must be a function that takes a single argument. Its return value
+    will be discarded.
+
+    *before* and *after* are optional functions that take no arguments. They
+    will be executed before iteration starts and after it ends, respectively.
+
+    `side_effect` can be used for logging, updating progress bars, or anything
+    that is not functionally "pure."
+
+    Emitting a status message:
+
+    >>> from more_itertools import consume
+    >>> func = lambda item: print('Received {}'.format(item))
+    >>> consume(side_effect(func, range(2)))
+    Received 0
+    Received 1
+
+    Operating on chunks of items:
+
+    >>> pair_sums = []
+    >>> func = lambda chunk: pair_sums.append(sum(chunk))
+    >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
+    [0, 1, 2, 3, 4, 5]
+    >>> list(pair_sums)
+    [1, 5, 9]
+
+    Writing to a file-like object:
+
+    >>> from io import StringIO
+    >>> from more_itertools import consume
+    >>> f = StringIO()
+    >>> func = lambda x: print(x, file=f)
+    >>> before = lambda: print(u'HEADER', file=f)
+    >>> after = f.close
+    >>> it = [u'a', u'b', u'c']
+    >>> consume(side_effect(func, it, before=before, after=after))
+    >>> f.closed
+    True
+
+    """
+    try:
+        if before is not None:
+            before()
+
+        if chunk_size is None:
+            for item in iterable:
+                func(item)
+                yield item
+        else:
+            for chunk in chunked(iterable, chunk_size):
+                func(chunk)
+                yield from chunk
+    finally:
+        if after is not None:
+            after()
+
+
+def sliced(seq, n, strict=False):
+    """Yield slices of length *n* from the sequence *seq*.
+
+    >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
+    [(1, 2, 3), (4, 5, 6)]
+
+    By default, the last yielded slice will have fewer than *n* elements
+    if the length of *seq* is not divisible by *n*:
+
+    >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
+    [(1, 2, 3), (4, 5, 6), (7, 8)]
+
+    If the length of *seq* is not divisible by *n* and *strict* is
+    ``True``, then ``ValueError`` will be raised before the last
+    slice is yielded.
+
+    This function will only work for iterables that support slicing.
+    For non-sliceable iterables, see :func:`chunked`.
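+
+    A strict-mode sketch (added illustration):
+
+    >>> list(sliced((1, 2, 3, 4), 2, strict=True))
+    [(1, 2), (3, 4)]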
+ + """ + iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) + if strict: + + def ret(): + for _slice in iterator: + if len(_slice) != n: + raise ValueError("seq is not divisible by n.") + yield _slice + + return iter(ret()) + else: + return iterator + + +def split_at(iterable, pred, maxsplit=-1, keep_separator=False): + """Yield lists of items from *iterable*, where each list is delimited by + an item where callable *pred* returns ``True``. + + >>> list(split_at('abcdcba', lambda x: x == 'b')) + [['a'], ['c', 'd', 'c'], ['a']] + + >>> list(split_at(range(10), lambda n: n % 2 == 1)) + [[0], [2], [4], [6], [8], []] + + At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, + then there is no limit on the number of splits: + + >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) + [[0], [2], [4, 5, 6, 7, 8, 9]] + + By default, the delimiting items are not included in the output. + To include them, set *keep_separator* to ``True``. + + >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) + [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] + + """ + if maxsplit == 0: + yield list(iterable) + return + + buf = [] + it = iter(iterable) + for item in it: + if pred(item): + yield buf + if keep_separator: + yield [item] + if maxsplit == 1: + yield list(it) + return + buf = [] + maxsplit -= 1 + else: + buf.append(item) + yield buf + + +def split_before(iterable, pred, maxsplit=-1): + """Yield lists of items from *iterable*, where each list ends just before + an item for which callable *pred* returns ``True``: + + >>> list(split_before('OneTwo', lambda s: s.isupper())) + [['O', 'n', 'e'], ['T', 'w', 'o']] + + >>> list(split_before(range(10), lambda n: n % 3 == 0)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + + At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, + then there is no limit on the number of splits: + + >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] + """ + if maxsplit == 0: + yield list(iterable) + return + + buf = [] + it = iter(iterable) + for item in it: + if pred(item) and buf: + yield buf + if maxsplit == 1: + yield [item] + list(it) + return + buf = [] + maxsplit -= 1 + buf.append(item) + if buf: + yield buf + + +def split_after(iterable, pred, maxsplit=-1): + """Yield lists of items from *iterable*, where each list ends with an + item where callable *pred* returns ``True``: + + >>> list(split_after('one1two2', lambda s: s.isdigit())) + [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] + + >>> list(split_after(range(10), lambda n: n % 3 == 0)) + [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] + + At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, + then there is no limit on the number of splits: + + >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) + [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] + + """ + if maxsplit == 0: + yield list(iterable) + return + + buf = [] + it = iter(iterable) + for item in it: + buf.append(item) + if pred(item) and buf: + yield buf + if maxsplit == 1: + buf = list(it) + if buf: + yield buf + return + buf = [] + maxsplit -= 1 + if buf: + yield buf + + +def split_when(iterable, pred, maxsplit=-1): + """Split *iterable* into pieces based on the output of *pred*. + *pred* should be a function that takes successive pairs of items and + returns ``True`` if the iterable should be split in between them. 
+
+    For example, to find runs of increasing numbers, split the iterable when
+    element ``i`` is larger than element ``i + 1``:
+
+    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
+    [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
+
+    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+    then there is no limit on the number of splits:
+
+    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
+    ...                 lambda x, y: x > y, maxsplit=2))
+    [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
+
+    """
+    if maxsplit == 0:
+        yield list(iterable)
+        return
+
+    it = iter(iterable)
+    try:
+        cur_item = next(it)
+    except StopIteration:
+        return
+
+    buf = [cur_item]
+    for next_item in it:
+        if pred(cur_item, next_item):
+            yield buf
+            if maxsplit == 1:
+                yield [next_item] + list(it)
+                return
+            buf = []
+            maxsplit -= 1
+
+        buf.append(next_item)
+        cur_item = next_item
+
+    yield buf
+
+
+def split_into(iterable, sizes):
+    """Yield a list of sequential items from *iterable* of length *n* for each
+    integer *n* in *sizes*.
+
+    >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
+    [[1], [2, 3], [4, 5, 6]]
+
+    If the sum of *sizes* is smaller than the length of *iterable*, then the
+    remaining items of *iterable* will not be returned.
+
+    >>> list(split_into([1,2,3,4,5,6], [2,3]))
+    [[1, 2], [3, 4, 5]]
+
+    If the sum of *sizes* is larger than the length of *iterable*, fewer items
+    will be returned in the iteration that overruns *iterable* and further
+    lists will be empty:
+
+    >>> list(split_into([1,2,3,4], [1,2,3,4]))
+    [[1], [2, 3], [4], []]
+
+    When a ``None`` object is encountered in *sizes*, the returned list will
+    contain items up to the end of *iterable* the same way that
+    :func:`itertools.islice` does:
+
+    >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
+    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
+
+    :func:`split_into` can be useful for grouping a series of items where the
+    sizes of the groups are not uniform. An example would be where in a row
+    from a table, multiple columns represent elements of the same feature
+    (e.g. a point represented by x,y,z) but the format is not the same for
+    all columns.
+    """
+    # convert the iterable argument into an iterator so its contents can
+    # be consumed by islice in case it is a generator
+    it = iter(iterable)
+
+    for size in sizes:
+        if size is None:
+            yield list(it)
+            return
+        else:
+            yield list(islice(it, size))
+
+
+def padded(iterable, fillvalue=None, n=None, next_multiple=False):
+    """Yield the elements from *iterable*, followed by *fillvalue*, such that
+    at least *n* items are emitted.
+
+    >>> list(padded([1, 2, 3], '?', 5))
+    [1, 2, 3, '?', '?']
+
+    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
+    number of items emitted is a multiple of *n*::
+
+    >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
+    [1, 2, 3, 4, None, None]
+
+    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
+
+    """
+    it = iter(iterable)
+    if n is None:
+        yield from chain(it, repeat(fillvalue))
+    elif n < 1:
+        raise ValueError('n must be at least 1')
+    else:
+        item_count = 0
+        for item in it:
+            yield item
+            item_count += 1
+
+        remaining = (n - item_count) % n if next_multiple else n - item_count
+        for _ in range(remaining):
+            yield fillvalue
+
+
+def repeat_each(iterable, n=2):
+    """Repeat each element in *iterable* *n* times.
+
+    >>> list(repeat_each('ABC', 3))
+    ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
+    """
+    return chain.from_iterable(map(repeat, iterable, repeat(n)))
+
+
+def repeat_last(iterable, default=None):
+    """After the *iterable* is exhausted, keep yielding its last element.
+
+    >>> list(islice(repeat_last(range(3)), 5))
+    [0, 1, 2, 2, 2]
+
+    If the iterable is empty, yield *default* forever::
+
+    >>> list(islice(repeat_last(range(0), 42), 5))
+    [42, 42, 42, 42, 42]
+
+    """
+    item = _marker
+    for item in iterable:
+        yield item
+    final = default if item is _marker else item
+    yield from repeat(final)
+
+
+def distribute(n, iterable):
+    """Distribute the items from *iterable* among *n* smaller iterables.
+
+    >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
+    >>> list(group_1)
+    [1, 3, 5]
+    >>> list(group_2)
+    [2, 4, 6]
+
+    If the length of *iterable* is not evenly divisible by *n*, then the
+    length of the returned iterables will not be identical:
+
+    >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
+    >>> [list(c) for c in children]
+    [[1, 4, 7], [2, 5], [3, 6]]
+
+    If the length of *iterable* is smaller than *n*, then the last returned
+    iterables will be empty:
+
+    >>> children = distribute(5, [1, 2, 3])
+    >>> [list(c) for c in children]
+    [[1], [2], [3], [], []]
+
+    This function uses :func:`itertools.tee` and may require significant
+    storage. If you need the order of items in the smaller iterables to match
+    the original iterable, see :func:`divide`.
+
+    """
+    if n < 1:
+        raise ValueError('n must be at least 1')
+
+    children = tee(iterable, n)
+    return [islice(it, index, None, n) for index, it in enumerate(children)]
+
+
+def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
+    """Yield tuples whose elements are offset from *iterable*.
+    The amount by which the `i`-th item in each tuple is offset is given by
+    the `i`-th item in *offsets*.
+
+    >>> list(stagger([0, 1, 2, 3]))
+    [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
+    >>> list(stagger(range(8), offsets=(0, 2, 4)))
+    [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
+
+    By default, the sequence will end when the final element of a tuple is the
+    last item in the iterable. To continue until the first element of a tuple
+    is the last item in the iterable, set *longest* to ``True``::
+
+    >>> list(stagger([0, 1, 2, 3], longest=True))
+    [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
+
+    By default, ``None`` will be used to replace offsets beyond the end of the
+    sequence. Specify *fillvalue* to use some other value.
+
+    """
+    children = tee(iterable, len(offsets))
+
+    return zip_offset(
+        *children, offsets=offsets, longest=longest, fillvalue=fillvalue
+    )
+
+
+def zip_equal(*iterables):
+    """``zip`` the input *iterables* together, but raise
+    ``UnequalIterablesError`` if they aren't all the same length.
+
+    >>> it_1 = range(3)
+    >>> it_2 = iter('abc')
+    >>> list(zip_equal(it_1, it_2))
+    [(0, 'a'), (1, 'b'), (2, 'c')]
+
+    >>> it_1 = range(3)
+    >>> it_2 = iter('abcd')
+    >>> list(zip_equal(it_1, it_2))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    more_itertools.more.UnequalIterablesError: Iterables have different
+    lengths
+
+    """
+    if hexversion >= 0x30A00A6:
+        warnings.warn(
+            (
+                'zip_equal will be removed in a future version of '
+                'more-itertools. Use the builtin zip function with '
+                'strict=True instead.'
+ ), + DeprecationWarning, + ) + + return _zip_equal(*iterables) + + +def zip_offset(*iterables, offsets, longest=False, fillvalue=None): + """``zip`` the input *iterables* together, but offset the `i`-th iterable + by the `i`-th item in *offsets*. + + >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) + [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] + + This can be used as a lightweight alternative to SciPy or pandas to analyze + data sets in which some series have a lead or lag relationship. + + By default, the sequence will end when the shortest iterable is exhausted. + To continue until the longest iterable is exhausted, set *longest* to + ``True``. + + >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) + [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] + + By default, ``None`` will be used to replace offsets beyond the end of the + sequence. Specify *fillvalue* to use some other value. + + """ + if len(iterables) != len(offsets): + raise ValueError("Number of iterables and offsets didn't match") + + staggered = [] + for it, n in zip(iterables, offsets): + if n < 0: + staggered.append(chain(repeat(fillvalue, -n), it)) + elif n > 0: + staggered.append(islice(it, n, None)) + else: + staggered.append(it) + + if longest: + return zip_longest(*staggered, fillvalue=fillvalue) + + return zip(*staggered) + + +def sort_together(iterables, key_list=(0,), key=None, reverse=False): + """Return the input iterables sorted together, with *key_list* as the + priority for sorting. All iterables are trimmed to the length of the + shortest one. + + This can be used like the sorting function in a spreadsheet. If each + iterable represents a column of data, the key list determines which + columns are used for sorting. + + By default, all iterables are sorted using the ``0``-th iterable:: + + >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] + >>> sort_together(iterables) + [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] + + Set a different key list to sort according to another iterable. + Specifying multiple keys dictates how ties are broken:: + + >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] + >>> sort_together(iterables, key_list=(1, 2)) + [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] + + To sort by a function of the elements of the iterable, pass a *key* + function. Its arguments are the elements of the iterables corresponding to + the key list:: + + >>> names = ('a', 'b', 'c') + >>> lengths = (1, 2, 3) + >>> widths = (5, 2, 1) + >>> def area(length, width): + ... return length * width + >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) + [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] + + Set *reverse* to ``True`` to sort in descending order. 
+
+    >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
+    [(3, 2, 1), ('a', 'b', 'c')]
+
+    """
+    if key is None:
+        # if there is no key function, the key argument to sorted is an
+        # itemgetter
+        key_argument = itemgetter(*key_list)
+    else:
+        # if there is a key function, call it with the items at the offsets
+        # specified by the key list as arguments
+        key_list = list(key_list)
+        if len(key_list) == 1:
+            # if key_list contains a single item, pass the item at that offset
+            # as the only argument to the key function
+            key_offset = key_list[0]
+            key_argument = lambda zipped_items: key(zipped_items[key_offset])
+        else:
+            # if key_list contains multiple items, use itemgetter to return a
+            # tuple of items, which we pass as *args to the key function
+            get_key_items = itemgetter(*key_list)
+            key_argument = lambda zipped_items: key(
+                *get_key_items(zipped_items)
+            )
+
+    return list(
+        zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
+    )
+
+
+def unzip(iterable):
+    """The inverse of :func:`zip`, this function disaggregates the elements
+    of the zipped *iterable*.
+
+    The ``i``-th iterable contains the ``i``-th element from each element
+    of the zipped iterable. The first element is used to determine the
+    length of the remaining elements.
+
+    >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
+    >>> letters, numbers = unzip(iterable)
+    >>> list(letters)
+    ['a', 'b', 'c', 'd']
+    >>> list(numbers)
+    [1, 2, 3, 4]
+
+    This is similar to using ``zip(*iterable)``, but it avoids reading
+    *iterable* into memory. Note, however, that this function uses
+    :func:`itertools.tee` and thus may require significant storage.
+
+    """
+    head, iterable = spy(iter(iterable))
+    if not head:
+        # empty iterable, e.g. zip([], [], [])
+        return ()
+    # spy returns a one-length iterable as head
+    head = head[0]
+    iterables = tee(iterable, len(head))
+
+    def itemgetter(i):
+        def getter(obj):
+            try:
+                return obj[i]
+            except IndexError:
+                # basically if we have an iterable like
+                # iter([(1, 2, 3), (4, 5), (6,)])
+                # the second unzipped iterable would fail at the third tuple
+                # since it would try to access tup[1]
+                # same with the third unzipped iterable and the second tuple
+                # to support these "improperly zipped" iterables,
+                # we create a custom itemgetter
+                # which just stops the unzipped iterables
+                # at first length mismatch
+                raise StopIteration
+
+        return getter
+
+    return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
+
+
+def divide(n, iterable):
+    """Divide the elements from *iterable* into *n* parts, maintaining
+    order.
+
+    >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
+    >>> list(group_1)
+    [1, 2, 3]
+    >>> list(group_2)
+    [4, 5, 6]
+
+    If the length of *iterable* is not evenly divisible by *n*, then the
+    length of the returned iterables will not be identical:
+
+    >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
+    >>> [list(c) for c in children]
+    [[1, 2, 3], [4, 5], [6, 7]]
+
+    If the length of the iterable is smaller than *n*, then the last returned
+    iterables will be empty:
+
+    >>> children = divide(5, [1, 2, 3])
+    >>> [list(c) for c in children]
+    [[1], [2], [3], [], []]
+
+    This function will exhaust the iterable before returning and may require
+    significant storage. If order is not important, see :func:`distribute`,
+    which does not first pull the iterable into memory.
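+
+    An added illustration, dividing a string (any sliceable sequence works):
+
+    >>> [list(c) for c in divide(2, 'abcde')]
+    [['a', 'b', 'c'], ['d', 'e']]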
+ + """ + if n < 1: + raise ValueError('n must be at least 1') + + try: + iterable[:0] + except TypeError: + seq = tuple(iterable) + else: + seq = iterable + + q, r = divmod(len(seq), n) + + ret = [] + stop = 0 + for i in range(1, n + 1): + start = stop + stop += q + 1 if i <= r else q + ret.append(iter(seq[start:stop])) + + return ret + + +def always_iterable(obj, base_type=(str, bytes)): + """If *obj* is iterable, return an iterator over its items:: + + >>> obj = (1, 2, 3) + >>> list(always_iterable(obj)) + [1, 2, 3] + + If *obj* is not iterable, return a one-item iterable containing *obj*:: + + >>> obj = 1 + >>> list(always_iterable(obj)) + [1] + + If *obj* is ``None``, return an empty iterable: + + >>> obj = None + >>> list(always_iterable(None)) + [] + + By default, binary and text strings are not considered iterable:: + + >>> obj = 'foo' + >>> list(always_iterable(obj)) + ['foo'] + + If *base_type* is set, objects for which ``isinstance(obj, base_type)`` + returns ``True`` won't be considered iterable. + + >>> obj = {'a': 1} + >>> list(always_iterable(obj)) # Iterate over the dict's keys + ['a'] + >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit + [{'a': 1}] + + Set *base_type* to ``None`` to avoid any special handling and treat objects + Python considers iterable as iterable: + + >>> obj = 'foo' + >>> list(always_iterable(obj, base_type=None)) + ['f', 'o', 'o'] + """ + if obj is None: + return iter(()) + + if (base_type is not None) and isinstance(obj, base_type): + return iter((obj,)) + + try: + return iter(obj) + except TypeError: + return iter((obj,)) + + +def adjacent(predicate, iterable, distance=1): + """Return an iterable over `(bool, item)` tuples where the `item` is + drawn from *iterable* and the `bool` indicates whether + that item satisfies the *predicate* or is adjacent to an item that does. + + For example, to find whether items are adjacent to a ``3``:: + + >>> list(adjacent(lambda x: x == 3, range(6))) + [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] + + Set *distance* to change what counts as adjacent. For example, to find + whether items are two places away from a ``3``: + + >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) + [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] + + This is useful for contextualizing the results of a search function. + For example, a code comparison tool might want to identify lines that + have changed, but also surrounding lines to give the viewer of the diff + context. + + The predicate function will only be called once for each item in the + iterable. + + See also :func:`groupby_transform`, which can be used with this function + to group ranges of items with the same `bool` value. + + """ + # Allow distance=0 mainly for testing that it reproduces results with map() + if distance < 0: + raise ValueError('distance must be at least 0') + + i1, i2 = tee(iterable) + padding = [False] * distance + selected = chain(padding, map(predicate, i1), padding) + adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) + return zip(adjacent_to_selected, i2) + + +def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): + """An extension of :func:`itertools.groupby` that can apply transformations + to the grouped data. 
+ + * *keyfunc* is a function computing a key value for each item in *iterable* + * *valuefunc* is a function that transforms the individual items from + *iterable* after grouping + * *reducefunc* is a function that transforms each group of items + + >>> iterable = 'aAAbBBcCC' + >>> keyfunc = lambda k: k.upper() + >>> valuefunc = lambda v: v.lower() + >>> reducefunc = lambda g: ''.join(g) + >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) + [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] + + Each optional argument defaults to an identity function if not specified. + + :func:`groupby_transform` is useful when grouping elements of an iterable + using a separate iterable as the key. To do this, :func:`zip` the iterables + and pass a *keyfunc* that extracts the first element and a *valuefunc* + that extracts the second element:: + + >>> from operator import itemgetter + >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] + >>> values = 'abcdefghi' + >>> iterable = zip(keys, values) + >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) + >>> [(k, ''.join(g)) for k, g in grouper] + [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] + + Note that the order of items in the iterable is significant. + Only adjacent items are grouped together, so if you don't want any + duplicate groups, you should sort the iterable by the key function. + + """ + ret = groupby(iterable, keyfunc) + if valuefunc: + ret = ((k, map(valuefunc, g)) for k, g in ret) + if reducefunc: + ret = ((k, reducefunc(g)) for k, g in ret) + + return ret + + +class numeric_range(abc.Sequence, abc.Hashable): + """An extension of the built-in ``range()`` function whose arguments can + be any orderable numeric type. + + With only *stop* specified, *start* defaults to ``0`` and *step* + defaults to ``1``. The output items will match the type of *stop*: + + >>> list(numeric_range(3.5)) + [0.0, 1.0, 2.0, 3.0] + + With only *start* and *stop* specified, *step* defaults to ``1``. The + output items will match the type of *start*: + + >>> from decimal import Decimal + >>> start = Decimal('2.1') + >>> stop = Decimal('5.1') + >>> list(numeric_range(start, stop)) + [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] + + With *start*, *stop*, and *step* specified the output items will match + the type of ``start + step``: + + >>> from fractions import Fraction + >>> start = Fraction(1, 2) # Start at 1/2 + >>> stop = Fraction(5, 2) # End at 5/2 + >>> step = Fraction(1, 2) # Count by 1/2 + >>> list(numeric_range(start, stop, step)) + [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] + + If *step* is zero, ``ValueError`` is raised. Negative steps are supported: + + >>> list(numeric_range(3, -1, -1.0)) + [3.0, 2.0, 1.0, 0.0] + + Be aware of the limitations of floating point numbers; the representation + of the yielded numbers may be surprising. 
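+
+    For example (one sketch of such a surprise, a consequence of binary
+    floating point arithmetic rather than of this class):
+
+    >>> 0.3 in numeric_range(0, 1, 0.1)  # 0.3 % 0.1 is not exactly 0.0
+    False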
+ + ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* + is a ``datetime.timedelta`` object: + + >>> import datetime + >>> start = datetime.datetime(2019, 1, 1) + >>> stop = datetime.datetime(2019, 1, 3) + >>> step = datetime.timedelta(days=1) + >>> items = iter(numeric_range(start, stop, step)) + >>> next(items) + datetime.datetime(2019, 1, 1, 0, 0) + >>> next(items) + datetime.datetime(2019, 1, 2, 0, 0) + + """ + + _EMPTY_HASH = hash(range(0, 0)) + + def __init__(self, *args): + argc = len(args) + if argc == 1: + (self._stop,) = args + self._start = type(self._stop)(0) + self._step = type(self._stop - self._start)(1) + elif argc == 2: + self._start, self._stop = args + self._step = type(self._stop - self._start)(1) + elif argc == 3: + self._start, self._stop, self._step = args + elif argc == 0: + raise TypeError( + 'numeric_range expected at least ' + '1 argument, got {}'.format(argc) + ) + else: + raise TypeError( + 'numeric_range expected at most ' + '3 arguments, got {}'.format(argc) + ) + + self._zero = type(self._step)(0) + if self._step == self._zero: + raise ValueError('numeric_range() arg 3 must not be zero') + self._growing = self._step > self._zero + + def __bool__(self): + if self._growing: + return self._start < self._stop + else: + return self._start > self._stop + + def __contains__(self, elem): + if self._growing: + if self._start <= elem < self._stop: + return (elem - self._start) % self._step == self._zero + else: + if self._start >= elem > self._stop: + return (self._start - elem) % (-self._step) == self._zero + + return False + + def __eq__(self, other): + if isinstance(other, numeric_range): + empty_self = not bool(self) + empty_other = not bool(other) + if empty_self or empty_other: + return empty_self and empty_other # True if both empty + else: + return ( + self._start == other._start + and self._step == other._step + and self._get_by_index(-1) == other._get_by_index(-1) + ) + else: + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self._get_by_index(key) + elif isinstance(key, slice): + step = self._step if key.step is None else key.step * self._step + + if key.start is None or key.start <= -self._len: + start = self._start + elif key.start >= self._len: + start = self._stop + else: # -self._len < key.start < self._len + start = self._get_by_index(key.start) + + if key.stop is None or key.stop >= self._len: + stop = self._stop + elif key.stop <= -self._len: + stop = self._start + else: # -self._len < key.stop < self._len + stop = self._get_by_index(key.stop) + + return numeric_range(start, stop, step) + else: + raise TypeError( + 'numeric range indices must be ' + 'integers or slices, not {}'.format(type(key).__name__) + ) + + def __hash__(self): + if self: + return hash((self._start, self._get_by_index(-1), self._step)) + else: + return self._EMPTY_HASH + + def __iter__(self): + values = (self._start + (n * self._step) for n in count()) + if self._growing: + return takewhile(partial(gt, self._stop), values) + else: + return takewhile(partial(lt, self._stop), values) + + def __len__(self): + return self._len + + @cached_property + def _len(self): + if self._growing: + start = self._start + stop = self._stop + step = self._step + else: + start = self._stop + stop = self._start + step = -self._step + distance = stop - start + if distance <= self._zero: + return 0 + else: # distance > 0 and step > 0: regular euclidean division + q, r = divmod(distance, step) + return int(q) + int(r != self._zero) + + def 
__reduce__(self): + return numeric_range, (self._start, self._stop, self._step) + + def __repr__(self): + if self._step == 1: + return "numeric_range({}, {})".format( + repr(self._start), repr(self._stop) + ) + else: + return "numeric_range({}, {}, {})".format( + repr(self._start), repr(self._stop), repr(self._step) + ) + + def __reversed__(self): + return iter( + numeric_range( + self._get_by_index(-1), self._start - self._step, -self._step + ) + ) + + def count(self, value): + return int(value in self) + + def index(self, value): + if self._growing: + if self._start <= value < self._stop: + q, r = divmod(value - self._start, self._step) + if r == self._zero: + return int(q) + else: + if self._start >= value > self._stop: + q, r = divmod(self._start - value, -self._step) + if r == self._zero: + return int(q) + + raise ValueError("{} is not in numeric range".format(value)) + + def _get_by_index(self, i): + if i < 0: + i += self._len + if i < 0 or i >= self._len: + raise IndexError("numeric range object index out of range") + return self._start + i * self._step + + +def count_cycle(iterable, n=None): + """Cycle through the items from *iterable* up to *n* times, yielding + the number of completed cycles along with each item. If *n* is omitted the + process repeats indefinitely. + + >>> list(count_cycle('AB', 3)) + [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] + + """ + iterable = tuple(iterable) + if not iterable: + return iter(()) + counter = count() if n is None else range(n) + return ((i, item) for i in counter for item in iterable) + + +def mark_ends(iterable): + """Yield 3-tuples of the form ``(is_first, is_last, item)``. + + >>> list(mark_ends('ABC')) + [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] + + Use this when looping over an iterable to take special action on its first + and/or last items: + + >>> iterable = ['Header', 100, 200, 'Footer'] + >>> total = 0 + >>> for is_first, is_last, item in mark_ends(iterable): + ... if is_first: + ... continue # Skip the header + ... if is_last: + ... continue # Skip the footer + ... total += item + >>> print(total) + 300 + """ + it = iter(iterable) + + try: + b = next(it) + except StopIteration: + return + + try: + for i in count(): + a = b + b = next(it) + yield i == 0, False, a + + except StopIteration: + yield i == 0, True, a + + +def locate(iterable, pred=bool, window_size=None): + """Yield the index of each item in *iterable* for which *pred* returns + ``True``. + + *pred* defaults to :func:`bool`, which will select truthy items: + + >>> list(locate([0, 1, 1, 0, 1, 0, 0])) + [1, 2, 4] + + Set *pred* to a custom function to, e.g., find the indexes for a particular + item. + + >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) + [1, 3] + + If *window_size* is given, then the *pred* function will be called with + that many items. 
This enables searching for sub-sequences:
+
+    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+    >>> pred = lambda *args: args == (1, 2, 3)
+    >>> list(locate(iterable, pred=pred, window_size=3))
+    [1, 5, 9]
+
+    Use with :func:`seekable` to find indexes and then retrieve the associated
+    items:
+
+    >>> from itertools import count
+    >>> from more_itertools import seekable
+    >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
+    >>> it = seekable(source)
+    >>> pred = lambda x: x > 100
+    >>> indexes = locate(it, pred=pred)
+    >>> i = next(indexes)
+    >>> it.seek(i)
+    >>> next(it)
+    106
+
+    """
+    if window_size is None:
+        return compress(count(), map(pred, iterable))
+
+    if window_size < 1:
+        raise ValueError('window size must be at least 1')
+
+    it = windowed(iterable, window_size, fillvalue=_marker)
+    return compress(count(), starmap(pred, it))
+
+
+def longest_common_prefix(iterables):
+    """Yield elements of the longest common prefix amongst given *iterables*.
+
+    >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
+    'ab'
+
+    """
+    return (c[0] for c in takewhile(all_equal, zip(*iterables)))
+
+
+def lstrip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the beginning
+    for which *pred* returns ``True``.
+
+    For example, to remove a set of items from the start of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(lstrip(iterable, pred))
+    [1, 2, None, 3, False, None]
+
+    This function is analogous to :func:`str.lstrip`, and is essentially
+    a wrapper for :func:`itertools.dropwhile`.
+
+    """
+    return dropwhile(pred, iterable)
+
+
+def rstrip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the end
+    for which *pred* returns ``True``.
+
+    For example, to remove a set of items from the end of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(rstrip(iterable, pred))
+    [None, False, None, 1, 2, None, 3]
+
+    This function is analogous to :func:`str.rstrip`.
+
+    """
+    cache = []
+    cache_append = cache.append
+    cache_clear = cache.clear
+    for x in iterable:
+        if pred(x):
+            cache_append(x)
+        else:
+            yield from cache
+            cache_clear()
+            yield x
+
+
+def strip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the
+    beginning and end for which *pred* returns ``True``.
+
+    For example, to remove a set of items from both ends of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(strip(iterable, pred))
+    [1, 2, None, 3]
+
+    This function is analogous to :func:`str.strip`.
+
+    """
+    return rstrip(lstrip(iterable, pred), pred)
+
+
+class islice_extended:
+    """An extension of :func:`itertools.islice` that supports negative values
+    for *stop*, *start*, and *step*.
+
+    >>> iterable = iter('abcdefgh')
+    >>> list(islice_extended(iterable, -4, -1))
+    ['e', 'f', 'g']
+
+    Slices with negative values require some caching of *iterable*, but this
+    function takes care to minimize the amount of memory required.
+ + For example, you can use a negative step with an infinite iterator: + + >>> from itertools import count + >>> list(islice_extended(count(), 110, 99, -2)) + [110, 108, 106, 104, 102, 100] + + You can also use slice notation directly: + + >>> iterable = map(str, count()) + >>> it = islice_extended(iterable)[10:20:2] + >>> list(it) + ['10', '12', '14', '16', '18'] + + """ + + def __init__(self, iterable, *args): + it = iter(iterable) + if args: + self._iterable = _islice_helper(it, slice(*args)) + else: + self._iterable = it + + def __iter__(self): + return self + + def __next__(self): + return next(self._iterable) + + def __getitem__(self, key): + if isinstance(key, slice): + return islice_extended(_islice_helper(self._iterable, key)) + + raise TypeError('islice_extended.__getitem__ argument must be a slice') + + +def _islice_helper(it, s): + start = s.start + stop = s.stop + if s.step == 0: + raise ValueError('step argument must be a non-zero integer or None.') + step = s.step or 1 + + if step > 0: + start = 0 if (start is None) else start + + if start < 0: + # Consume all but the last -start items + cache = deque(enumerate(it, 1), maxlen=-start) + len_iter = cache[-1][0] if cache else 0 + + # Adjust start to be positive + i = max(len_iter + start, 0) + + # Adjust stop to be positive + if stop is None: + j = len_iter + elif stop >= 0: + j = min(stop, len_iter) + else: + j = max(len_iter + stop, 0) + + # Slice the cache + n = j - i + if n <= 0: + return + + for index, item in islice(cache, 0, n, step): + yield item + elif (stop is not None) and (stop < 0): + # Advance to the start position + next(islice(it, start, start), None) + + # When stop is negative, we have to carry -stop items while + # iterating + cache = deque(islice(it, -stop), maxlen=-stop) + + for index, item in enumerate(it): + cached_item = cache.popleft() + if index % step == 0: + yield cached_item + cache.append(item) + else: + # When both start and stop are positive we have the normal case + yield from islice(it, start, stop, step) + else: + start = -1 if (start is None) else start + + if (stop is not None) and (stop < 0): + # Consume all but the last items + n = -stop - 1 + cache = deque(enumerate(it, 1), maxlen=n) + len_iter = cache[-1][0] if cache else 0 + + # If start and stop are both negative they are comparable and + # we can just slice. Otherwise we can adjust start to be negative + # and then slice. + if start < 0: + i, j = start, stop + else: + i, j = min(start - len_iter, -1), None + + for index, item in list(cache)[i:j:step]: + yield item + else: + # Advance to the stop position + if stop is not None: + m = stop + 1 + next(islice(it, m, m), None) + + # stop is positive, so if start is negative they are not comparable + # and we need the rest of the items. + if start < 0: + i = start + n = None + # stop is None and start is positive, so we just need items up to + # the start index. + elif stop is None: + i = None + n = start + 1 + # Both stop and start are positive, so they are comparable. + else: + i = None + n = start - stop + if n <= 0: + return + + cache = list(islice(it, n)) + + yield from cache[i::step] + + +def always_reversible(iterable): + """An extension of :func:`reversed` that supports all iterables, not + just those which implement the ``Reversible`` or ``Sequence`` protocols. + + >>> print(*always_reversible(x for x in range(3))) + 2 1 0 + + If the iterable is already reversible, this function returns the + result of :func:`reversed()`. 
If the iterable is not reversible,
+    this function will cache the remaining items in the iterable and
+    yield them in reverse order, which may require significant storage.
+    """
+    try:
+        return reversed(iterable)
+    except TypeError:
+        return reversed(list(iterable))
+
+
+def consecutive_groups(iterable, ordering=lambda x: x):
+    """Yield groups of consecutive items using :func:`itertools.groupby`.
+    The *ordering* function determines whether two items are adjacent by
+    returning their position.
+
+    By default, the ordering function is the identity function. This is
+    suitable for finding runs of numbers:
+
+    >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
+    >>> for group in consecutive_groups(iterable):
+    ...     print(list(group))
+    [1]
+    [10, 11, 12]
+    [20]
+    [30, 31, 32, 33]
+    [40]
+
+    For finding runs of adjacent letters, try using the :meth:`index` method
+    of a string of letters:
+
+    >>> from string import ascii_lowercase
+    >>> iterable = 'abcdfgilmnop'
+    >>> ordering = ascii_lowercase.index
+    >>> for group in consecutive_groups(iterable, ordering):
+    ...     print(list(group))
+    ['a', 'b', 'c', 'd']
+    ['f', 'g']
+    ['i']
+    ['l', 'm', 'n', 'o', 'p']
+
+    Each group of consecutive items is an iterator that shares its source
+    with *iterable*. When an output group is advanced, the previous group is
+    no longer available unless its elements are copied (e.g., into a ``list``).
+
+    >>> iterable = [1, 2, 11, 12, 21, 22]
+    >>> saved_groups = []
+    >>> for group in consecutive_groups(iterable):
+    ...     saved_groups.append(list(group))  # Copy group elements
+    >>> saved_groups
+    [[1, 2], [11, 12], [21, 22]]
+
+    """
+    for k, g in groupby(
+        enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
+    ):
+        yield map(itemgetter(1), g)
+
+
+def difference(iterable, func=sub, *, initial=None):
+    """This function is the inverse of :func:`itertools.accumulate`. By default
+    it will compute the first difference of *iterable* using
+    :func:`operator.sub`:
+
+    >>> from itertools import accumulate
+    >>> iterable = accumulate([0, 1, 2, 3, 4])  # produces 0, 1, 3, 6, 10
+    >>> list(difference(iterable))
+    [0, 1, 2, 3, 4]
+
+    *func* defaults to :func:`operator.sub`, but other functions can be
+    specified. They will be applied as follows::
+
+        A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
+
+    For example, to do progressive division:
+
+    >>> iterable = [1, 2, 6, 24, 120]
+    >>> func = lambda x, y: x // y
+    >>> list(difference(iterable, func))
+    [1, 2, 3, 4, 5]
+
+    If the *initial* keyword is set, the first element will be skipped when
+    computing successive differences.
+
+    >>> it = [10, 11, 13, 16]  # from accumulate([1, 2, 3], initial=10)
+    >>> list(difference(it, initial=10))
+    [1, 2, 3]
+
+    """
+    a, b = tee(iterable)
+    try:
+        first = [next(b)]
+    except StopIteration:
+        return iter([])
+
+    if initial is not None:
+        first = []
+
+    return chain(first, map(func, b, a))
+
+
+class SequenceView(Sequence):
+    """Return a read-only view of the sequence object *target*.
+
+    :class:`SequenceView` objects are analogous to Python's built-in
+    "dictionary view" types. They provide a dynamic view of a sequence's
+    items, meaning that when the sequence updates, so does the view.
+
+    >>> seq = ['0', '1', '2']
+    >>> view = SequenceView(seq)
+    >>> view
+    SequenceView(['0', '1', '2'])
+    >>> seq.append('3')
+    >>> view
+    SequenceView(['0', '1', '2', '3'])
+
+    Sequence views support indexing, slicing, and length queries.
They act + like the underlying sequence, except they don't allow assignment: + + >>> view[1] + '1' + >>> view[1:-1] + ['1', '2'] + >>> len(view) + 4 + + Sequence views are useful as an alternative to copying, as they don't + require (much) extra storage. + + """ + + def __init__(self, target): + if not isinstance(target, Sequence): + raise TypeError + self._target = target + + def __getitem__(self, index): + return self._target[index] + + def __len__(self): + return len(self._target) + + def __repr__(self): + return '{}({})'.format(self.__class__.__name__, repr(self._target)) + + +class seekable: + """Wrap an iterator to allow for seeking backward and forward. This + progressively caches the items in the source iterable so they can be + re-visited. + + Call :meth:`seek` with an index to seek to that position in the source + iterable. + + To "reset" an iterator, seek to ``0``: + + >>> from itertools import count + >>> it = seekable((str(n) for n in count())) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> it.seek(0) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> next(it) + '3' + + You can also seek forward: + + >>> it = seekable((str(n) for n in range(20))) + >>> it.seek(10) + >>> next(it) + '10' + >>> it.relative_seek(-2) # Seeking relative to the current position + >>> next(it) + '9' + >>> it.seek(20) # Seeking past the end of the source isn't a problem + >>> list(it) + [] + >>> it.seek(0) # Resetting works even after hitting the end + >>> next(it), next(it), next(it) + ('0', '1', '2') + + Call :meth:`peek` to look ahead one item without advancing the iterator: + + >>> it = seekable('1234') + >>> it.peek() + '1' + >>> list(it) + ['1', '2', '3', '4'] + >>> it.peek(default='empty') + 'empty' + + Before the iterator is at its end, calling :func:`bool` on it will return + ``True``. After it will return ``False``: + + >>> it = seekable('5678') + >>> bool(it) + True + >>> list(it) + ['5', '6', '7', '8'] + >>> bool(it) + False + + You may view the contents of the cache with the :meth:`elements` method. + That returns a :class:`SequenceView`, a view that updates automatically: + + >>> it = seekable((str(n) for n in range(10))) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> elements = it.elements() + >>> elements + SequenceView(['0', '1', '2']) + >>> next(it) + '3' + >>> elements + SequenceView(['0', '1', '2', '3']) + + By default, the cache grows as the source iterable progresses, so beware of + wrapping very large or infinite iterables. Supply *maxlen* to limit the + size of the cache (this of course limits how far back you can seek). 
+ + >>> from itertools import count + >>> it = seekable((str(n) for n in count()), maxlen=2) + >>> next(it), next(it), next(it), next(it) + ('0', '1', '2', '3') + >>> list(it.elements()) + ['2', '3'] + >>> it.seek(0) + >>> next(it), next(it), next(it), next(it) + ('2', '3', '4', '5') + >>> next(it) + '6' + + """ + + def __init__(self, iterable, maxlen=None): + self._source = iter(iterable) + if maxlen is None: + self._cache = [] + else: + self._cache = deque([], maxlen) + self._index = None + + def __iter__(self): + return self + + def __next__(self): + if self._index is not None: + try: + item = self._cache[self._index] + except IndexError: + self._index = None + else: + self._index += 1 + return item + + item = next(self._source) + self._cache.append(item) + return item + + def __bool__(self): + try: + self.peek() + except StopIteration: + return False + return True + + def peek(self, default=_marker): + try: + peeked = next(self) + except StopIteration: + if default is _marker: + raise + return default + if self._index is None: + self._index = len(self._cache) + self._index -= 1 + return peeked + + def elements(self): + return SequenceView(self._cache) + + def seek(self, index): + self._index = index + remainder = index - len(self._cache) + if remainder > 0: + consume(self, remainder) + + def relative_seek(self, count): + index = len(self._cache) + self.seek(max(index + count, 0)) + + +class run_length: + """ + :func:`run_length.encode` compresses an iterable with run-length encoding. + It yields groups of repeated items with the count of how many times they + were repeated: + + >>> uncompressed = 'abbcccdddd' + >>> list(run_length.encode(uncompressed)) + [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + + :func:`run_length.decode` decompresses an iterable that was previously + compressed with run-length encoding. It yields the items of the + decompressed iterable: + + >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + >>> list(run_length.decode(compressed)) + ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] + + """ + + @staticmethod + def encode(iterable): + return ((k, ilen(g)) for k, g in groupby(iterable)) + + @staticmethod + def decode(iterable): + return chain.from_iterable(repeat(k, n) for k, n in iterable) + + +def exactly_n(iterable, n, predicate=bool): + """Return ``True`` if exactly ``n`` items in the iterable are ``True`` + according to the *predicate* function. + + >>> exactly_n([True, True, False], 2) + True + >>> exactly_n([True, True, False], 1) + False + >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) + True + + The iterable will be advanced until ``n + 1`` truthy items are encountered, + so avoid calling it on infinite iterables. + + """ + return len(take(n + 1, filter(predicate, iterable))) == n + + +def circular_shifts(iterable): + """Return a list of circular shifts of *iterable*. + + >>> circular_shifts(range(4)) + [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] + """ + lst = list(iterable) + return take(len(lst), windowed(cycle(lst), len(lst))) + + +def make_decorator(wrapping_func, result_index=0): + """Return a decorator version of *wrapping_func*, which is a function that + modifies an iterable. *result_index* is the position in that function's + signature where the iterable goes. + + This lets you use itertools on the "production end," i.e. at function + definition. This can augment what the function returns without changing the + function's code. 
+ + For example, to produce a decorator version of :func:`chunked`: + + >>> from more_itertools import chunked + >>> chunker = make_decorator(chunked, result_index=0) + >>> @chunker(3) + ... def iter_range(n): + ... return iter(range(n)) + ... + >>> list(iter_range(9)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + + To only allow truthy items to be returned: + + >>> truth_serum = make_decorator(filter, result_index=1) + >>> @truth_serum(bool) + ... def boolean_test(): + ... return [0, 1, '', ' ', False, True] + ... + >>> list(boolean_test()) + [1, ' ', True] + + The :func:`peekable` and :func:`seekable` wrappers make for practical + decorators: + + >>> from more_itertools import peekable + >>> peekable_function = make_decorator(peekable) + >>> @peekable_function() + ... def str_range(*args): + ... return (str(x) for x in range(*args)) + ... + >>> it = str_range(1, 20, 2) + >>> next(it), next(it), next(it) + ('1', '3', '5') + >>> it.peek() + '7' + >>> next(it) + '7' + + """ + + # See https://sites.google.com/site/bbayles/index/decorator_factory for + # notes on how this works. + def decorator(*wrapping_args, **wrapping_kwargs): + def outer_wrapper(f): + def inner_wrapper(*args, **kwargs): + result = f(*args, **kwargs) + wrapping_args_ = list(wrapping_args) + wrapping_args_.insert(result_index, result) + return wrapping_func(*wrapping_args_, **wrapping_kwargs) + + return inner_wrapper + + return outer_wrapper + + return decorator + + +def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): + """Return a dictionary that maps the items in *iterable* to categories + defined by *keyfunc*, transforms them with *valuefunc*, and + then summarizes them by category with *reducefunc*. + + *valuefunc* defaults to the identity function if it is unspecified. + If *reducefunc* is unspecified, no summarization takes place: + + >>> keyfunc = lambda x: x.upper() + >>> result = map_reduce('abbccc', keyfunc) + >>> sorted(result.items()) + [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] + + Specifying *valuefunc* transforms the categorized items: + + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: 1 + >>> result = map_reduce('abbccc', keyfunc, valuefunc) + >>> sorted(result.items()) + [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] + + Specifying *reducefunc* summarizes the categorized items: + + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: 1 + >>> reducefunc = sum + >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) + >>> sorted(result.items()) + [('A', 1), ('B', 2), ('C', 3)] + + You may want to filter the input iterable before applying the map/reduce + procedure: + + >>> all_items = range(30) + >>> items = [x for x in all_items if 10 <= x <= 20] # Filter + >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 + >>> categories = map_reduce(items, keyfunc=keyfunc) + >>> sorted(categories.items()) + [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] + >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) + >>> sorted(summaries.items()) + [(0, 90), (1, 75)] + + Note that all items in the iterable are gathered into a list before the + summarization step, which may require significant storage. + + The returned object is a :obj:`collections.defaultdict` with the + ``default_factory`` set to ``None``, such that it behaves like a normal + dictionary. 
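+
+    For example (a small sketch of the ``default_factory`` detail above):
+
+    >>> result = map_reduce('abbccc', keyfunc=lambda x: x.upper())
+    >>> result['Z']  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    KeyError: 'Z'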
+
+    """
+    valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
+
+    ret = defaultdict(list)
+    for item in iterable:
+        key = keyfunc(item)
+        value = valuefunc(item)
+        ret[key].append(value)
+
+    if reducefunc is not None:
+        for key, value_list in ret.items():
+            ret[key] = reducefunc(value_list)
+
+    ret.default_factory = None
+    return ret
+
+
+def rlocate(iterable, pred=bool, window_size=None):
+    """Yield the index of each item in *iterable* for which *pred* returns
+    ``True``, starting from the right and moving left.
+
+    *pred* defaults to :func:`bool`, which will select truthy items:
+
+    >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
+    [4, 2, 1]
+
+    Set *pred* to a custom function to, e.g., find the indexes for a particular
+    item:
+
+    >>> iterable = iter('abcb')
+    >>> pred = lambda x: x == 'b'
+    >>> list(rlocate(iterable, pred))
+    [3, 1]
+
+    If *window_size* is given, then the *pred* function will be called with
+    that many items. This enables searching for sub-sequences:
+
+    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+    >>> pred = lambda *args: args == (1, 2, 3)
+    >>> list(rlocate(iterable, pred=pred, window_size=3))
+    [9, 5, 1]
+
+    Beware, this function won't return anything for infinite iterables.
+    If *iterable* is reversible, ``rlocate`` will reverse it and search from
+    the right. Otherwise, it will search from the left and return the results
+    in reverse order.
+
+    See :func:`locate` for other example applications.
+
+    """
+    if window_size is None:
+        try:
+            len_iter = len(iterable)
+            return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
+        except TypeError:
+            pass
+
+    return reversed(list(locate(iterable, pred, window_size)))
+
+
+def replace(iterable, pred, substitutes, count=None, window_size=1):
+    """Yield the items from *iterable*, replacing the items for which *pred*
+    returns ``True`` with the items from the iterable *substitutes*.
+
+    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
+    >>> pred = lambda x: x == 0
+    >>> substitutes = (2, 3)
+    >>> list(replace(iterable, pred, substitutes))
+    [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
+
+    If *count* is given, the number of replacements will be limited:
+
+    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
+    >>> pred = lambda x: x == 0
+    >>> substitutes = [None]
+    >>> list(replace(iterable, pred, substitutes, count=2))
+    [1, 1, None, 1, 1, None, 1, 1, 0]
+
+    Use *window_size* to control the number of items passed as arguments to
+    *pred*. This allows for locating and replacing subsequences.
+
+    >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
+    >>> window_size = 3
+    >>> pred = lambda *args: args == (0, 1, 2)  # 3 items passed to pred
+    >>> substitutes = [3, 4]  # Splice in these items
+    >>> list(replace(iterable, pred, substitutes, window_size=window_size))
+    [3, 4, 5, 3, 4, 5]
+
+    """
+    if window_size < 1:
+        raise ValueError('window_size must be at least 1')
+
+    # Save the substitutes iterable, since it's used more than once
+    substitutes = tuple(substitutes)
+
+    # Add padding such that the number of windows matches the length of the
+    # iterable
+    it = chain(iterable, [_marker] * (window_size - 1))
+    windows = windowed(it, window_size)
+
+    n = 0
+    for w in windows:
+        # If the current window matches our predicate (and we haven't hit
+        # our maximum number of replacements), splice in the substitutes
+        # and then consume the following windows that overlap with this one.
+        # For example, if the iterable is (0, 1, 2, 3, 4...)
+        # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
+
+        # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
+        if pred(*w):
+            if (count is None) or (n < count):
+                n += 1
+                yield from substitutes
+                consume(windows, window_size - 1)
+                continue
+
+        # If there was no match (or we've reached the replacement limit),
+        # yield the first item from the window.
+        if w and (w[0] is not _marker):
+            yield w[0]
+
+
+def partitions(iterable):
+    """Yield all possible order-preserving partitions of *iterable*.
+
+    >>> iterable = 'abc'
+    >>> for part in partitions(iterable):
+    ...     print([''.join(p) for p in part])
+    ['abc']
+    ['a', 'bc']
+    ['ab', 'c']
+    ['a', 'b', 'c']
+
+    This is unrelated to :func:`partition`.
+
+    """
+    sequence = list(iterable)
+    n = len(sequence)
+    for i in powerset(range(1, n)):
+        yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
+
+
+def set_partitions(iterable, k=None):
+    """
+    Yield the set partitions of *iterable* into *k* parts. Set partitions are
+    not order-preserving.
+
+    >>> iterable = 'abc'
+    >>> for part in set_partitions(iterable, 2):
+    ...     print([''.join(p) for p in part])
+    ['a', 'bc']
+    ['ab', 'c']
+    ['b', 'ac']
+
+    If *k* is not given, every set partition is generated.
+
+    >>> iterable = 'abc'
+    >>> for part in set_partitions(iterable):
+    ...     print([''.join(p) for p in part])
+    ['abc']
+    ['a', 'bc']
+    ['ab', 'c']
+    ['b', 'ac']
+    ['a', 'b', 'c']
+
+    """
+    L = list(iterable)
+    n = len(L)
+    if k is not None:
+        if k < 1:
+            raise ValueError(
+                "Can't partition in a negative or zero number of groups"
+            )
+        elif k > n:
+            return
+
+    def set_partitions_helper(L, k):
+        n = len(L)
+        if k == 1:
+            yield [L]
+        elif n == k:
+            yield [[s] for s in L]
+        else:
+            e, *M = L
+            for p in set_partitions_helper(M, k - 1):
+                yield [[e], *p]
+            for p in set_partitions_helper(M, k):
+                for i in range(len(p)):
+                    yield p[:i] + [[e] + p[i]] + p[i + 1 :]
+
+    if k is None:
+        for k in range(1, n + 1):
+            yield from set_partitions_helper(L, k)
+    else:
+        yield from set_partitions_helper(L, k)
+
+
+class time_limited:
+    """
+    Yield items from *iterable* until *limit_seconds* have passed.
+    If the time limit expires before all items have been yielded, the
+    ``timed_out`` attribute will be set to ``True``.
+
+    >>> from time import sleep
+    >>> def generator():
+    ...     yield 1
+    ...     yield 2
+    ...     sleep(0.2)
+    ...     yield 3
+    >>> iterable = time_limited(0.1, generator())
+    >>> list(iterable)
+    [1, 2]
+    >>> iterable.timed_out
+    True
+
+    Note that the time is checked before each item is yielded, and iteration
+    stops if the time elapsed is greater than *limit_seconds*. If your time
+    limit is 1 second, but it takes 2 seconds to generate the first item from
+    the iterable, the function will run for 2 seconds and not yield anything.
+    As a special case, when *limit_seconds* is zero, the iterator never
+    returns anything.
+
+    """
+
+    def __init__(self, limit_seconds, iterable):
+        if limit_seconds < 0:
+            raise ValueError('limit_seconds must be non-negative')
+        self.limit_seconds = limit_seconds
+        self._iterable = iter(iterable)
+        self._start_time = monotonic()
+        self.timed_out = False
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.limit_seconds == 0:
+            self.timed_out = True
+            raise StopIteration
+        item = next(self._iterable)
+        if monotonic() - self._start_time > self.limit_seconds:
+            self.timed_out = True
+            raise StopIteration
+
+        return item
+
+
+def only(iterable, default=None, too_long=None):
+    """If *iterable* has only one item, return it.
+    If it has zero items, return *default*.
+
+    If it has more than one item, raise the exception given by *too_long*,
+    which is ``ValueError`` by default.
+
+    >>> only([], default='missing')
+    'missing'
+    >>> only([1])
+    1
+    >>> only([1, 2])  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: Expected exactly one item in iterable, but got 1, 2,
+    and perhaps more.
+    >>> only([1, 2], too_long=TypeError)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    TypeError
+
+    Note that :func:`only` attempts to advance *iterable* twice to ensure
+    there is only one item. See :func:`spy` or :func:`peekable` to check
+    iterable contents less destructively.
+    """
+    it = iter(iterable)
+    first_value = next(it, default)
+
+    try:
+        second_value = next(it)
+    except StopIteration:
+        pass
+    else:
+        msg = (
+            'Expected exactly one item in iterable, but got {!r}, {!r}, '
+            'and perhaps more.'.format(first_value, second_value)
+        )
+        raise too_long or ValueError(msg)
+
+    return first_value
+
+
+class _IChunk:
+    def __init__(self, iterable, n):
+        self._it = islice(iterable, n)
+        self._cache = deque()
+
+    def fill_cache(self):
+        self._cache.extend(self._it)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            return next(self._it)
+        except StopIteration:
+            if self._cache:
+                return self._cache.popleft()
+            else:
+                raise
+
+
+def ichunked(iterable, n):
+    """Break *iterable* into sub-iterables with *n* elements each.
+    :func:`ichunked` is like :func:`chunked`, but it yields iterables
+    instead of lists.
+
+    If the sub-iterables are read in order, the elements of *iterable*
+    won't be stored in memory.
+    If they are read out of order, :func:`itertools.tee` is used to cache
+    elements as necessary.
+
+    >>> from itertools import count
+    >>> all_chunks = ichunked(count(), 4)
+    >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
+    >>> list(c_2)  # c_1's elements have been cached; c_3's haven't been
+    [4, 5, 6, 7]
+    >>> list(c_1)
+    [0, 1, 2, 3]
+    >>> list(c_3)
+    [8, 9, 10, 11]
+
+    """
+    source = peekable(iter(iterable))
+    ichunk_marker = object()
+    while True:
+        # Check to see whether we're at the end of the source iterable
+        item = source.peek(ichunk_marker)
+        if item is ichunk_marker:
+            return
+
+        chunk = _IChunk(source, n)
+        yield chunk
+
+        # Advance the source iterable and fill previous chunk's cache
+        chunk.fill_cache()
+
+
+def iequals(*iterables):
+    """Return ``True`` if all given *iterables* are equal to each other,
+    which means that they contain the same elements in the same order.
+
+    The function is useful for comparing iterables of different data types
+    or iterables that do not support equality checks.
+
+    >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
+    True
+
+    >>> iequals("abc", "acb")
+    False
+
+    Not to be confused with :func:`all_equal`, which checks whether all
+    elements of an iterable are equal to each other.
+
+    """
+    return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
+
+
+def distinct_combinations(iterable, r):
+    """Yield the distinct combinations of *r* items taken from *iterable*.
+
+    >>> list(distinct_combinations([0, 0, 1], 2))
+    [(0, 0), (0, 1)]
+
+    Equivalent to ``set(combinations(iterable, r))``, except duplicates are
+    not generated and thrown away. For larger input sequences this is much
+    more efficient.
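+
+    For example (a sketch of the equivalence noted above, on a small input):
+
+    >>> from itertools import combinations
+    >>> sorted(set(combinations([0, 0, 1], 2)))
+    [(0, 0), (0, 1)]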
+ + """ + if r < 0: + raise ValueError('r must be non-negative') + elif r == 0: + yield () + return + pool = tuple(iterable) + generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] + current_combo = [None] * r + level = 0 + while generators: + try: + cur_idx, p = next(generators[-1]) + except StopIteration: + generators.pop() + level -= 1 + continue + current_combo[level] = p + if level + 1 == r: + yield tuple(current_combo) + else: + generators.append( + unique_everseen( + enumerate(pool[cur_idx + 1 :], cur_idx + 1), + key=itemgetter(1), + ) + ) + level += 1 + + +def filter_except(validator, iterable, *exceptions): + """Yield the items from *iterable* for which the *validator* function does + not raise one of the specified *exceptions*. + + *validator* is called for each item in *iterable*. + It should be a function that accepts one argument and raises an exception + if that item is not valid. + + >>> iterable = ['1', '2', 'three', '4', None] + >>> list(filter_except(int, iterable, ValueError, TypeError)) + ['1', '2', '4'] + + If an exception other than one given by *exceptions* is raised by + *validator*, it is raised like normal. + """ + for item in iterable: + try: + validator(item) + except exceptions: + pass + else: + yield item + + +def map_except(function, iterable, *exceptions): + """Transform each item from *iterable* with *function* and yield the + result, unless *function* raises one of the specified *exceptions*. + + *function* is called to transform each item in *iterable*. + It should accept one argument. + + >>> iterable = ['1', '2', 'three', '4', None] + >>> list(map_except(int, iterable, ValueError, TypeError)) + [1, 2, 4] + + If an exception other than one given by *exceptions* is raised by + *function*, it is raised like normal. + """ + for item in iterable: + try: + yield function(item) + except exceptions: + pass + + +def map_if(iterable, pred, func, func_else=lambda x: x): + """Evaluate each item from *iterable* using *pred*. If the result is + equivalent to ``True``, transform the item with *func* and yield it. + Otherwise, transform the item with *func_else* and yield it. + + *pred*, *func*, and *func_else* should each be functions that accept + one argument. By default, *func_else* is the identity function. + + >>> from math import sqrt + >>> iterable = list(range(-5, 5)) + >>> iterable + [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig')) + [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig'] + >>> list(map_if(iterable, lambda x: x >= 0, + ... lambda x: f'{sqrt(x):.2f}', lambda x: None)) + [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00'] + """ + for item in iterable: + yield func(item) if pred(item) else func_else(item) + + +def _sample_unweighted(iterable, k): + # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: + # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". + + # Fill up the reservoir (collection of samples) with the first `k` samples + reservoir = take(k, iterable) + + # Generate random number that's the largest in a sample of k U(0,1) numbers + # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic + W = exp(log(random()) / k) + + # The number of elements to skip before changing the reservoir is a random + # number with a geometric distribution. Sample it using random() and logs. 
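+    # floor(log(U) / log(1 - W)) is a geometric draw: the number of
+    # consecutive failures before a success when each independent trial
+    # succeeds with probability W.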
+    next_index = k + floor(log(random()) / log(1 - W))
+
+    for index, element in enumerate(iterable, k):
+        if index == next_index:
+            reservoir[randrange(k)] = element
+            # The new W is the largest in a sample of k U(0, `old_W`) numbers
+            W *= exp(log(random()) / k)
+            next_index += floor(log(random()) / log(1 - W)) + 1
+
+    return reservoir
+
+
+def _sample_weighted(iterable, k, weights):
+    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al.:
+    # "Weighted random sampling with a reservoir".
+
+    # Log-transform for numerical stability for weights that are small/large
+    weight_keys = (log(random()) / weight for weight in weights)
+
+    # Fill up the reservoir (collection of samples) with the first `k`
+    # weight-keys and elements, then heapify the list.
+    reservoir = take(k, zip(weight_keys, iterable))
+    heapify(reservoir)
+
+    # The number of jumps before changing the reservoir is a random variable
+    # with an exponential distribution. Sample it using random() and logs.
+    smallest_weight_key, _ = reservoir[0]
+    weights_to_skip = log(random()) / smallest_weight_key
+
+    for weight, element in zip(weights, iterable):
+        if weight >= weights_to_skip:
+            # The notation here is consistent with the paper, but we store
+            # the weight-keys in log-space for better numerical stability.
+            smallest_weight_key, _ = reservoir[0]
+            t_w = exp(weight * smallest_weight_key)
+            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
+            weight_key = log(r_2) / weight
+            heapreplace(reservoir, (weight_key, element))
+            smallest_weight_key, _ = reservoir[0]
+            weights_to_skip = log(random()) / smallest_weight_key
+        else:
+            weights_to_skip -= weight
+
+    # Equivalent to [element for weight_key, element in sorted(reservoir)]
+    return [heappop(reservoir)[1] for _ in range(k)]
+
+
+def sample(iterable, k, weights=None):
+    """Return a *k*-length list of elements chosen (without replacement)
+    from the *iterable*. Like :func:`random.sample`, but works on iterables
+    of unknown length.
+
+    >>> iterable = range(100)
+    >>> sample(iterable, 5)  # doctest: +SKIP
+    [81, 60, 96, 16, 4]
+
+    An iterable with *weights* may also be given:
+
+    >>> iterable = range(100)
+    >>> weights = (i * i + 1 for i in range(100))
+    >>> sample(iterable, 5, weights=weights)  # doctest: +SKIP
+    [79, 67, 74, 66, 78]
+
+    The algorithm can also be used to generate weighted random permutations.
+    The relative weight of each item determines the probability that it
+    appears late in the permutation.
+
+    >>> data = "abcdefgh"
+    >>> weights = range(1, len(data) + 1)
+    >>> sample(data, k=len(data), weights=weights)  # doctest: +SKIP
+    ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
+    """
+    if k == 0:
+        return []
+
+    iterable = iter(iterable)
+    if weights is None:
+        return _sample_unweighted(iterable, k)
+    else:
+        weights = iter(weights)
+        return _sample_weighted(iterable, k, weights)
+
+
+def is_sorted(iterable, key=None, reverse=False, strict=False):
+    """Returns ``True`` if the items of iterable are in sorted order, and
+    ``False`` otherwise. *key* and *reverse* have the same meaning that they do
+    in the built-in :func:`sorted` function.
+
+    >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
+    True
+    >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
+    False
+
+    If *strict*, tests for strict sorting, that is, returns ``False`` if equal
+    elements are found:
+
+    >>> is_sorted([1, 2, 2])
+    True
+    >>> is_sorted([1, 2, 2], strict=True)
+    False
+
+    The function returns ``False`` after encountering the first out-of-order
+    item. 
If there are no out-of-order items, the iterable is exhausted.
+    """
+
+    compare = (le if reverse else ge) if strict else (lt if reverse else gt)
+    it = iterable if key is None else map(key, iterable)
+    return not any(starmap(compare, pairwise(it)))
+
+
+class AbortThread(BaseException):
+    pass
+
+
+class callback_iter:
+    """Convert a function that uses callbacks to an iterator.
+
+    Let *func* be a function that takes a `callback` keyword argument.
+    For example:
+
+    >>> def func(callback=None):
+    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
+    ...         if callback:
+    ...             callback(i, c)
+    ...     return 4
+
+
+    Use ``with callback_iter(func)`` to get an iterator over the parameters
+    that are delivered to the callback.
+
+    >>> with callback_iter(func) as it:
+    ...     for args, kwargs in it:
+    ...         print(args)
+    (1, 'a')
+    (2, 'b')
+    (3, 'c')
+
+    The function will be called in a background thread. The ``done`` property
+    indicates whether it has completed execution.
+
+    >>> it.done
+    True
+
+    If it completes successfully, its return value will be available
+    in the ``result`` property.
+
+    >>> it.result
+    4
+
+    Notes:
+
+    * If the function uses some keyword argument besides ``callback``, supply
+      *callback_kwd*.
+    * If it finished executing, but raised an exception, accessing the
+      ``result`` property will raise the same exception.
+    * If it hasn't finished executing, accessing the ``result``
+      property from within the ``with`` block will raise ``RuntimeError``.
+    * If it hasn't finished executing, accessing the ``result`` property from
+      outside the ``with`` block will raise a
+      ``more_itertools.AbortThread`` exception.
+    * Provide *wait_seconds* to adjust how frequently it is polled for
+      output.
+
+    """
+
+    def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
+        self._func = func
+        self._callback_kwd = callback_kwd
+        self._aborted = False
+        self._future = None
+        self._wait_seconds = wait_seconds
+        # Lazily import concurrent.futures
+        self._executor = __import__(
+            'concurrent.futures'
+        ).futures.ThreadPoolExecutor(max_workers=1)
+        self._iterator = self._reader()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self._aborted = True
+        self._executor.shutdown()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return next(self._iterator)
+
+    @property
+    def done(self):
+        if self._future is None:
+            return False
+        return self._future.done()
+
+    @property
+    def result(self):
+        if not self.done:
+            raise RuntimeError('Function has not yet completed')
+
+        return self._future.result()
+
+    def _reader(self):
+        q = Queue()
+
+        def callback(*args, **kwargs):
+            if self._aborted:
+                raise AbortThread('canceled by user')
+
+            q.put((args, kwargs))
+
+        self._future = self._executor.submit(
+            self._func, **{self._callback_kwd: callback}
+        )
+
+        while True:
+            try:
+                item = q.get(timeout=self._wait_seconds)
+            except Empty:
+                pass
+            else:
+                q.task_done()
+                yield item
+
+            if self._future.done():
+                break
+
+        remaining = []
+        while True:
+            try:
+                item = q.get_nowait()
+            except Empty:
+                break
+            else:
+                q.task_done()
+                remaining.append(item)
+        q.join()
+        yield from remaining
+
+
+def windowed_complete(iterable, n):
+    """
+    Yield ``(beginning, middle, end)`` tuples, where:
+
+    * Each ``middle`` has *n* items from *iterable*
+    * Each ``beginning`` has the items before the ones in ``middle``
+    * Each ``end`` has the items after the ones in ``middle``
+
+    >>> iterable = range(7)
+    >>> n = 3
+    >>> for beginning, middle, end in 
windowed_complete(iterable, n):
+    ...     print(beginning, middle, end)
+    () (0, 1, 2) (3, 4, 5, 6)
+    (0,) (1, 2, 3) (4, 5, 6)
+    (0, 1) (2, 3, 4) (5, 6)
+    (0, 1, 2) (3, 4, 5) (6,)
+    (0, 1, 2, 3) (4, 5, 6) ()
+
+    Note that *n* must be at least 0 and at most equal to the length of
+    *iterable*.
+
+    This function will exhaust the iterable and may require significant
+    storage.
+    """
+    if n < 0:
+        raise ValueError('n must be >= 0')
+
+    seq = tuple(iterable)
+    size = len(seq)
+
+    if n > size:
+        raise ValueError('n must be <= len(seq)')
+
+    for i in range(size - n + 1):
+        beginning = seq[:i]
+        middle = seq[i : i + n]
+        end = seq[i + n :]
+        yield beginning, middle, end
+
+
+def all_unique(iterable, key=None):
+    """
+    Returns ``True`` if all the elements of *iterable* are unique (no two
+    elements are equal).
+
+    >>> all_unique('ABCB')
+    False
+
+    If a *key* function is specified, it will be used to make comparisons.
+
+    >>> all_unique('ABCb')
+    True
+    >>> all_unique('ABCb', str.lower)
+    False
+
+    The function returns as soon as the first non-unique element is
+    encountered. Iterables with a mix of hashable and unhashable items can
+    be used, but the function will be slower for unhashable items.
+    """
+    seenset = set()
+    seenset_add = seenset.add
+    seenlist = []
+    seenlist_add = seenlist.append
+    for element in map(key, iterable) if key else iterable:
+        try:
+            if element in seenset:
+                return False
+            seenset_add(element)
+        except TypeError:
+            if element in seenlist:
+                return False
+            seenlist_add(element)
+    return True
+
+
+def nth_product(index, *args):
+    """Equivalent to ``list(product(*args))[index]``.
+
+    The products of *args* can be ordered lexicographically.
+    :func:`nth_product` computes the product at sort position *index* without
+    computing the previous products.
+
+    >>> nth_product(8, range(2), range(2), range(2), range(2))
+    (1, 0, 0, 0)
+
+    ``IndexError`` will be raised if the given *index* is invalid.
+    """
+    pools = list(map(tuple, reversed(args)))
+    ns = list(map(len, pools))
+
+    c = reduce(mul, ns)
+
+    if index < 0:
+        index += c
+
+    if not 0 <= index < c:
+        raise IndexError
+
+    result = []
+    for pool, n in zip(pools, ns):
+        result.append(pool[index % n])
+        index //= n
+
+    return tuple(reversed(result))
+
+
+def nth_permutation(iterable, r, index):
+    """Equivalent to ``list(permutations(iterable, r))[index]``.
+
+    The subsequences of *iterable* that are of length *r* where order is
+    important can be ordered lexicographically. :func:`nth_permutation`
+    computes the subsequence at sort position *index* directly, without
+    computing the previous subsequences.
+
+    >>> nth_permutation('ghijk', 2, 5)
+    ('h', 'i')
+
+    ``ValueError`` will be raised if *r* is negative or greater than the
+    length of *iterable*.
+    ``IndexError`` will be raised if the given *index* is invalid.
+    """
+    pool = list(iterable)
+    n = len(pool)
+
+    if r is None or r == n:
+        r, c = n, factorial(n)
+    elif not 0 <= r < n:
+        raise ValueError
+    else:
+        c = perm(n, r)
+
+    if index < 0:
+        index += c
+
+    if not 0 <= index < c:
+        raise IndexError
+
+    if c == 0:
+        return tuple()
+
+    result = [0] * r
+    q = index * factorial(n) // c if r < n else index
+    for d in range(1, n + 1):
+        q, i = divmod(q, d)
+        if 0 <= n - d < r:
+            result[n - d] = i
+        if q == 0:
+            break
+
+    return tuple(map(pool.pop, result))
+
+
+def nth_combination_with_replacement(iterable, r, index):
+    """Equivalent to
+    ``list(combinations_with_replacement(iterable, r))[index]``.
+
+    The subsequences with repetition of *iterable* that are of length *r* can
+    be ordered lexicographically. :func:`nth_combination_with_replacement`
+    computes the subsequence at sort position *index* directly, without
+    computing the previous subsequences with replacement.
+
+    >>> nth_combination_with_replacement(range(5), 3, 5)
+    (0, 1, 1)
+
+    ``ValueError`` will be raised if *r* is negative or greater than the
+    length of *iterable*.
+    ``IndexError`` will be raised if the given *index* is invalid.
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    if (r < 0) or (r > n):
+        raise ValueError
+
+    c = comb(n + r - 1, r)
+
+    if index < 0:
+        index += c
+
+    if (index < 0) or (index >= c):
+        raise IndexError
+
+    result = []
+    i = 0
+    while r:
+        r -= 1
+        while n >= 0:
+            num_combs = comb(n + r - 1, r)
+            if index < num_combs:
+                break
+            n -= 1
+            i += 1
+            index -= num_combs
+        result.append(pool[i])
+
+    return tuple(result)
+
+
+def value_chain(*args):
+    """Yield all arguments passed to the function in the same order in which
+    they were passed. If an argument itself is iterable, then iterate over its
+    values.
+
+    >>> list(value_chain(1, 2, 3, [4, 5, 6]))
+    [1, 2, 3, 4, 5, 6]
+
+    Binary and text strings are not considered iterable and are emitted
+    as-is:
+
+    >>> list(value_chain('12', '34', ['56', '78']))
+    ['12', '34', '56', '78']
+
+    Multiple levels of nesting are not flattened.
+
+    """
+    for value in args:
+        if isinstance(value, (str, bytes)):
+            yield value
+            continue
+        try:
+            yield from value
+        except TypeError:
+            yield value
+
+
+def product_index(element, *args):
+    """Equivalent to ``list(product(*args)).index(element)``
+
+    The products of *args* can be ordered lexicographically.
+    :func:`product_index` computes the first index of *element* without
+    computing the previous products.
+
+    >>> product_index([8, 2], range(10), range(5))
+    42
+
+    ``ValueError`` will be raised if the given *element* isn't in the product
+    of *args*.
+    """
+    index = 0
+
+    for x, pool in zip_longest(element, args, fillvalue=_marker):
+        if x is _marker or pool is _marker:
+            raise ValueError('element is not a product of args')
+
+        pool = tuple(pool)
+        index = index * len(pool) + pool.index(x)
+
+    return index
+
+
+def combination_index(element, iterable):
+    """Equivalent to ``list(combinations(iterable, r)).index(element)``
+
+    The subsequences of *iterable* that are of length *r* can be ordered
+    lexicographically. :func:`combination_index` computes the index of the
+    first *element*, without computing the previous combinations.
+
+    >>> combination_index('adf', 'abcdefg')
+    10
+
+    ``ValueError`` will be raised if the given *element* isn't one of the
+    combinations of *iterable*.
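+
+    For example (a sketch of the equivalence claim above, feasible here
+    because the input is small):
+
+    >>> from itertools import combinations
+    >>> list(combinations('abcdefg', 3)).index(('a', 'd', 'f'))
+    10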
+
+    """
+    element = enumerate(element)
+    k, y = next(element, (None, None))
+    if k is None:
+        return 0
+
+    indexes = []
+    pool = enumerate(iterable)
+    for n, x in pool:
+        if x == y:
+            indexes.append(n)
+            tmp, y = next(element, (None, None))
+            if tmp is None:
+                break
+            else:
+                k = tmp
+    else:
+        raise ValueError('element is not a combination of iterable')
+
+    n, _ = last(pool, default=(n, None))
+
+    index = 1
+    for i, j in enumerate(reversed(indexes), start=1):
+        j = n - j
+        if i <= j:
+            index += comb(j, i)
+
+    return comb(n + 1, k + 1) - index
+
+
+def combination_with_replacement_index(element, iterable):
+    """Equivalent to
+    ``list(combinations_with_replacement(iterable, r)).index(element)``
+
+    The subsequences with repetition of *iterable* that are of length *r* can
+    be ordered lexicographically. :func:`combination_with_replacement_index`
+    computes the index of the first *element*, without computing the previous
+    combinations with replacement.
+
+    >>> combination_with_replacement_index('adf', 'abcdefg')
+    20
+
+    ``ValueError`` will be raised if the given *element* isn't one of the
+    combinations with replacement of *iterable*.
+    """
+    element = tuple(element)
+    l = len(element)
+    element = enumerate(element)
+
+    k, y = next(element, (None, None))
+    if k is None:
+        return 0
+
+    indexes = []
+    pool = tuple(iterable)
+    for n, x in enumerate(pool):
+        while x == y:
+            indexes.append(n)
+            tmp, y = next(element, (None, None))
+            if tmp is None:
+                break
+            else:
+                k = tmp
+        if y is None:
+            break
+    else:
+        raise ValueError(
+            'element is not a combination with replacement of iterable'
+        )
+
+    n = len(pool)
+    occupations = [0] * n
+    for p in indexes:
+        occupations[p] += 1
+
+    index = 0
+    cumulative_sum = 0
+    for k in range(1, n):
+        cumulative_sum += occupations[k - 1]
+        j = l + n - 1 - k - cumulative_sum
+        i = n - k
+        if i <= j:
+            index += comb(j, i)
+
+    return index
+
+
+def permutation_index(element, iterable):
+    """Equivalent to ``list(permutations(iterable, r)).index(element)``
+
+    The subsequences of *iterable* that are of length *r* where order is
+    important can be ordered lexicographically. :func:`permutation_index`
+    computes the index of the first *element* directly, without computing
+    the previous permutations.
+
+    >>> permutation_index([1, 3, 2], range(5))
+    19
+
+    ``ValueError`` will be raised if the given *element* isn't one of the
+    permutations of *iterable*.
+    """
+    index = 0
+    pool = list(iterable)
+    for i, x in zip(range(len(pool), -1, -1), element):
+        r = pool.index(x)
+        index = index * i + r
+        del pool[r]
+
+    return index
+
+
+class countable:
+    """Wrap *iterable* and keep a count of how many items have been consumed.
+
+    The ``items_seen`` attribute starts at ``0`` and increments as the
+    iterable is consumed:
+
+    >>> iterable = map(str, range(10))
+    >>> it = countable(iterable)
+    >>> it.items_seen
+    0
+    >>> next(it), next(it)
+    ('0', '1')
+    >>> list(it)
+    ['2', '3', '4', '5', '6', '7', '8', '9']
+    >>> it.items_seen
+    10
+    """
+
+    def __init__(self, iterable):
+        self._it = iter(iterable)
+        self.items_seen = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        item = next(self._it)
+        self.items_seen += 1
+
+        return item
+
+
+def chunked_even(iterable, n):
+    """Break *iterable* into lists of approximately length *n*.
+    Items are distributed such that the lengths of the lists differ by at
+    most 1 item.
+ + >>> iterable = [1, 2, 3, 4, 5, 6, 7] + >>> n = 3 + >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2 + [[1, 2, 3], [4, 5], [6, 7]] + >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1 + [[1, 2, 3], [4, 5, 6], [7]] + + """ + + len_method = getattr(iterable, '__len__', None) + + if len_method is None: + return _chunked_even_online(iterable, n) + else: + return _chunked_even_finite(iterable, len_method(), n) + + +def _chunked_even_online(iterable, n): + buffer = [] + maxbuf = n + (n - 2) * (n - 1) + for x in iterable: + buffer.append(x) + if len(buffer) == maxbuf: + yield buffer[:n] + buffer = buffer[n:] + yield from _chunked_even_finite(buffer, len(buffer), n) + + +def _chunked_even_finite(iterable, N, n): + if N < 1: + return + + # Lists are either size `full_size <= n` or `partial_size = full_size - 1` + q, r = divmod(N, n) + num_lists = q + (1 if r > 0 else 0) + q, r = divmod(N, num_lists) + full_size = q + (1 if r > 0 else 0) + partial_size = full_size - 1 + num_full = N - partial_size * num_lists + num_partial = num_lists - num_full + + # Yield num_full lists of full_size + partial_start_idx = num_full * full_size + if full_size > 0: + for i in range(0, partial_start_idx, full_size): + yield list(islice(iterable, i, i + full_size)) + + # Yield num_partial lists of partial_size + if partial_size > 0: + for i in range( + partial_start_idx, + partial_start_idx + (num_partial * partial_size), + partial_size, + ): + yield list(islice(iterable, i, i + partial_size)) + + +def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): + """A version of :func:`zip` that "broadcasts" any scalar + (i.e., non-iterable) items into output tuples. + + >>> iterable_1 = [1, 2, 3] + >>> iterable_2 = ['a', 'b', 'c'] + >>> scalar = '_' + >>> list(zip_broadcast(iterable_1, iterable_2, scalar)) + [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')] + + The *scalar_types* keyword argument determines what types are considered + scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to + treat strings and byte strings as iterable: + + >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None)) + [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')] + + If the *strict* keyword argument is ``True``, then + ``UnequalIterablesError`` will be raised if any of the iterables have + different lengths. + """ + + def is_scalar(obj): + if scalar_types and isinstance(obj, scalar_types): + return True + try: + iter(obj) + except TypeError: + return True + else: + return False + + size = len(objects) + if not size: + return + + new_item = [None] * size + iterables, iterable_positions = [], [] + for i, obj in enumerate(objects): + if is_scalar(obj): + new_item[i] = obj + else: + iterables.append(iter(obj)) + iterable_positions.append(i) + + if not iterables: + yield tuple(objects) + return + + zipper = _zip_equal if strict else zip + for item in zipper(*iterables): + for i, new_item[i] in zip(iterable_positions, item): + pass + yield tuple(new_item) + + +def unique_in_window(iterable, n, key=None): + """Yield the items from *iterable* that haven't been seen recently. + *n* is the size of the lookback window. + + >>> iterable = [0, 1, 0, 2, 3, 0] + >>> n = 3 + >>> list(unique_in_window(iterable, n)) + [0, 1, 2, 3, 0] + + The *key* function, if provided, will be used to determine uniqueness: + + >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower())) + ['a', 'b', 'c', 'd', 'a'] + + The items in *iterable* must be hashable. 
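+    (Only the computed keys are stored for the lookback, so when a *key*
+    function is supplied it is the keys, not the items, that must be
+    hashable.)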
+ + """ + if n <= 0: + raise ValueError('n must be greater than 0') + + window = deque(maxlen=n) + counts = defaultdict(int) + use_key = key is not None + + for item in iterable: + if len(window) == n: + to_discard = window[0] + if counts[to_discard] == 1: + del counts[to_discard] + else: + counts[to_discard] -= 1 + + k = key(item) if use_key else item + if k not in counts: + yield item + counts[k] += 1 + window.append(k) + + +def duplicates_everseen(iterable, key=None): + """Yield duplicate elements after their first appearance. + + >>> list(duplicates_everseen('mississippi')) + ['s', 'i', 's', 's', 'i', 'p', 'i'] + >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower)) + ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a'] + + This function is analogous to :func:`unique_everseen` and is subject to + the same performance considerations. + + """ + seen_set = set() + seen_list = [] + use_key = key is not None + + for element in iterable: + k = key(element) if use_key else element + try: + if k not in seen_set: + seen_set.add(k) + else: + yield element + except TypeError: + if k not in seen_list: + seen_list.append(k) + else: + yield element + + +def duplicates_justseen(iterable, key=None): + """Yields serially-duplicate elements after their first appearance. + + >>> list(duplicates_justseen('mississippi')) + ['s', 's', 'p'] + >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower)) + ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a'] + + This function is analogous to :func:`unique_justseen`. + + """ + return flatten(g for _, g in groupby(iterable, key) for _ in g) + + +def classify_unique(iterable, key=None): + """Classify each element in terms of its uniqueness. + + For each element in the input iterable, return a 3-tuple consisting of: + + 1. The element itself + 2. ``False`` if the element is equal to the one preceding it in the input, + ``True`` otherwise (i.e. the equivalent of :func:`unique_justseen`) + 3. ``False`` if this element has been seen anywhere in the input before, + ``True`` otherwise (i.e. the equivalent of :func:`unique_everseen`) + + >>> list(classify_unique('otto')) # doctest: +NORMALIZE_WHITESPACE + [('o', True, True), + ('t', True, True), + ('t', False, False), + ('o', True, False)] + + This function is analogous to :func:`unique_everseen` and is subject to + the same performance considerations. + + """ + seen_set = set() + seen_list = [] + use_key = key is not None + previous = None + + for i, element in enumerate(iterable): + k = key(element) if use_key else element + is_unique_justseen = not i or previous != k + previous = k + is_unique_everseen = False + try: + if k not in seen_set: + seen_set.add(k) + is_unique_everseen = True + except TypeError: + if k not in seen_list: + seen_list.append(k) + is_unique_everseen = True + yield element, is_unique_justseen, is_unique_everseen + + +def minmax(iterable_or_value, *others, key=None, default=_marker): + """Returns both the smallest and largest items in an iterable + or the largest of two or more arguments. + + >>> minmax([3, 1, 5]) + (1, 5) + + >>> minmax(4, 2, 6) + (2, 6) + + If a *key* function is provided, it will be used to transform the input + items for comparison. + + >>> minmax([5, 30], key=str) # '30' sorts before '5' + (30, 5) + + If a *default* value is provided, it will be returned if there are no + input items. + + >>> minmax([], default=(0, 0)) + (0, 0) + + Otherwise ``ValueError`` is raised. 
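+
+    The input is consumed in a single pass, so one-shot iterators work too:
+
+    >>> minmax(iter([3, 1, 5]))
+    (1, 5)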
+
+    This function is based on a recipe by Raymond Hettinger and takes care
+    to minimize the number of comparisons performed.
+    """
+    iterable = (iterable_or_value, *others) if others else iterable_or_value
+
+    it = iter(iterable)
+
+    try:
+        lo = hi = next(it)
+    except StopIteration as e:
+        if default is _marker:
+            raise ValueError(
+                '`minmax()` argument is an empty iterable. '
+                'Provide a `default` value to suppress this error.'
+            ) from e
+        return default
+
+    # Different branches depending on the presence of key. This saves a lot
+    # of unimportant copies which would slow the "key=None" branch down
+    # significantly.
+    if key is None:
+        for x, y in zip_longest(it, it, fillvalue=lo):
+            if y < x:
+                x, y = y, x
+            if x < lo:
+                lo = x
+            if hi < y:
+                hi = y
+
+    else:
+        lo_key = hi_key = key(lo)
+
+        for x, y in zip_longest(it, it, fillvalue=lo):
+            x_key, y_key = key(x), key(y)
+
+            if y_key < x_key:
+                x, y, x_key, y_key = y, x, y_key, x_key
+            if x_key < lo_key:
+                lo, lo_key = x, x_key
+            if hi_key < y_key:
+                hi, hi_key = y, y_key
+
+    return lo, hi
+
+
+def constrained_batches(
+    iterable, max_size, max_count=None, get_len=len, strict=True
+):
+    """Yield batches of items from *iterable* with a combined size limited by
+    *max_size*.
+
+    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
+    >>> list(constrained_batches(iterable, 10))
+    [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
+
+    If a *max_count* is supplied, the number of items per batch is also
+    limited:
+
+    >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
+    >>> list(constrained_batches(iterable, 10, max_count=2))
+    [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
+
+    If a *get_len* function is supplied, use that instead of :func:`len` to
+    determine item size.
+
+    If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
+    than *max_size*. Otherwise, allow single items to exceed *max_size*.
+    """
+    if max_size <= 0:
+        raise ValueError('maximum size must be greater than zero')
+
+    batch = []
+    batch_size = 0
+    batch_count = 0
+    for item in iterable:
+        item_len = get_len(item)
+        if strict and item_len > max_size:
+            raise ValueError('item size exceeds maximum size')
+
+        reached_count = batch_count == max_count
+        reached_size = item_len + batch_size > max_size
+        if batch_count and (reached_size or reached_count):
+            yield tuple(batch)
+            batch.clear()
+            batch_size = 0
+            batch_count = 0
+
+        batch.append(item)
+        batch_size += item_len
+        batch_count += 1
+
+    if batch:
+        yield tuple(batch)
+
+
+def gray_product(*iterables):
+    """Like :func:`itertools.product`, but return tuples in an order such
+    that only one element in the generated tuple changes from one iteration
+    to the next.
+
+    >>> list(gray_product('AB', 'CD'))
+    [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]
+
+    This function consumes all of the input iterables before producing output.
+    If any of the input iterables have fewer than two items, ``ValueError``
+    is raised.
+
+    For information on the algorithm, see section 7.2.1.1 of Donald Knuth's
+    *The Art of Computer Programming*.
+    """
+    all_iterables = tuple(tuple(x) for x in iterables)
+    iterable_count = len(all_iterables)
+    for iterable in all_iterables:
+        if len(iterable) < 2:
+            raise ValueError("each iterable must have two or more items")
+
+    # This is based on "Algorithm H" from section 7.2.1.1, page 20.
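+    # It generalizes the reflected binary Gray code to mixed radixes: the
+    # focus pointers select which coordinate to step next and the direction
+    # array flips sign at either end, so consecutive tuples differ in
+    # exactly one position.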
+ # a holds the indexes of the source iterables for the n-tuple to be yielded + # f is the array of "focus pointers" + # o is the array of "directions" + a = [0] * iterable_count + f = list(range(iterable_count + 1)) + o = [1] * iterable_count + while True: + yield tuple(all_iterables[i][a[i]] for i in range(iterable_count)) + j = f[0] + f[0] = 0 + if j == iterable_count: + break + a[j] = a[j] + o[j] + if a[j] == 0 or a[j] == len(all_iterables[j]) - 1: + o[j] = -o[j] + f[j] = f[j + 1] + f[j + 1] = j + 1 + + +def partial_product(*iterables): + """Yields tuples containing one item from each iterator, with subsequent + tuples changing a single item at a time by advancing each iterator until it + is exhausted. This sequence guarantees every value in each iterable is + output at least once without generating all possible combinations. + + This may be useful, for example, when testing an expensive function. + + >>> list(partial_product('AB', 'C', 'DEF')) + [('A', 'C', 'D'), ('B', 'C', 'D'), ('B', 'C', 'E'), ('B', 'C', 'F')] + """ + + iterators = list(map(iter, iterables)) + + try: + prod = [next(it) for it in iterators] + except StopIteration: + return + yield tuple(prod) + + for i, it in enumerate(iterators): + for prod[i] in it: + yield tuple(prod) + + +def takewhile_inclusive(predicate, iterable): + """A variant of :func:`takewhile` that yields one additional element. + + >>> list(takewhile_inclusive(lambda x: x < 5, [1, 4, 6, 4, 1])) + [1, 4, 6] + + :func:`takewhile` would return ``[1, 4]``. + """ + for x in iterable: + yield x + if not predicate(x): + break + + +def outer_product(func, xs, ys, *args, **kwargs): + """A generalized outer product that applies a binary function to all + pairs of items. Returns a 2D matrix with ``len(xs)`` rows and ``len(ys)`` + columns. + Also accepts ``*args`` and ``**kwargs`` that are passed to ``func``. + + Multiplication table: + + >>> list(outer_product(mul, range(1, 4), range(1, 6))) + [(1, 2, 3, 4, 5), (2, 4, 6, 8, 10), (3, 6, 9, 12, 15)] + + Cross tabulation: + + >>> xs = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B'] + >>> ys = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z'] + >>> rows = list(zip(xs, ys)) + >>> count_rows = lambda x, y: rows.count((x, y)) + >>> list(outer_product(count_rows, sorted(set(xs)), sorted(set(ys)))) + [(2, 3, 0), (1, 0, 4)] + + Usage with ``*args`` and ``**kwargs``: + + >>> animals = ['cat', 'wolf', 'mouse'] + >>> list(outer_product(min, animals, animals, key=len)) + [('cat', 'cat', 'cat'), ('cat', 'wolf', 'wolf'), ('cat', 'wolf', 'mouse')] + """ + ys = tuple(ys) + return batched( + starmap(lambda x, y: func(x, y, *args, **kwargs), product(xs, ys)), + n=len(ys), + ) + + +def iter_suppress(iterable, *exceptions): + """Yield each of the items from *iterable*. If the iteration raises one of + the specified *exceptions*, that exception will be suppressed and iteration + will stop. + + >>> from itertools import chain + >>> def breaks_at_five(x): + ... while True: + ... if x >= 5: + ... raise RuntimeError + ... yield x + ... x += 1 + >>> it_1 = iter_suppress(breaks_at_five(1), RuntimeError) + >>> it_2 = iter_suppress(breaks_at_five(2), RuntimeError) + >>> list(chain(it_1, it_2)) + [1, 2, 3, 4, 2, 3, 4] + """ + try: + yield from iterable + except exceptions: + return + + +def filter_map(func, iterable): + """Apply *func* to every element of *iterable*, yielding only those which + are not ``None``. 
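+    This fuses ``filter(lambda y: y is not None, map(func, iterable))``
+    into a single pass: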
+ + >>> elems = ['1', 'a', '2', 'b', '3'] + >>> list(filter_map(lambda s: int(s) if s.isnumeric() else None, elems)) + [1, 2, 3] + """ + for x in iterable: + y = func(x) + if y is not None: + yield y diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/more.pyi b/env-llmeval/lib/python3.10/site-packages/more_itertools/more.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9a5fc911a3ed7249b95cbda2433924b6aced2ae7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/more_itertools/more.pyi @@ -0,0 +1,695 @@ +"""Stubs for more_itertools.more""" +from __future__ import annotations + +from types import TracebackType +from typing import ( + Any, + Callable, + Container, + ContextManager, + Generic, + Hashable, + Iterable, + Iterator, + overload, + Reversible, + Sequence, + Sized, + Type, + TypeVar, + type_check_only, +) +from typing_extensions import Protocol + +# Type and type variable definitions +_T = TypeVar('_T') +_T1 = TypeVar('_T1') +_T2 = TypeVar('_T2') +_U = TypeVar('_U') +_V = TypeVar('_V') +_W = TypeVar('_W') +_T_co = TypeVar('_T_co', covariant=True) +_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[Any]]) +_Raisable = BaseException | Type[BaseException] + +@type_check_only +class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ... + +@type_check_only +class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ... + +@type_check_only +class _SupportsSlicing(Protocol[_T_co]): + def __getitem__(self, __k: slice) -> _T_co: ... + +def chunked( + iterable: Iterable[_T], n: int | None, strict: bool = ... +) -> Iterator[list[_T]]: ... +@overload +def first(iterable: Iterable[_T]) -> _T: ... +@overload +def first(iterable: Iterable[_T], default: _U) -> _T | _U: ... +@overload +def last(iterable: Iterable[_T]) -> _T: ... +@overload +def last(iterable: Iterable[_T], default: _U) -> _T | _U: ... +@overload +def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ... +@overload +def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ... + +class peekable(Generic[_T], Iterator[_T]): + def __init__(self, iterable: Iterable[_T]) -> None: ... + def __iter__(self) -> peekable[_T]: ... + def __bool__(self) -> bool: ... + @overload + def peek(self) -> _T: ... + @overload + def peek(self, default: _U) -> _T | _U: ... + def prepend(self, *items: _T) -> None: ... + def __next__(self) -> _T: ... + @overload + def __getitem__(self, index: int) -> _T: ... + @overload + def __getitem__(self, index: slice) -> list[_T]: ... + +def consumer(func: _GenFn) -> _GenFn: ... +def ilen(iterable: Iterable[_T]) -> int: ... +def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... +def with_iter( + context_manager: ContextManager[Iterable[_T]], +) -> Iterator[_T]: ... +def one( + iterable: Iterable[_T], + too_short: _Raisable | None = ..., + too_long: _Raisable | None = ..., +) -> _T: ... +def raise_(exception: _Raisable, *args: Any) -> None: ... +def strictly_n( + iterable: Iterable[_T], + n: int, + too_short: _GenFn | None = ..., + too_long: _GenFn | None = ..., +) -> list[_T]: ... +def distinct_permutations( + iterable: Iterable[_T], r: int | None = ... +) -> Iterator[tuple[_T, ...]]: ... +def intersperse( + e: _U, iterable: Iterable[_T], n: int = ... +) -> Iterator[_T | _U]: ... +def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ... +@overload +def windowed( + seq: Iterable[_T], n: int, *, step: int = ... +) -> Iterator[tuple[_T | None, ...]]: ... 
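+# The overload above covers calls without a fillvalue, where windows may be
+# padded with None; the overload below propagates the fillvalue's type.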
+@overload +def windowed( + seq: Iterable[_T], n: int, fillvalue: _U, step: int = ... +) -> Iterator[tuple[_T | _U, ...]]: ... +def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ... +def substrings_indexes( + seq: Sequence[_T], reverse: bool = ... +) -> Iterator[tuple[Sequence[_T], int, int]]: ... + +class bucket(Generic[_T, _U], Container[_U]): + def __init__( + self, + iterable: Iterable[_T], + key: Callable[[_T], _U], + validator: Callable[[_U], object] | None = ..., + ) -> None: ... + def __contains__(self, value: object) -> bool: ... + def __iter__(self) -> Iterator[_U]: ... + def __getitem__(self, value: object) -> Iterator[_T]: ... + +def spy( + iterable: Iterable[_T], n: int = ... +) -> tuple[list[_T], Iterator[_T]]: ... +def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ... +def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ... +def interleave_evenly( + iterables: list[Iterable[_T]], lengths: list[int] | None = ... +) -> Iterator[_T]: ... +def collapse( + iterable: Iterable[Any], + base_type: type | None = ..., + levels: int | None = ..., +) -> Iterator[Any]: ... +@overload +def side_effect( + func: Callable[[_T], object], + iterable: Iterable[_T], + chunk_size: None = ..., + before: Callable[[], object] | None = ..., + after: Callable[[], object] | None = ..., +) -> Iterator[_T]: ... +@overload +def side_effect( + func: Callable[[list[_T]], object], + iterable: Iterable[_T], + chunk_size: int, + before: Callable[[], object] | None = ..., + after: Callable[[], object] | None = ..., +) -> Iterator[_T]: ... +def sliced( + seq: _SupportsSlicing[_T], n: int, strict: bool = ... +) -> Iterator[_T]: ... +def split_at( + iterable: Iterable[_T], + pred: Callable[[_T], object], + maxsplit: int = ..., + keep_separator: bool = ..., +) -> Iterator[list[_T]]: ... +def split_before( + iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ... +) -> Iterator[list[_T]]: ... +def split_after( + iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ... +) -> Iterator[list[_T]]: ... +def split_when( + iterable: Iterable[_T], + pred: Callable[[_T, _T], object], + maxsplit: int = ..., +) -> Iterator[list[_T]]: ... +def split_into( + iterable: Iterable[_T], sizes: Iterable[int | None] +) -> Iterator[list[_T]]: ... +@overload +def padded( + iterable: Iterable[_T], + *, + n: int | None = ..., + next_multiple: bool = ..., +) -> Iterator[_T | None]: ... +@overload +def padded( + iterable: Iterable[_T], + fillvalue: _U, + n: int | None = ..., + next_multiple: bool = ..., +) -> Iterator[_T | _U]: ... +@overload +def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ... +@overload +def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ... +def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ... +@overload +def stagger( + iterable: Iterable[_T], + offsets: _SizedIterable[int] = ..., + longest: bool = ..., +) -> Iterator[tuple[_T | None, ...]]: ... +@overload +def stagger( + iterable: Iterable[_T], + offsets: _SizedIterable[int] = ..., + longest: bool = ..., + fillvalue: _U = ..., +) -> Iterator[tuple[_T | _U, ...]]: ... + +class UnequalIterablesError(ValueError): + def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ... + +@overload +def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ... +@overload +def zip_equal( + __iter1: Iterable[_T1], __iter2: Iterable[_T2] +) -> Iterator[tuple[_T1, _T2]]: ... 
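+# Beyond two iterables, the overload below falls back to a single shared _T,
+# trading per-position element types for arbitrary arity.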
+@overload +def zip_equal( + __iter1: Iterable[_T], + __iter2: Iterable[_T], + __iter3: Iterable[_T], + *iterables: Iterable[_T], +) -> Iterator[tuple[_T, ...]]: ... +@overload +def zip_offset( + __iter1: Iterable[_T1], + *, + offsets: _SizedIterable[int], + longest: bool = ..., + fillvalue: None = None, +) -> Iterator[tuple[_T1 | None]]: ... +@overload +def zip_offset( + __iter1: Iterable[_T1], + __iter2: Iterable[_T2], + *, + offsets: _SizedIterable[int], + longest: bool = ..., + fillvalue: None = None, +) -> Iterator[tuple[_T1 | None, _T2 | None]]: ... +@overload +def zip_offset( + __iter1: Iterable[_T], + __iter2: Iterable[_T], + __iter3: Iterable[_T], + *iterables: Iterable[_T], + offsets: _SizedIterable[int], + longest: bool = ..., + fillvalue: None = None, +) -> Iterator[tuple[_T | None, ...]]: ... +@overload +def zip_offset( + __iter1: Iterable[_T1], + *, + offsets: _SizedIterable[int], + longest: bool = ..., + fillvalue: _U, +) -> Iterator[tuple[_T1 | _U]]: ... +@overload +def zip_offset( + __iter1: Iterable[_T1], + __iter2: Iterable[_T2], + *, + offsets: _SizedIterable[int], + longest: bool = ..., + fillvalue: _U, +) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ... +@overload +def zip_offset( + __iter1: Iterable[_T], + __iter2: Iterable[_T], + __iter3: Iterable[_T], + *iterables: Iterable[_T], + offsets: _SizedIterable[int], + longest: bool = ..., + fillvalue: _U, +) -> Iterator[tuple[_T | _U, ...]]: ... +def sort_together( + iterables: Iterable[Iterable[_T]], + key_list: Iterable[int] = ..., + key: Callable[..., Any] | None = ..., + reverse: bool = ..., +) -> list[tuple[_T, ...]]: ... +def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ... +def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ... +def always_iterable( + obj: object, + base_type: type | tuple[type | tuple[Any, ...], ...] | None = ..., +) -> Iterator[Any]: ... +def adjacent( + predicate: Callable[[_T], bool], + iterable: Iterable[_T], + distance: int = ..., +) -> Iterator[tuple[bool, _T]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: None = None, + valuefunc: None = None, + reducefunc: None = None, +) -> Iterator[tuple[_T, Iterator[_T]]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: Callable[[_T], _U], + valuefunc: None, + reducefunc: None, +) -> Iterator[tuple[_U, Iterator[_T]]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: None, + valuefunc: Callable[[_T], _V], + reducefunc: None, +) -> Iterable[tuple[_T, Iterable[_V]]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: Callable[[_T], _U], + valuefunc: Callable[[_T], _V], + reducefunc: None, +) -> Iterable[tuple[_U, Iterator[_V]]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: None, + valuefunc: None, + reducefunc: Callable[[Iterator[_T]], _W], +) -> Iterable[tuple[_T, _W]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: Callable[[_T], _U], + valuefunc: None, + reducefunc: Callable[[Iterator[_T]], _W], +) -> Iterable[tuple[_U, _W]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: None, + valuefunc: Callable[[_T], _V], + reducefunc: Callable[[Iterable[_V]], _W], +) -> Iterable[tuple[_T, _W]]: ... +@overload +def groupby_transform( + iterable: Iterable[_T], + keyfunc: Callable[[_T], _U], + valuefunc: Callable[[_T], _V], + reducefunc: Callable[[Iterable[_V]], _W], +) -> Iterable[tuple[_U, _W]]: ... 
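+# The groupby_transform overloads above enumerate every combination of
+# keyfunc, valuefunc, and reducefunc being supplied or None, so the key,
+# value, and reduced types can be inferred independently of one another.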
+ +class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]): + @overload + def __init__(self, __stop: _T) -> None: ... + @overload + def __init__(self, __start: _T, __stop: _T) -> None: ... + @overload + def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ... + def __bool__(self) -> bool: ... + def __contains__(self, elem: object) -> bool: ... + def __eq__(self, other: object) -> bool: ... + @overload + def __getitem__(self, key: int) -> _T: ... + @overload + def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ... + def __hash__(self) -> int: ... + def __iter__(self) -> Iterator[_T]: ... + def __len__(self) -> int: ... + def __reduce__( + self, + ) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ... + def __repr__(self) -> str: ... + def __reversed__(self) -> Iterator[_T]: ... + def count(self, value: _T) -> int: ... + def index(self, value: _T) -> int: ... # type: ignore + +def count_cycle( + iterable: Iterable[_T], n: int | None = ... +) -> Iterable[tuple[int, _T]]: ... +def mark_ends( + iterable: Iterable[_T], +) -> Iterable[tuple[bool, bool, _T]]: ... +def locate( + iterable: Iterable[_T], + pred: Callable[..., Any] = ..., + window_size: int | None = ..., +) -> Iterator[int]: ... +def lstrip( + iterable: Iterable[_T], pred: Callable[[_T], object] +) -> Iterator[_T]: ... +def rstrip( + iterable: Iterable[_T], pred: Callable[[_T], object] +) -> Iterator[_T]: ... +def strip( + iterable: Iterable[_T], pred: Callable[[_T], object] +) -> Iterator[_T]: ... + +class islice_extended(Generic[_T], Iterator[_T]): + def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ... + def __iter__(self) -> islice_extended[_T]: ... + def __next__(self) -> _T: ... + def __getitem__(self, index: slice) -> islice_extended[_T]: ... + +def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ... +def consecutive_groups( + iterable: Iterable[_T], ordering: Callable[[_T], int] = ... +) -> Iterator[Iterator[_T]]: ... +@overload +def difference( + iterable: Iterable[_T], + func: Callable[[_T, _T], _U] = ..., + *, + initial: None = ..., +) -> Iterator[_T | _U]: ... +@overload +def difference( + iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U +) -> Iterator[_U]: ... + +class SequenceView(Generic[_T], Sequence[_T]): + def __init__(self, target: Sequence[_T]) -> None: ... + @overload + def __getitem__(self, index: int) -> _T: ... + @overload + def __getitem__(self, index: slice) -> Sequence[_T]: ... + def __len__(self) -> int: ... + +class seekable(Generic[_T], Iterator[_T]): + def __init__( + self, iterable: Iterable[_T], maxlen: int | None = ... + ) -> None: ... + def __iter__(self) -> seekable[_T]: ... + def __next__(self) -> _T: ... + def __bool__(self) -> bool: ... + @overload + def peek(self) -> _T: ... + @overload + def peek(self, default: _U) -> _T | _U: ... + def elements(self) -> SequenceView[_T]: ... + def seek(self, index: int) -> None: ... + def relative_seek(self, count: int) -> None: ... + +class run_length: + @staticmethod + def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ... + @staticmethod + def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ... + +def exactly_n( + iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ... +) -> bool: ... +def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ... +def make_decorator( + wrapping_func: Callable[..., _U], result_index: int = ... 
+) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
+@overload
+def map_reduce(
+    iterable: Iterable[_T],
+    keyfunc: Callable[[_T], _U],
+    valuefunc: None = ...,
+    reducefunc: None = ...,
+) -> dict[_U, list[_T]]: ...
+@overload
+def map_reduce(
+    iterable: Iterable[_T],
+    keyfunc: Callable[[_T], _U],
+    valuefunc: Callable[[_T], _V],
+    reducefunc: None = ...,
+) -> dict[_U, list[_V]]: ...
+@overload
+def map_reduce(
+    iterable: Iterable[_T],
+    keyfunc: Callable[[_T], _U],
+    valuefunc: None = ...,
+    reducefunc: Callable[[list[_T]], _W] = ...,
+) -> dict[_U, _W]: ...
+@overload
+def map_reduce(
+    iterable: Iterable[_T],
+    keyfunc: Callable[[_T], _U],
+    valuefunc: Callable[[_T], _V],
+    reducefunc: Callable[[list[_V]], _W],
+) -> dict[_U, _W]: ...
+def rlocate(
+    iterable: Iterable[_T],
+    pred: Callable[..., object] = ...,
+    window_size: int | None = ...,
+) -> Iterator[int]: ...
+def replace(
+    iterable: Iterable[_T],
+    pred: Callable[..., object],
+    substitutes: Iterable[_U],
+    count: int | None = ...,
+    window_size: int = ...,
+) -> Iterator[_T | _U]: ...
+def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
+def set_partitions(
+    iterable: Iterable[_T], k: int | None = ...
+) -> Iterator[list[list[_T]]]: ...
+
+class time_limited(Generic[_T], Iterator[_T]):
+    def __init__(
+        self, limit_seconds: float, iterable: Iterable[_T]
+    ) -> None: ...
+    def __iter__(self) -> time_limited[_T]: ...
+    def __next__(self) -> _T: ...
+
+@overload
+def only(
+    iterable: Iterable[_T], *, too_long: _Raisable | None = ...
+) -> _T | None: ...
+@overload
+def only(
+    iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
+) -> _T | _U: ...
+def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
+def distinct_combinations(
+    iterable: Iterable[_T], r: int
+) -> Iterator[tuple[_T, ...]]: ...
+def filter_except(
+    validator: Callable[[Any], object],
+    iterable: Iterable[_T],
+    *exceptions: Type[BaseException],
+) -> Iterator[_T]: ...
+def map_except(
+    function: Callable[[Any], _U],
+    iterable: Iterable[_T],
+    *exceptions: Type[BaseException],
+) -> Iterator[_U]: ...
+def map_if(
+    iterable: Iterable[Any],
+    pred: Callable[[Any], bool],
+    func: Callable[[Any], Any],
+    func_else: Callable[[Any], Any] | None = ...,
+) -> Iterator[Any]: ...
+def sample(
+    iterable: Iterable[_T],
+    k: int,
+    weights: Iterable[float] | None = ...,
+) -> list[_T]: ...
+def is_sorted(
+    iterable: Iterable[_T],
+    key: Callable[[_T], _U] | None = ...,
+    reverse: bool = False,
+    strict: bool = False,
+) -> bool: ...
+
+class AbortThread(BaseException):
+    pass
+
+class callback_iter(Generic[_T], Iterator[_T]):
+    def __init__(
+        self,
+        func: Callable[..., Any],
+        callback_kwd: str = ...,
+        wait_seconds: float = ...,
+    ) -> None: ...
+    def __enter__(self) -> callback_iter[_T]: ...
+    def __exit__(
+        self,
+        exc_type: Type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> bool | None: ...
+    def __iter__(self) -> callback_iter[_T]: ...
+    def __next__(self) -> _T: ...
+    def _reader(self) -> Iterator[_T]: ...
+    @property
+    def done(self) -> bool: ...
+    @property
+    def result(self) -> Any: ...
+
+def windowed_complete(
+    iterable: Iterable[_T], n: int
+) -> Iterator[tuple[_T, ...]]: ...
+def all_unique(
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> bool: ...
+def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
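+# nth_product above and the nth_* / *_index functions below are inverse
+# pairs: nth_* maps a lexicographic rank to a tuple, while *_index maps a
+# tuple back to its rank.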
+def nth_combination_with_replacement(
+    iterable: Iterable[_T], r: int, index: int
+) -> tuple[_T, ...]: ...
+def nth_permutation(
+    iterable: Iterable[_T], r: int, index: int
+) -> tuple[_T, ...]: ...
+def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
+def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
+def combination_index(
+    element: Iterable[_T], iterable: Iterable[_T]
+) -> int: ...
+def combination_with_replacement_index(
+    element: Iterable[_T], iterable: Iterable[_T]
+) -> int: ...
+def permutation_index(
+    element: Iterable[_T], iterable: Iterable[_T]
+) -> int: ...
+def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ...
+
+class countable(Generic[_T], Iterator[_T]):
+    def __init__(self, iterable: Iterable[_T]) -> None: ...
+    def __iter__(self) -> countable[_T]: ...
+    def __next__(self) -> _T: ...
+
+def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
+def zip_broadcast(
+    *objects: _T | Iterable[_T],
+    scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
+    strict: bool = ...,
+) -> Iterable[tuple[_T, ...]]: ...
+def unique_in_window(
+    iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def duplicates_everseen(
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def duplicates_justseen(
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[_T]: ...
+def classify_unique(
+    iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
+) -> Iterator[tuple[_T, bool, bool]]: ...
+
+class _SupportsLessThan(Protocol):
+    def __lt__(self, __other: Any) -> bool: ...
+
+_SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
+
+@overload
+def minmax(
+    iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
+) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+@overload
+def minmax(
+    iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
+) -> tuple[_T, _T]: ...
+@overload
+def minmax(
+    iterable_or_value: Iterable[_SupportsLessThanT],
+    *,
+    key: None = None,
+    default: _U,
+) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+@overload
+def minmax(
+    iterable_or_value: Iterable[_T],
+    *,
+    key: Callable[[_T], _SupportsLessThan],
+    default: _U,
+) -> _U | tuple[_T, _T]: ...
+@overload
+def minmax(
+    iterable_or_value: _SupportsLessThanT,
+    __other: _SupportsLessThanT,
+    *others: _SupportsLessThanT,
+) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
+@overload
+def minmax(
+    iterable_or_value: _T,
+    __other: _T,
+    *others: _T,
+    key: Callable[[_T], _SupportsLessThan],
+) -> tuple[_T, _T]: ...
+def longest_common_prefix(
+    iterables: Iterable[Iterable[_T]],
+) -> Iterator[_T]: ...
+def iequals(*iterables: Iterable[Any]) -> bool: ...
+def constrained_batches(
+    iterable: Iterable[_T],
+    max_size: int,
+    max_count: int | None = ...,
+    get_len: Callable[[_T], object] = ...,
+    strict: bool = ...,
+) -> Iterator[tuple[_T, ...]]: ...
+def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
+def partial_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
+def takewhile_inclusive(
+    predicate: Callable[[_T], bool], iterable: Iterable[_T]
+) -> Iterator[_T]: ...
+def outer_product(
+    func: Callable[[_T, _U], _V],
+    xs: Iterable[_T],
+    ys: Iterable[_U],
+    *args: Any,
+    **kwargs: Any,
+) -> Iterator[tuple[_V, ...]]: ...
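+# Note that outer_product forwards *args and **kwargs verbatim to func, so
+# they are typed as Any; each yielded tuple is one row of results.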
+def iter_suppress( + iterable: Iterable[_T], + *exceptions: Type[BaseException], +) -> Iterator[_T]: ... +def filter_map( + func: Callable[[_T], _V | None], + iterable: Iterable[_T], +) -> Iterator[_V]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/py.typed b/env-llmeval/lib/python3.10/site-packages/more_itertools/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/recipes.py b/env-llmeval/lib/python3.10/site-packages/more_itertools/recipes.py new file mode 100644 index 0000000000000000000000000000000000000000..145e3cb5bd6bd5b916e3544e4b042a7ed203621a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/more_itertools/recipes.py @@ -0,0 +1,1012 @@ +"""Imported from the recipes section of the itertools documentation. + +All functions taken from the recipes section of the itertools library docs +[1]_. +Some backward-compatible usability improvements have been made. + +.. [1] http://docs.python.org/library/itertools.html#recipes + +""" +import math +import operator + +from collections import deque +from collections.abc import Sized +from functools import partial, reduce +from itertools import ( + chain, + combinations, + compress, + count, + cycle, + groupby, + islice, + product, + repeat, + starmap, + tee, + zip_longest, +) +from random import randrange, sample, choice +from sys import hexversion + +__all__ = [ + 'all_equal', + 'batched', + 'before_and_after', + 'consume', + 'convolve', + 'dotproduct', + 'first_true', + 'factor', + 'flatten', + 'grouper', + 'iter_except', + 'iter_index', + 'matmul', + 'ncycles', + 'nth', + 'nth_combination', + 'padnone', + 'pad_none', + 'pairwise', + 'partition', + 'polynomial_eval', + 'polynomial_from_roots', + 'polynomial_derivative', + 'powerset', + 'prepend', + 'quantify', + 'reshape', + 'random_combination_with_replacement', + 'random_combination', + 'random_permutation', + 'random_product', + 'repeatfunc', + 'roundrobin', + 'sieve', + 'sliding_window', + 'subslices', + 'sum_of_squares', + 'tabulate', + 'tail', + 'take', + 'totient', + 'transpose', + 'triplewise', + 'unique_everseen', + 'unique_justseen', +] + +_marker = object() + + +# zip with strict is available for Python 3.10+ +try: + zip(strict=True) +except TypeError: + _zip_strict = zip +else: + _zip_strict = partial(zip, strict=True) + +# math.sumprod is available for Python 3.12+ +_sumprod = getattr(math, 'sumprod', lambda x, y: dotproduct(x, y)) + + +def take(n, iterable): + """Return first *n* items of the iterable as a list. + + >>> take(3, range(10)) + [0, 1, 2] + + If there are fewer than *n* items in the iterable, all of them are + returned. + + >>> take(10, range(3)) + [0, 1, 2] + + """ + return list(islice(iterable, n)) + + +def tabulate(function, start=0): + """Return an iterator over the results of ``func(start)``, + ``func(start + 1)``, ``func(start + 2)``... + + *func* should be a function that accepts one integer argument. + + If *start* is not specified it defaults to 0. It will be incremented each + time the iterator is advanced. + + >>> square = lambda x: x ** 2 + >>> iterator = tabulate(square, -3) + >>> take(4, iterator) + [9, 4, 1, 0] + + """ + return map(function, count(start)) + + +def tail(n, iterable): + """Return an iterator over the last *n* items of *iterable*. 
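+    Roughly equivalent to ``iter(collections.deque(iterable, maxlen=n))``,
+    but sized inputs are sliced directly rather than copied into a deque.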
+
+    >>> t = tail(3, 'ABCDEFG')
+    >>> list(t)
+    ['E', 'F', 'G']
+
+    """
+    # If the given iterable has a length, then we can use islice to get its
+    # final elements. Note that if the iterable is not actually Iterable,
+    # either islice or deque will throw a TypeError. This is why we don't
+    # check if it is Iterable.
+    if isinstance(iterable, Sized):
+        yield from islice(iterable, max(0, len(iterable) - n), None)
+    else:
+        yield from iter(deque(iterable, maxlen=n))
+
+
+def consume(iterator, n=None):
+    """Advance *iterator* by *n* steps. If *n* is ``None``, consume it
+    entirely.
+
+    Efficiently exhausts an iterator without returning values. Defaults to
+    consuming the whole iterator, but an optional second argument may be
+    provided to limit consumption.
+
+    >>> i = (x for x in range(10))
+    >>> next(i)
+    0
+    >>> consume(i, 3)
+    >>> next(i)
+    4
+    >>> consume(i)
+    >>> next(i)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    StopIteration
+
+    If the iterator has fewer items remaining than the provided limit, the
+    whole iterator will be consumed.
+
+    >>> i = (x for x in range(3))
+    >>> consume(i, 5)
+    >>> next(i)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    StopIteration
+
+    """
+    # Use functions that consume iterators at C speed.
+    if n is None:
+        # feed the entire iterator into a zero-length deque
+        deque(iterator, maxlen=0)
+    else:
+        # advance to the empty slice starting at position n
+        next(islice(iterator, n, n), None)
+
+
+def nth(iterable, n, default=None):
+    """Returns the nth item or a default value.
+
+    >>> l = range(10)
+    >>> nth(l, 3)
+    3
+    >>> nth(l, 20, "zebra")
+    'zebra'
+
+    """
+    return next(islice(iterable, n, None), default)
+
+
+def all_equal(iterable):
+    """
+    Returns ``True`` if all the elements are equal to each other.
+
+    >>> all_equal('aaaa')
+    True
+    >>> all_equal('aaab')
+    False
+
+    """
+    g = groupby(iterable)
+    return next(g, True) and not next(g, False)
+
+
+def quantify(iterable, pred=bool):
+    """Return how many times the predicate is true.
+
+    >>> quantify([True, False, True])
+    2
+
+    """
+    return sum(map(pred, iterable))
+
+
+def pad_none(iterable):
+    """Returns the sequence of elements and then returns ``None`` indefinitely.
+
+    >>> take(5, pad_none(range(3)))
+    [0, 1, 2, None, None]
+
+    Useful for emulating the behavior of the built-in :func:`map` function.
+
+    See also :func:`padded`.
+
+    """
+    return chain(iterable, repeat(None))
+
+
+padnone = pad_none
+
+
+def ncycles(iterable, n):
+    """Returns the sequence elements *n* times.
+
+    >>> list(ncycles(["a", "b"], 3))
+    ['a', 'b', 'a', 'b', 'a', 'b']
+
+    """
+    return chain.from_iterable(repeat(tuple(iterable), n))
+
+
+def dotproduct(vec1, vec2):
+    """Returns the dot product of the two iterables.
+
+    >>> dotproduct([10, 10], [20, 20])
+    400
+
+    """
+    return sum(map(operator.mul, vec1, vec2))
+
+
+def flatten(listOfLists):
+    """Return an iterator flattening one level of nesting in a list of lists.
+
+    >>> list(flatten([[0, 1], [2, 3]]))
+    [0, 1, 2, 3]
+
+    See also :func:`collapse`, which can flatten multiple levels of nesting.
+
+    """
+    return chain.from_iterable(listOfLists)
+
+
+def repeatfunc(func, times=None, *args):
+    """Call *func* with *args* repeatedly, returning an iterable over the
+    results.
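+    Internally this is just ``starmap(func, repeat(args, times))``, or
+    ``starmap(func, repeat(args))`` when *times* is ``None``.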
+ + If *times* is specified, the iterable will terminate after that many + repetitions: + + >>> from operator import add + >>> times = 4 + >>> args = 3, 5 + >>> list(repeatfunc(add, times, *args)) + [8, 8, 8, 8] + + If *times* is ``None`` the iterable will not terminate: + + >>> from random import randrange + >>> times = None + >>> args = 1, 11 + >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP + [2, 4, 8, 1, 8, 4] + + """ + if times is None: + return starmap(func, repeat(args)) + return starmap(func, repeat(args, times)) + + +def _pairwise(iterable): + """Returns an iterator of paired items, overlapping, from the original + + >>> take(4, pairwise(count())) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. + + """ + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +try: + from itertools import pairwise as itertools_pairwise +except ImportError: + pairwise = _pairwise +else: + + def pairwise(iterable): + return itertools_pairwise(iterable) + + pairwise.__doc__ = _pairwise.__doc__ + + +class UnequalIterablesError(ValueError): + def __init__(self, details=None): + msg = 'Iterables have different lengths' + if details is not None: + msg += (': index 0 has length {}; index {} has length {}').format( + *details + ) + + super().__init__(msg) + + +def _zip_equal_generator(iterables): + for combo in zip_longest(*iterables, fillvalue=_marker): + for val in combo: + if val is _marker: + raise UnequalIterablesError() + yield combo + + +def _zip_equal(*iterables): + # Check whether the iterables are all the same size. + try: + first_size = len(iterables[0]) + for i, it in enumerate(iterables[1:], 1): + size = len(it) + if size != first_size: + raise UnequalIterablesError(details=(first_size, i, size)) + # All sizes are equal, we can use the built-in zip. + return zip(*iterables) + # If any one of the iterables didn't have a length, start reading + # them until one runs out. + except TypeError: + return _zip_equal_generator(iterables) + + +def grouper(iterable, n, incomplete='fill', fillvalue=None): + """Group elements from *iterable* into fixed-length groups of length *n*. + + >>> list(grouper('ABCDEF', 3)) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + The keyword arguments *incomplete* and *fillvalue* control what happens for + iterables whose length is not a multiple of *n*. + + When *incomplete* is `'fill'`, the last group will contain instances of + *fillvalue*. + + >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x')) + [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] + + When *incomplete* is `'ignore'`, the last group will not be emitted. + + >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x')) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised. + + >>> it = grouper('ABCDEFG', 3, incomplete='strict') + >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + UnequalIterablesError + + """ + args = [iter(iterable)] * n + if incomplete == 'fill': + return zip_longest(*args, fillvalue=fillvalue) + if incomplete == 'strict': + return _zip_equal(*args) + if incomplete == 'ignore': + return zip(*args) + else: + raise ValueError('Expected fill, strict, or ignore') + + +def roundrobin(*iterables): + """Yields an item from each iterable, alternating between them. 
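+    Exhausted iterables drop out, while the remaining ones continue to
+    alternate in their original order.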
+ + >>> list(roundrobin('ABC', 'D', 'EF')) + ['A', 'D', 'E', 'B', 'F', 'C'] + + This function produces the same output as :func:`interleave_longest`, but + may perform better for some inputs (in particular when the number of + iterables is small). + + """ + # Recipe credited to George Sakkis + pending = len(iterables) + nexts = cycle(iter(it).__next__ for it in iterables) + while pending: + try: + for next in nexts: + yield next() + except StopIteration: + pending -= 1 + nexts = cycle(islice(nexts, pending)) + + +def partition(pred, iterable): + """ + Returns a 2-tuple of iterables derived from the input iterable. + The first yields the items that have ``pred(item) == False``. + The second yields the items that have ``pred(item) == True``. + + >>> is_odd = lambda x: x % 2 != 0 + >>> iterable = range(10) + >>> even_items, odd_items = partition(is_odd, iterable) + >>> list(even_items), list(odd_items) + ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) + + If *pred* is None, :func:`bool` is used. + + >>> iterable = [0, 1, False, True, '', ' '] + >>> false_items, true_items = partition(None, iterable) + >>> list(false_items), list(true_items) + ([0, False, ''], [1, True, ' ']) + + """ + if pred is None: + pred = bool + + t1, t2, p = tee(iterable, 3) + p1, p2 = tee(map(pred, p)) + return (compress(t1, map(operator.not_, p1)), compress(t2, p2)) + + +def powerset(iterable): + """Yields all possible subsets of the iterable. + + >>> list(powerset([1, 2, 3])) + [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] + + :func:`powerset` will operate on iterables that aren't :class:`set` + instances, so repeated elements in the input will produce repeated elements + in the output. Use :func:`unique_everseen` on the input to avoid generating + duplicates: + + >>> seq = [1, 1, 0] + >>> list(powerset(seq)) + [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] + >>> from more_itertools import unique_everseen + >>> list(powerset(unique_everseen(seq))) + [(), (1,), (0,), (1, 0)] + + """ + s = list(iterable) + return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) + + +def unique_everseen(iterable, key=None): + """ + Yield unique elements, preserving order. + + >>> list(unique_everseen('AAAABBBCCDAABBB')) + ['A', 'B', 'C', 'D'] + >>> list(unique_everseen('ABBCcAD', str.lower)) + ['A', 'B', 'C', 'D'] + + Sequences with a mix of hashable and unhashable items can be used. + The function will be slower (i.e., `O(n^2)`) for unhashable items. + + Remember that ``list`` objects are unhashable - you can use the *key* + parameter to transform the list to a tuple (which is hashable) to + avoid a slowdown. + + >>> iterable = ([1, 2], [2, 3], [1, 2]) + >>> list(unique_everseen(iterable)) # Slow + [[1, 2], [2, 3]] + >>> list(unique_everseen(iterable, key=tuple)) # Faster + [[1, 2], [2, 3]] + + Similarly, you may want to convert unhashable ``set`` objects with + ``key=frozenset``. For ``dict`` objects, + ``key=lambda x: frozenset(x.items())`` can be used. 
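+
+    >>> iterable = [{'a': 1}, {'b': 2}, {'a': 1}]
+    >>> list(unique_everseen(iterable, key=lambda x: frozenset(x.items())))
+    [{'a': 1}, {'b': 2}]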
+
+    """
+    seenset = set()
+    seenset_add = seenset.add
+    seenlist = []
+    seenlist_add = seenlist.append
+    use_key = key is not None
+
+    for element in iterable:
+        k = key(element) if use_key else element
+        try:
+            if k not in seenset:
+                seenset_add(k)
+                yield element
+        except TypeError:
+            if k not in seenlist:
+                seenlist_add(k)
+                yield element
+
+
+def unique_justseen(iterable, key=None):
+    """Yields elements in order, ignoring serial duplicates
+
+    >>> list(unique_justseen('AAAABBBCCDAABBB'))
+    ['A', 'B', 'C', 'D', 'A', 'B']
+    >>> list(unique_justseen('ABBCcAD', str.lower))
+    ['A', 'B', 'C', 'A', 'D']
+
+    """
+    if key is None:
+        return map(operator.itemgetter(0), groupby(iterable))
+
+    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
+
+
+def iter_except(func, exception, first=None):
+    """Yields results from a function repeatedly until an exception is raised.
+
+    Converts a call-until-exception interface to an iterator interface.
+    Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
+    to end the loop.
+
+    >>> l = [0, 1, 2]
+    >>> list(iter_except(l.pop, IndexError))
+    [2, 1, 0]
+
+    Multiple exceptions can be specified as a stopping condition:
+
+    >>> l = [1, 2, 3, '...', 4, 5, 6]
+    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+    [7, 6, 5]
+    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+    [4, 3, 2]
+    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+    []
+
+    """
+    try:
+        if first is not None:
+            yield first()
+        while 1:
+            yield func()
+    except exception:
+        pass
+
+
+def first_true(iterable, default=None, pred=None):
+    """
+    Returns the first true value in the iterable.
+
+    If no true value is found, returns *default*.
+
+    If *pred* is not None, returns the first item for which
+    ``pred(item) == True``.
+
+    >>> first_true(range(10))
+    1
+    >>> first_true(range(10), pred=lambda x: x > 5)
+    6
+    >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
+    'missing'
+
+    """
+    return next(filter(pred, iterable), default)
+
+
+def random_product(*args, repeat=1):
+    """Draw an item at random from each of the input iterables.
+
+    >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
+    ('c', 3, 'Z')
+
+    If *repeat* is provided as a keyword argument, that many items will be
+    drawn from each iterable.
+
+    >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
+    ('a', 2, 'd', 3)
+
+    This is equivalent to taking a random selection from
+    ``itertools.product(*args, repeat=repeat)``.
+
+    """
+    pools = [tuple(pool) for pool in args] * repeat
+    return tuple(choice(pool) for pool in pools)
+
+
+def random_permutation(iterable, r=None):
+    """Return a random *r* length permutation of the elements in *iterable*.
+
+    If *r* is not specified or is ``None``, then *r* defaults to the length of
+    *iterable*.
+
+    >>> random_permutation(range(5)) # doctest:+SKIP
+    (3, 4, 0, 1, 2)
+
+    This is equivalent to taking a random selection from
+    ``itertools.permutations(iterable, r)``.
+
+    """
+    pool = tuple(iterable)
+    r = len(pool) if r is None else r
+    return tuple(sample(pool, r))
+
+
+def random_combination(iterable, r):
+    """Return a random *r* length subsequence of the elements in *iterable*.
+
+    >>> random_combination(range(5), 3) # doctest:+SKIP
+    (2, 3, 4)
+
+    This is equivalent to taking a random selection from
+    ``itertools.combinations(iterable, r)``.
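+
+    The selected indices are sorted before lookup, so the output tuple
+    preserves the ordering of *iterable*.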
+
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    indices = sorted(sample(range(n), r))
+    return tuple(pool[i] for i in indices)
+
+
+def random_combination_with_replacement(iterable, r):
+    """Return a random *r* length subsequence of elements in *iterable*,
+    allowing individual elements to be repeated.
+
+    >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
+    (0, 0, 1, 2, 2)
+
+    This is equivalent to taking a random selection from
+    ``itertools.combinations_with_replacement(iterable, r)``.
+
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    indices = sorted(randrange(n) for i in range(r))
+    return tuple(pool[i] for i in indices)
+
+
+def nth_combination(iterable, r, index):
+    """Equivalent to ``list(combinations(iterable, r))[index]``.
+
+    The subsequences of *iterable* that are of length *r* can be ordered
+    lexicographically. :func:`nth_combination` computes the subsequence at
+    sort position *index* directly, without computing the previous
+    subsequences.
+
+    >>> nth_combination(range(5), 3, 5)
+    (0, 3, 4)
+
+    ``ValueError`` will be raised if *r* is negative or greater than the length
+    of *iterable*.
+    ``IndexError`` will be raised if the given *index* is invalid.
+    """
+    pool = tuple(iterable)
+    n = len(pool)
+    if (r < 0) or (r > n):
+        raise ValueError
+
+    c = 1
+    k = min(r, n - r)
+    for i in range(1, k + 1):
+        c = c * (n - k + i) // i
+
+    if index < 0:
+        index += c
+
+    if (index < 0) or (index >= c):
+        raise IndexError
+
+    result = []
+    while r:
+        c, n, r = c * r // n, n - 1, r - 1
+        while index >= c:
+            index -= c
+            c, n = c * (n - r) // n, n - 1
+        result.append(pool[-1 - n])
+
+    return tuple(result)
+
+
+def prepend(value, iterator):
+    """Yield *value*, followed by the elements in *iterator*.
+
+    >>> value = '0'
+    >>> iterator = ['1', '2', '3']
+    >>> list(prepend(value, iterator))
+    ['0', '1', '2', '3']
+
+    To prepend multiple values, see :func:`itertools.chain`
+    or :func:`value_chain`.
+
+    """
+    return chain([value], iterator)
+
+
+def convolve(signal, kernel):
+    """Convolve the iterable *signal* with the iterable *kernel*.
+
+    >>> signal = (1, 2, 3, 4, 5)
+    >>> kernel = [3, 2, 1]
+    >>> list(convolve(signal, kernel))
+    [3, 8, 14, 20, 26, 14, 5]
+
+    Note: the input arguments are not interchangeable, as the *kernel*
+    is immediately consumed and stored.
+
+    """
+    # This implementation intentionally doesn't match the one in the itertools
+    # documentation.
+    kernel = tuple(kernel)[::-1]
+    n = len(kernel)
+    window = deque([0], maxlen=n) * n
+    for x in chain(signal, repeat(0, n - 1)):
+        window.append(x)
+        yield _sumprod(kernel, window)
+
+
+def before_and_after(predicate, it):
+    """A variant of :func:`takewhile` that allows complete access to the
+    remainder of the iterator.
+
+    >>> it = iter('ABCdEfGhI')
+    >>> all_upper, remainder = before_and_after(str.isupper, it)
+    >>> ''.join(all_upper)
+    'ABC'
+    >>> ''.join(remainder) # takewhile() would lose the 'd'
+    'dEfGhI'
+
+    Note that the first iterator must be fully consumed before the second
+    iterator can generate valid results.
+    """
+    it = iter(it)
+    transition = []
+
+    def true_iterator():
+        for elem in it:
+            if predicate(elem):
+                yield elem
+            else:
+                transition.append(elem)
+                return
+
+    # Note: this is different from itertools recipes to allow nesting
+    # before_and_after remainders into before_and_after again. See tests
+    # for an example.
+    remainder_iterator = chain(transition, it)
+
+    return true_iterator(), remainder_iterator
+
+
+def triplewise(iterable):
+    """Return overlapping triplets from *iterable*.
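+    This is the three-element analogue of :func:`pairwise`.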
+ + >>> list(triplewise('ABCDE')) + [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] + + """ + for (a, _), (b, c) in pairwise(pairwise(iterable)): + yield a, b, c + + +def sliding_window(iterable, n): + """Return a sliding window of width *n* over *iterable*. + + >>> list(sliding_window(range(6), 4)) + [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] + + If *iterable* has fewer than *n* items, then nothing is yielded: + + >>> list(sliding_window(range(3), 4)) + [] + + For a variant with more features, see :func:`windowed`. + """ + it = iter(iterable) + window = deque(islice(it, n - 1), maxlen=n) + for x in it: + window.append(x) + yield tuple(window) + + +def subslices(iterable): + """Return all contiguous non-empty subslices of *iterable*. + + >>> list(subslices('ABC')) + [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']] + + This is similar to :func:`substrings`, but emits items in a different + order. + """ + seq = list(iterable) + slices = starmap(slice, combinations(range(len(seq) + 1), 2)) + return map(operator.getitem, repeat(seq), slices) + + +def polynomial_from_roots(roots): + """Compute a polynomial's coefficients from its roots. + + >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) + >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 + [1, -4, -17, 60] + """ + factors = zip(repeat(1), map(operator.neg, roots)) + return list(reduce(convolve, factors, [1])) + + +def iter_index(iterable, value, start=0, stop=None): + """Yield the index of each place in *iterable* that *value* occurs, + beginning with index *start* and ending before index *stop*. + + See :func:`locate` for a more general means of finding the indexes + associated with particular values. + + >>> list(iter_index('AABCADEAF', 'A')) + [0, 1, 4, 7] + >>> list(iter_index('AABCADEAF', 'A', 1)) # start index is inclusive + [1, 4, 7] + >>> list(iter_index('AABCADEAF', 'A', 1, 7)) # stop index is not inclusive + [1, 4] + """ + seq_index = getattr(iterable, 'index', None) + if seq_index is None: + # Slow path for general iterables + it = islice(iterable, start, stop) + for i, element in enumerate(it, start): + if element is value or element == value: + yield i + else: + # Fast path for sequences + stop = len(iterable) if stop is None else stop + i = start - 1 + try: + while True: + yield (i := seq_index(value, i + 1, stop)) + except ValueError: + pass + + +def sieve(n): + """Yield the primes less than n. + + >>> list(sieve(30)) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + """ + if n > 2: + yield 2 + start = 3 + data = bytearray((0, 1)) * (n // 2) + limit = math.isqrt(n) + 1 + for p in iter_index(data, 1, start, limit): + yield from iter_index(data, 1, start, p * p) + data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p))) + start = p * p + yield from iter_index(data, 1, start) + + +def _batched(iterable, n, *, strict=False): + """Batch data into tuples of length *n*. If the number of items in + *iterable* is not divisible by *n*: + * The last batch will be shorter if *strict* is ``False``. + * :exc:`ValueError` will be raised if *strict* is ``True``. + + >>> list(batched('ABCDEFG', 3)) + [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)] + + On Python 3.13 and above, this is an alias for :func:`itertools.batched`. 
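+
+    An incomplete final batch raises an error when *strict* is set:
+
+    >>> it = batched('ABCDEFG', 3, strict=True)
+    >>> list(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: batched(): incomplete batch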
+ """ + if n < 1: + raise ValueError('n must be at least one') + it = iter(iterable) + while batch := tuple(islice(it, n)): + if strict and len(batch) != n: + raise ValueError('batched(): incomplete batch') + yield batch + + +if hexversion >= 0x30D00A2: + from itertools import batched as itertools_batched + + def batched(iterable, n, *, strict=False): + return itertools_batched(iterable, n, strict=strict) + +else: + batched = _batched + + batched.__doc__ = _batched.__doc__ + + +def transpose(it): + """Swap the rows and columns of the input matrix. + + >>> list(transpose([(1, 2, 3), (11, 22, 33)])) + [(1, 11), (2, 22), (3, 33)] + + The caller should ensure that the dimensions of the input are compatible. + If the input is empty, no output will be produced. + """ + return _zip_strict(*it) + + +def reshape(matrix, cols): + """Reshape the 2-D input *matrix* to have a column count given by *cols*. + + >>> matrix = [(0, 1), (2, 3), (4, 5)] + >>> cols = 3 + >>> list(reshape(matrix, cols)) + [(0, 1, 2), (3, 4, 5)] + """ + return batched(chain.from_iterable(matrix), cols) + + +def matmul(m1, m2): + """Multiply two matrices. + + >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)])) + [(49, 80), (41, 60)] + + The caller should ensure that the dimensions of the input matrices are + compatible with each other. + """ + n = len(m2[0]) + return batched(starmap(_sumprod, product(m1, transpose(m2))), n) + + +def factor(n): + """Yield the prime factors of n. + + >>> list(factor(360)) + [2, 2, 2, 3, 3, 5] + """ + for prime in sieve(math.isqrt(n) + 1): + while not n % prime: + yield prime + n //= prime + if n == 1: + return + if n > 1: + yield n + + +def polynomial_eval(coefficients, x): + """Evaluate a polynomial at a specific value. + + Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5: + + >>> coefficients = [1, -4, -17, 60] + >>> x = 2.5 + >>> polynomial_eval(coefficients, x) + 8.125 + """ + n = len(coefficients) + if n == 0: + return x * 0 # coerce zero to the type of x + powers = map(pow, repeat(x), reversed(range(n))) + return _sumprod(coefficients, powers) + + +def sum_of_squares(it): + """Return the sum of the squares of the input values. + + >>> sum_of_squares([10, 20, 30]) + 1400 + """ + return _sumprod(*tee(it)) + + +def polynomial_derivative(coefficients): + """Compute the first derivative of a polynomial. + + Example: evaluating the derivative of x^3 - 4 * x^2 - 17 * x + 60 + + >>> coefficients = [1, -4, -17, 60] + >>> derivative_coefficients = polynomial_derivative(coefficients) + >>> derivative_coefficients + [3, -8, -17] + """ + n = len(coefficients) + powers = reversed(range(1, n)) + return list(map(operator.mul, coefficients, powers)) + + +def totient(n): + """Return the count of natural numbers up to *n* that are coprime with *n*. 
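+
+    This evaluates Euler's product formula, phi(n) = n * prod(1 - 1/p),
+    taken over the distinct prime factors *p* of *n*; each pass of the
+    loop below applies one factor via ``n = n // p * (p - 1)``.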
+ + >>> totient(9) + 6 + >>> totient(12) + 4 + """ + for p in unique_justseen(factor(n)): + n = n // p * (p - 1) + + return n diff --git a/env-llmeval/lib/python3.10/site-packages/more_itertools/recipes.pyi b/env-llmeval/lib/python3.10/site-packages/more_itertools/recipes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ed4c19db49b6d9bd4905bcf4242f677987fc5a27 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/more_itertools/recipes.pyi @@ -0,0 +1,128 @@ +"""Stubs for more_itertools.recipes""" +from __future__ import annotations + +from typing import ( + Any, + Callable, + Iterable, + Iterator, + overload, + Sequence, + Type, + TypeVar, +) + +# Type and type variable definitions +_T = TypeVar('_T') +_T1 = TypeVar('_T1') +_T2 = TypeVar('_T2') +_U = TypeVar('_U') + +def take(n: int, iterable: Iterable[_T]) -> list[_T]: ... +def tabulate( + function: Callable[[int], _T], start: int = ... +) -> Iterator[_T]: ... +def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ... +def consume(iterator: Iterable[_T], n: int | None = ...) -> None: ... +@overload +def nth(iterable: Iterable[_T], n: int) -> _T | None: ... +@overload +def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ... +def all_equal(iterable: Iterable[_T]) -> bool: ... +def quantify( + iterable: Iterable[_T], pred: Callable[[_T], bool] = ... +) -> int: ... +def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ... +def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ... +def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ... +def dotproduct(vec1: Iterable[_T1], vec2: Iterable[_T2]) -> Any: ... +def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ... +def repeatfunc( + func: Callable[..., _U], times: int | None = ..., *args: Any +) -> Iterator[_U]: ... +def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ... +def grouper( + iterable: Iterable[_T], + n: int, + incomplete: str = ..., + fillvalue: _U = ..., +) -> Iterator[tuple[_T | _U, ...]]: ... +def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ... +def partition( + pred: Callable[[_T], object] | None, iterable: Iterable[_T] +) -> tuple[Iterator[_T], Iterator[_T]]: ... +def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ... +def unique_everseen( + iterable: Iterable[_T], key: Callable[[_T], _U] | None = ... +) -> Iterator[_T]: ... +def unique_justseen( + iterable: Iterable[_T], key: Callable[[_T], object] | None = ... +) -> Iterator[_T]: ... +@overload +def iter_except( + func: Callable[[], _T], + exception: Type[BaseException] | tuple[Type[BaseException], ...], + first: None = ..., +) -> Iterator[_T]: ... +@overload +def iter_except( + func: Callable[[], _T], + exception: Type[BaseException] | tuple[Type[BaseException], ...], + first: Callable[[], _U], +) -> Iterator[_T | _U]: ... +@overload +def first_true( + iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ... +) -> _T | None: ... +@overload +def first_true( + iterable: Iterable[_T], + default: _U, + pred: Callable[[_T], object] | None = ..., +) -> _T | _U: ... +def random_product( + *args: Iterable[_T], repeat: int = ... +) -> tuple[_T, ...]: ... +def random_permutation( + iterable: Iterable[_T], r: int | None = ... +) -> tuple[_T, ...]: ... +def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ... +def random_combination_with_replacement( + iterable: Iterable[_T], r: int +) -> tuple[_T, ...]: ... 
+def nth_combination(
+    iterable: Iterable[_T], r: int, index: int
+) -> tuple[_T, ...]: ...
+def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
+def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
+def before_and_after(
+    predicate: Callable[[_T], bool], it: Iterable[_T]
+) -> tuple[Iterator[_T], Iterator[_T]]: ...
+def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
+def sliding_window(
+    iterable: Iterable[_T], n: int
+) -> Iterator[tuple[_T, ...]]: ...
+def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
+def polynomial_from_roots(roots: Sequence[_T]) -> list[_T]: ...
+def iter_index(
+    iterable: Iterable[_T],
+    value: Any,
+    start: int | None = ...,
+    stop: int | None = ...,
+) -> Iterator[int]: ...
+def sieve(n: int) -> Iterator[int]: ...
+def batched(
+    iterable: Iterable[_T], n: int, *, strict: bool = False
+) -> Iterator[tuple[_T, ...]]: ...
+def transpose(
+    it: Iterable[Iterable[_T]],
+) -> Iterator[tuple[_T, ...]]: ...
+def reshape(
+    matrix: Iterable[Iterable[_T]], cols: int
+) -> Iterator[tuple[_T, ...]]: ...
+def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[tuple[_T, ...]]: ...
+def factor(n: int) -> Iterator[int]: ...
+def polynomial_eval(coefficients: Sequence[_T], x: _U) -> _U: ...
+def sum_of_squares(it: Iterable[_T]) -> _T: ...
+def polynomial_derivative(coefficients: Sequence[_T]) -> list[_T]: ...
+def totient(n: int) -> int: ...
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__init__.py b/env-llmeval/lib/python3.10/site-packages/multidict/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..23142eeafdc7d8d4f9797dd88b25f1e8d357f4c4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/multidict/__init__.py
@@ -0,0 +1,48 @@
+"""Multidict implementation.
+
+HTTP headers and URL query strings require a specific data structure:
+a multidict. It behaves mostly like a dict, but it can have several
+values for the same key.
+"""
+
+from ._abc import MultiMapping, MutableMultiMapping
+from ._compat import USE_EXTENSIONS
+
+__all__ = (
+    "MultiMapping",
+    "MutableMultiMapping",
+    "MultiDictProxy",
+    "CIMultiDictProxy",
+    "MultiDict",
+    "CIMultiDict",
+    "upstr",
+    "istr",
+    "getversion",
+)
+
+__version__ = "6.0.5"
+
+
+try:
+    if not USE_EXTENSIONS:
+        raise ImportError
+    from ._multidict import (
+        CIMultiDict,
+        CIMultiDictProxy,
+        MultiDict,
+        MultiDictProxy,
+        getversion,
+        istr,
+    )
+except ImportError: # pragma: no cover
+    from ._multidict_py import (
+        CIMultiDict,
+        CIMultiDictProxy,
+        MultiDict,
+        MultiDictProxy,
+        getversion,
+        istr,
+    )
+
+
+upstr = istr
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/multidict/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..0940340f81e602cf74e74e9384f15d7d12d1389b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/multidict/__init__.pyi
@@ -0,0 +1,152 @@
+import abc
+from typing import (
+    Generic,
+    Iterable,
+    Iterator,
+    Mapping,
+    MutableMapping,
+    TypeVar,
+    overload,
+)
+
+class istr(str): ...
+
+upstr = istr
+
+_S = str | istr
+
+_T = TypeVar("_T")
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+_D = TypeVar("_D")
+
+class MultiMapping(Mapping[_S, _T_co]):
+    @overload
+    @abc.abstractmethod
+    def getall(self, key: _S) -> list[_T_co]: ...
+    @overload
+    @abc.abstractmethod
+    def getall(self, key: _S, default: _D) -> list[_T_co] | _D: ...
+ @overload + @abc.abstractmethod + def getone(self, key: _S) -> _T_co: ... + @overload + @abc.abstractmethod + def getone(self, key: _S, default: _D) -> _T_co | _D: ... + +_Arg = ( + Mapping[str, _T] + | Mapping[istr, _T] + | dict[str, _T] + | dict[istr, _T] + | MultiMapping[_T] + | Iterable[tuple[str, _T]] + | Iterable[tuple[istr, _T]] +) + +class MutableMultiMapping(MultiMapping[_T], MutableMapping[_S, _T], Generic[_T]): + @abc.abstractmethod + def add(self, key: _S, value: _T) -> None: ... + @abc.abstractmethod + def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ... + @overload + @abc.abstractmethod + def popone(self, key: _S) -> _T: ... + @overload + @abc.abstractmethod + def popone(self, key: _S, default: _D) -> _T | _D: ... + @overload + @abc.abstractmethod + def popall(self, key: _S) -> list[_T]: ... + @overload + @abc.abstractmethod + def popall(self, key: _S, default: _D) -> list[_T] | _D: ... + +class MultiDict(MutableMultiMapping[_T], Generic[_T]): + def __init__(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ... + def copy(self) -> MultiDict[_T]: ... + def __getitem__(self, k: _S) -> _T: ... + def __setitem__(self, k: _S, v: _T) -> None: ... + def __delitem__(self, v: _S) -> None: ... + def __iter__(self) -> Iterator[_S]: ... + def __len__(self) -> int: ... + @overload + def getall(self, key: _S) -> list[_T]: ... + @overload + def getall(self, key: _S, default: _D) -> list[_T] | _D: ... + @overload + def getone(self, key: _S) -> _T: ... + @overload + def getone(self, key: _S, default: _D) -> _T | _D: ... + def add(self, key: _S, value: _T) -> None: ... + def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ... + @overload + def popone(self, key: _S) -> _T: ... + @overload + def popone(self, key: _S, default: _D) -> _T | _D: ... + @overload + def popall(self, key: _S) -> list[_T]: ... + @overload + def popall(self, key: _S, default: _D) -> list[_T] | _D: ... + +class CIMultiDict(MutableMultiMapping[_T], Generic[_T]): + def __init__(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ... + def copy(self) -> CIMultiDict[_T]: ... + def __getitem__(self, k: _S) -> _T: ... + def __setitem__(self, k: _S, v: _T) -> None: ... + def __delitem__(self, v: _S) -> None: ... + def __iter__(self) -> Iterator[_S]: ... + def __len__(self) -> int: ... + @overload + def getall(self, key: _S) -> list[_T]: ... + @overload + def getall(self, key: _S, default: _D) -> list[_T] | _D: ... + @overload + def getone(self, key: _S) -> _T: ... + @overload + def getone(self, key: _S, default: _D) -> _T | _D: ... + def add(self, key: _S, value: _T) -> None: ... + def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ... + @overload + def popone(self, key: _S) -> _T: ... + @overload + def popone(self, key: _S, default: _D) -> _T | _D: ... + @overload + def popall(self, key: _S) -> list[_T]: ... + @overload + def popall(self, key: _S, default: _D) -> list[_T] | _D: ... + +class MultiDictProxy(MultiMapping[_T], Generic[_T]): + def __init__(self, arg: MultiMapping[_T] | MutableMultiMapping[_T]) -> None: ... + def copy(self) -> MultiDict[_T]: ... + def __getitem__(self, k: _S) -> _T: ... + def __iter__(self) -> Iterator[_S]: ... + def __len__(self) -> int: ... + @overload + def getall(self, key: _S) -> list[_T]: ... + @overload + def getall(self, key: _S, default: _D) -> list[_T] | _D: ... + @overload + def getone(self, key: _S) -> _T: ... + @overload + def getone(self, key: _S, default: _D) -> _T | _D: ... 
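+
+# A minimal runtime sketch of the multi-value API declared above, for
+# orientation only (the values shown follow the pure-Python
+# implementation in _multidict_py.py):
+#
+#     md = MultiDict([('a', 1), ('a', 2)])
+#     md.getall('a')              # [1, 2]
+#     md.getone('a')              # 1
+#     proxy = MultiDictProxy(md)  # read-only view over md's storage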
+
+class CIMultiDictProxy(MultiMapping[_T], Generic[_T]):
+    def __init__(self, arg: MultiMapping[_T] | MutableMultiMapping[_T]) -> None: ...
+    def __getitem__(self, k: _S) -> _T: ...
+    def __iter__(self) -> Iterator[_S]: ...
+    def __len__(self) -> int: ...
+    @overload
+    def getall(self, key: _S) -> list[_T]: ...
+    @overload
+    def getall(self, key: _S, default: _D) -> list[_T] | _D: ...
+    @overload
+    def getone(self, key: _S) -> _T: ...
+    @overload
+    def getone(self, key: _S, default: _D) -> _T | _D: ...
+    def copy(self) -> CIMultiDict[_T]: ...
+
+def getversion(
+    md: MultiDict[_T] | CIMultiDict[_T] | MultiDictProxy[_T] | CIMultiDictProxy[_T],
+) -> int: ...
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9756fb30203d99f9c6f5a137a9995ccf79db46c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_abc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_abc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15ae2fb50b3007c812e98c8950fb1ff750ffd296
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_abc.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91b96cec652181e45233565dd292fa8e5c7c0a5e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_compat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_multidict_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_multidict_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b0f94d4a2e0e19f61f5af761bac4510bf48550d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_multidict_base.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_multidict_py.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_multidict_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b64526dc5ad69dc126c8d92ffa023919304afce
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multidict/__pycache__/_multidict_py.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/_abc.py b/env-llmeval/lib/python3.10/site-packages/multidict/_abc.py
new file mode 100644
index 0000000000000000000000000000000000000000..0603cdd2447eb8a55584802a7b7c475b12bfc9c5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/multidict/_abc.py
@@ -0,0 +1,48 @@
+import abc
+import sys
+import types
+from collections.abc import Mapping, MutableMapping
+
+
+class _TypingMeta(abc.ABCMeta):
+    # A fake metaclass to satisfy typing dependencies at runtime:
+    # generic-like type instantiations such as MultiMapping[str]
+    # are emulated.
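+    # For example, on Python 3.9 and later ``MultiMapping[str]`` evaluates
+    # to ``types.GenericAlias(MultiMapping, str)``; on older versions the
+    # class itself is returned unchanged (see __getitem__ below).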
+ # Note: real type hints are provided by __init__.pyi stub file + if sys.version_info >= (3, 9): + + def __getitem__(self, key): + return types.GenericAlias(self, key) + + else: + + def __getitem__(self, key): + return self + + +class MultiMapping(Mapping, metaclass=_TypingMeta): + @abc.abstractmethod + def getall(self, key, default=None): + raise KeyError + + @abc.abstractmethod + def getone(self, key, default=None): + raise KeyError + + +class MutableMultiMapping(MultiMapping, MutableMapping): + @abc.abstractmethod + def add(self, key, value): + raise NotImplementedError + + @abc.abstractmethod + def extend(self, *args, **kwargs): + raise NotImplementedError + + @abc.abstractmethod + def popone(self, key, default=None): + raise KeyError + + @abc.abstractmethod + def popall(self, key, default=None): + raise KeyError diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/_compat.py b/env-llmeval/lib/python3.10/site-packages/multidict/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..d1ff392b25904a78c84292af3c12a46ad4767943 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multidict/_compat.py @@ -0,0 +1,14 @@ +import os +import platform + +NO_EXTENSIONS = bool(os.environ.get("MULTIDICT_NO_EXTENSIONS")) + +PYPY = platform.python_implementation() == "PyPy" + +USE_EXTENSIONS = not NO_EXTENSIONS and not PYPY + +if USE_EXTENSIONS: + try: + from . import _multidict # noqa + except ImportError: + USE_EXTENSIONS = False diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/_multidict.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/multidict/_multidict.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..040bb2051eefb95d8a6035dcffecf60d981a1662 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/multidict/_multidict.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/_multidict_base.py b/env-llmeval/lib/python3.10/site-packages/multidict/_multidict_base.py new file mode 100644 index 0000000000000000000000000000000000000000..394466548cb2693f0972f1a581079a07f87bf3e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multidict/_multidict_base.py @@ -0,0 +1,144 @@ +from collections.abc import ItemsView, Iterable, KeysView, Set, ValuesView + + +def _abc_itemsview_register(view_cls): + ItemsView.register(view_cls) + + +def _abc_keysview_register(view_cls): + KeysView.register(view_cls) + + +def _abc_valuesview_register(view_cls): + ValuesView.register(view_cls) + + +def _viewbaseset_richcmp(view, other, op): + if op == 0: # < + if not isinstance(other, Set): + return NotImplemented + return len(view) < len(other) and view <= other + elif op == 1: # <= + if not isinstance(other, Set): + return NotImplemented + if len(view) > len(other): + return False + for elem in view: + if elem not in other: + return False + return True + elif op == 2: # == + if not isinstance(other, Set): + return NotImplemented + return len(view) == len(other) and view <= other + elif op == 3: # != + return not view == other + elif op == 4: # > + if not isinstance(other, Set): + return NotImplemented + return len(view) > len(other) and view >= other + elif op == 5: # >= + if not isinstance(other, Set): + return NotImplemented + if len(view) < len(other): + return False + for elem in other: + if elem not in view: + return False + return True + + +def _viewbaseset_and(view, other): + if not isinstance(other, Iterable): + 
return NotImplemented + if isinstance(view, Set): + view = set(iter(view)) + if isinstance(other, Set): + other = set(iter(other)) + if not isinstance(other, Set): + other = set(iter(other)) + return view & other + + +def _viewbaseset_or(view, other): + if not isinstance(other, Iterable): + return NotImplemented + if isinstance(view, Set): + view = set(iter(view)) + if isinstance(other, Set): + other = set(iter(other)) + if not isinstance(other, Set): + other = set(iter(other)) + return view | other + + +def _viewbaseset_sub(view, other): + if not isinstance(other, Iterable): + return NotImplemented + if isinstance(view, Set): + view = set(iter(view)) + if isinstance(other, Set): + other = set(iter(other)) + if not isinstance(other, Set): + other = set(iter(other)) + return view - other + + +def _viewbaseset_xor(view, other): + if not isinstance(other, Iterable): + return NotImplemented + if isinstance(view, Set): + view = set(iter(view)) + if isinstance(other, Set): + other = set(iter(other)) + if not isinstance(other, Set): + other = set(iter(other)) + return view ^ other + + +def _itemsview_isdisjoint(view, other): + "Return True if two sets have a null intersection." + for v in other: + if v in view: + return False + return True + + +def _itemsview_repr(view): + lst = [] + for k, v in view: + lst.append("{!r}: {!r}".format(k, v)) + body = ", ".join(lst) + return "{}({})".format(view.__class__.__name__, body) + + +def _keysview_isdisjoint(view, other): + "Return True if two sets have a null intersection." + for k in other: + if k in view: + return False + return True + + +def _keysview_repr(view): + lst = [] + for k in view: + lst.append("{!r}".format(k)) + body = ", ".join(lst) + return "{}({})".format(view.__class__.__name__, body) + + +def _valuesview_repr(view): + lst = [] + for v in view: + lst.append("{!r}".format(v)) + body = ", ".join(lst) + return "{}({})".format(view.__class__.__name__, body) + + +def _mdrepr(md): + lst = [] + for k, v in md.items(): + lst.append("'{}': {!r}".format(k, v)) + body = ", ".join(lst) + return "<{}({})>".format(md.__class__.__name__, body) diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/_multidict_py.py b/env-llmeval/lib/python3.10/site-packages/multidict/_multidict_py.py new file mode 100644 index 0000000000000000000000000000000000000000..79c45aa19c52080c1a15d783accd5bbfdd416489 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multidict/_multidict_py.py @@ -0,0 +1,527 @@ +import sys +import types +from array import array +from collections import abc + +from ._abc import MultiMapping, MutableMultiMapping + +_marker = object() + +if sys.version_info >= (3, 9): + GenericAlias = types.GenericAlias +else: + + def GenericAlias(cls): + return cls + + +class istr(str): + + """Case insensitive str.""" + + __is_istr__ = True + + +upstr = istr # for relaxing backward compatibility problems + + +def getversion(md): + if not isinstance(md, _Base): + raise TypeError("Parameter should be multidict or proxy") + return md._impl._version + + +_version = array("Q", [0]) + + +class _Impl: + __slots__ = ("_items", "_version") + + def __init__(self): + self._items = [] + self.incr_version() + + def incr_version(self): + global _version + v = _version + v[0] += 1 + self._version = v[0] + + if sys.implementation.name != "pypy": + + def __sizeof__(self): + return object.__sizeof__(self) + sys.getsizeof(self._items) + + +class _Base: + def _title(self, key): + return key + + def getall(self, key, default=_marker): + """Return a list of all 
values matching the key."""
+        identity = self._title(key)
+        res = [v for i, k, v in self._impl._items if i == identity]
+        if res:
+            return res
+        if default is not _marker:
+            return default
+        raise KeyError("Key not found: %r" % key)
+
+    def getone(self, key, default=_marker):
+        """Get first value matching the key.
+
+        Raises KeyError if the key is not found and no default is provided.
+        """
+        identity = self._title(key)
+        for i, k, v in self._impl._items:
+            if i == identity:
+                return v
+        if default is not _marker:
+            return default
+        raise KeyError("Key not found: %r" % key)
+
+    # Mapping interface #
+
+    def __getitem__(self, key):
+        return self.getone(key)
+
+    def get(self, key, default=None):
+        """Get first value matching the key.
+
+        If the key is not found, returns the default (or None if no default is provided).
+        """
+        return self.getone(key, default)
+
+    def __iter__(self):
+        return iter(self.keys())
+
+    def __len__(self):
+        return len(self._impl._items)
+
+    def keys(self):
+        """Return a new view of the dictionary's keys."""
+        return _KeysView(self._impl)
+
+    def items(self):
+        """Return a new view of the dictionary's items ((key, value) pairs)."""
+        return _ItemsView(self._impl)
+
+    def values(self):
+        """Return a new view of the dictionary's values."""
+        return _ValuesView(self._impl)
+
+    def __eq__(self, other):
+        if not isinstance(other, abc.Mapping):
+            return NotImplemented
+        if isinstance(other, _Base):
+            lft = self._impl._items
+            rht = other._impl._items
+            if len(lft) != len(rht):
+                return False
+            for (i1, k1, v1), (i2, k2, v2) in zip(lft, rht):
+                if i1 != i2 or v1 != v2:
+                    return False
+            return True
+        if len(self._impl._items) != len(other):
+            return False
+        for k, v in self.items():
+            nv = other.get(k, _marker)
+            if v != nv:
+                return False
+        return True
+
+    def __contains__(self, key):
+        identity = self._title(key)
+        for i, k, v in self._impl._items:
+            if i == identity:
+                return True
+        return False
+
+    def __repr__(self):
+        body = ", ".join("'{}': {!r}".format(k, v) for k, v in self.items())
+        return "<{}({})>".format(self.__class__.__name__, body)
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+class MultiDictProxy(_Base, MultiMapping):
+    """Read-only proxy for MultiDict instance."""
+
+    def __init__(self, arg):
+        if not isinstance(arg, (MultiDict, MultiDictProxy)):
+            raise TypeError(
+                "ctor requires MultiDict or MultiDictProxy instance"
+                ", not {}".format(type(arg))
+            )
+
+        self._impl = arg._impl
+
+    def __reduce__(self):
+        raise TypeError("can't pickle {} objects".format(self.__class__.__name__))
+
+    def copy(self):
+        """Return a copy of itself."""
+        return MultiDict(self.items())
+
+
+class CIMultiDictProxy(MultiDictProxy):
+    """Read-only proxy for CIMultiDict instance."""
+
+    def __init__(self, arg):
+        if not isinstance(arg, (CIMultiDict, CIMultiDictProxy)):
+            raise TypeError(
+                "ctor requires CIMultiDict or CIMultiDictProxy instance"
+                ", not {}".format(type(arg))
+            )
+
+        self._impl = arg._impl
+
+    def _title(self, key):
+        return key.title()
+
+    def copy(self):
+        """Return a copy of itself."""
+        return CIMultiDict(self.items())
+
+
+class MultiDict(_Base, MutableMultiMapping):
+    """Dictionary with the support for duplicate keys."""
+
+    def __init__(self, *args, **kwargs):
+        self._impl = _Impl()
+
+        self._extend(args, kwargs, self.__class__.__name__, self._extend_items)
+
+    if sys.implementation.name != "pypy":
+
+        def __sizeof__(self):
+            return object.__sizeof__(self) + sys.getsizeof(self._impl)
+
+    def __reduce__(self):
+        return (self.__class__, (list(self.items()),))
+
+    def _title(self, key):
+        return key
+
+    def _key(self, key):
+        if isinstance(key, str):
+            return key
+        else:
+            raise TypeError(
+                "MultiDict keys should be either str or subclasses of str"
+            )
+
+    def add(self, key, value):
+        identity = self._title(key)
+        self._impl._items.append((identity, self._key(key), value))
+        self._impl.incr_version()
+
+    def copy(self):
+        """Return a copy of itself."""
+        cls = self.__class__
+        return cls(self.items())
+
+    __copy__ = copy
+
+    def extend(self, *args, **kwargs):
+        """Extend current MultiDict with more values.
+
+        This method must be used instead of update.
+        """
+        self._extend(args, kwargs, "extend", self._extend_items)
+
+    def _extend(self, args, kwargs, name, method):
+        if len(args) > 1:
+            raise TypeError(
+                "{} takes at most 1 positional argument"
+                " ({} given)".format(name, len(args))
+            )
+        if args:
+            arg = args[0]
+            if isinstance(args[0], (MultiDict, MultiDictProxy)) and not kwargs:
+                items = arg._impl._items
+            else:
+                if hasattr(arg, "items"):
+                    arg = arg.items()
+                if kwargs:
+                    arg = list(arg)
+                    arg.extend(list(kwargs.items()))
+                items = []
+                for item in arg:
+                    if not len(item) == 2:
+                        raise TypeError(
+                            "{} takes either dict or list of (key, value) "
+                            "tuples".format(name)
+                        )
+                    items.append((self._title(item[0]), self._key(item[0]), item[1]))
+
+            method(items)
+        else:
+            method(
+                [
+                    (self._title(key), self._key(key), value)
+                    for key, value in kwargs.items()
+                ]
+            )
+
+    def _extend_items(self, items):
+        for identity, key, value in items:
+            self.add(key, value)
+
+    def clear(self):
+        """Remove all items from MultiDict."""
+        self._impl._items.clear()
+        self._impl.incr_version()
+
+    # Mapping interface #
+
+    def __setitem__(self, key, value):
+        self._replace(key, value)
+
+    def __delitem__(self, key):
+        identity = self._title(key)
+        items = self._impl._items
+        found = False
+        for i in range(len(items) - 1, -1, -1):
+            if items[i][0] == identity:
+                del items[i]
+                found = True
+        if not found:
+            raise KeyError(key)
+        else:
+            self._impl.incr_version()
+
+    def setdefault(self, key, default=None):
+        """Return value for key, set value to default if key is not present."""
+        identity = self._title(key)
+        for i, k, v in self._impl._items:
+            if i == identity:
+                return v
+        self.add(key, default)
+        return default
+
+    def popone(self, key, default=_marker):
+        """Remove specified key and return the corresponding value.
+
+        If key is not found, *default* is returned if given, otherwise
+        KeyError is raised.
+
+        """
+        identity = self._title(key)
+        for i in range(len(self._impl._items)):
+            if self._impl._items[i][0] == identity:
+                value = self._impl._items[i][2]
+                del self._impl._items[i]
+                self._impl.incr_version()
+                return value
+        if default is _marker:
+            raise KeyError(key)
+        else:
+            return default
+
+    pop = popone  # type: ignore
+
+    def popall(self, key, default=_marker):
+        """Remove all occurrences of key and return the list of corresponding
+        values.
+
+        If key is not found, *default* is returned if given, otherwise
+        KeyError is raised.
+ + """ + found = False + identity = self._title(key) + ret = [] + for i in range(len(self._impl._items) - 1, -1, -1): + item = self._impl._items[i] + if item[0] == identity: + ret.append(item[2]) + del self._impl._items[i] + self._impl.incr_version() + found = True + if not found: + if default is _marker: + raise KeyError(key) + else: + return default + else: + ret.reverse() + return ret + + def popitem(self): + """Remove and return an arbitrary (key, value) pair.""" + if self._impl._items: + i = self._impl._items.pop(0) + self._impl.incr_version() + return i[1], i[2] + else: + raise KeyError("empty multidict") + + def update(self, *args, **kwargs): + """Update the dictionary from *other*, overwriting existing keys.""" + self._extend(args, kwargs, "update", self._update_items) + + def _update_items(self, items): + if not items: + return + used_keys = {} + for identity, key, value in items: + start = used_keys.get(identity, 0) + for i in range(start, len(self._impl._items)): + item = self._impl._items[i] + if item[0] == identity: + used_keys[identity] = i + 1 + self._impl._items[i] = (identity, key, value) + break + else: + self._impl._items.append((identity, key, value)) + used_keys[identity] = len(self._impl._items) + + # drop tails + i = 0 + while i < len(self._impl._items): + item = self._impl._items[i] + identity = item[0] + pos = used_keys.get(identity) + if pos is None: + i += 1 + continue + if i >= pos: + del self._impl._items[i] + else: + i += 1 + + self._impl.incr_version() + + def _replace(self, key, value): + key = self._key(key) + identity = self._title(key) + items = self._impl._items + + for i in range(len(items)): + item = items[i] + if item[0] == identity: + items[i] = (identity, key, value) + # i points to last found item + rgt = i + self._impl.incr_version() + break + else: + self._impl._items.append((identity, key, value)) + self._impl.incr_version() + return + + # remove all tail items + i = rgt + 1 + while i < len(items): + item = items[i] + if item[0] == identity: + del items[i] + else: + i += 1 + + +class CIMultiDict(MultiDict): + """Dictionary with the support for duplicate case-insensitive keys.""" + + def _title(self, key): + return key.title() + + +class _Iter: + __slots__ = ("_size", "_iter") + + def __init__(self, size, iterator): + self._size = size + self._iter = iterator + + def __iter__(self): + return self + + def __next__(self): + return next(self._iter) + + def __length_hint__(self): + return self._size + + +class _ViewBase: + def __init__(self, impl): + self._impl = impl + + def __len__(self): + return len(self._impl._items) + + +class _ItemsView(_ViewBase, abc.ItemsView): + def __contains__(self, item): + assert isinstance(item, tuple) or isinstance(item, list) + assert len(item) == 2 + for i, k, v in self._impl._items: + if item[0] == k and item[1] == v: + return True + return False + + def __iter__(self): + return _Iter(len(self), self._iter(self._impl._version)) + + def _iter(self, version): + for i, k, v in self._impl._items: + if version != self._impl._version: + raise RuntimeError("Dictionary changed during iteration") + yield k, v + + def __repr__(self): + lst = [] + for item in self._impl._items: + lst.append("{!r}: {!r}".format(item[1], item[2])) + body = ", ".join(lst) + return "{}({})".format(self.__class__.__name__, body) + + +class _ValuesView(_ViewBase, abc.ValuesView): + def __contains__(self, value): + for item in self._impl._items: + if item[2] == value: + return True + return False + + def __iter__(self): + return _Iter(len(self), 
self._iter(self._impl._version)) + + def _iter(self, version): + for item in self._impl._items: + if version != self._impl._version: + raise RuntimeError("Dictionary changed during iteration") + yield item[2] + + def __repr__(self): + lst = [] + for item in self._impl._items: + lst.append("{!r}".format(item[2])) + body = ", ".join(lst) + return "{}({})".format(self.__class__.__name__, body) + + +class _KeysView(_ViewBase, abc.KeysView): + def __contains__(self, key): + for item in self._impl._items: + if item[1] == key: + return True + return False + + def __iter__(self): + return _Iter(len(self), self._iter(self._impl._version)) + + def _iter(self, version): + for item in self._impl._items: + if version != self._impl._version: + raise RuntimeError("Dictionary changed during iteration") + yield item[1] + + def __repr__(self): + lst = [] + for item in self._impl._items: + lst.append("{!r}".format(item[1])) + body = ", ".join(lst) + return "{}({})".format(self.__class__.__name__, body) diff --git a/env-llmeval/lib/python3.10/site-packages/multidict/py.typed b/env-llmeval/lib/python3.10/site-packages/multidict/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..dfe8cc048e7100a97025b954fffa31e4ff859a7d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/multidict/py.typed @@ -0,0 +1 @@ +PEP-561 marker. \ No newline at end of file diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/License.txt b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. 
+
+
+Default Install Location of CUDA Toolkit
+
+Windows platform:
+
+%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#
+
+
+NVIDIA CUDA Samples
+
+
+Description
+
+This package includes over 100+ CUDA examples that demonstrate
+various CUDA programming principles, and efficient CUDA
+implementation of algorithms in specific application domains.
+
+
+Default Install Location of CUDA Samples
+
+Windows platform:
+
+%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#/samples
+
+and
+
+$HOME/NVIDIA_CUDA-#.#_Samples
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#/samples
+
+
+NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+Description
+
+NVIDIA Nsight Development Platform, Visual Studio Edition is a
+development environment integrated into Microsoft Visual
+Studio that provides tools for debugging, profiling, analyzing
+and optimizing your GPU computing and graphics applications.
+
+
+Default Install Location of Nsight Visual Studio Edition
+
+Windows platform:
+
+%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+1. License Agreement for NVIDIA Software Development Kits
+---------------------------------------------------------
+
+
+Release Date: July 26, 2018
+---------------------------
+
+
+Important Notice: Read before downloading, installing,
+copying or using the licensed software:
+-------------------------------------------------------
+
+This license agreement, including exhibits attached
+(“Agreement”) is a legal agreement between you and NVIDIA
+Corporation ("NVIDIA") and governs your use of a NVIDIA
+software development kit (“SDK”).
+
+Each SDK has its own set of software and materials, but here
+is a description of the types of items that may be included in
+a SDK: source code, header files, APIs, data sets and assets
+(examples include images, textures, models, scenes, videos,
+native API input/output files), binary software, sample code,
+libraries, utility programs, programming code and
+documentation.
+
+This Agreement can be accepted only by an adult of legal age
+of majority in the country in which the SDK is used.
+
+If you are entering into this Agreement on behalf of a company
+or other legal entity, you represent that you have the legal
+authority to bind the entity to this Agreement, in which case
+“you” will mean the entity you represent.
+
+If you don’t have the required age or authority to accept
+this Agreement, or if you don’t accept all the terms and
+conditions of this Agreement, do not download, install or use
+the SDK.
+
+You agree to use the SDK only for purposes that are permitted
+by (a) this Agreement, and (b) any applicable law, regulation
+or generally accepted practices or guidelines in the relevant
+jurisdictions.
+
+
+1.1. License
+
+
+1.1.1. License Grant
+
+Subject to the terms of this Agreement, NVIDIA hereby grants
+you a non-exclusive, non-transferable license, without the
+right to sublicense (except as expressly provided in this
+Agreement) to:
+
+  1. Install and use the SDK,
+
+  2. Modify and create derivative works of sample source code
+     delivered in the SDK, and
+
+  3. Distribute those portions of the SDK that are identified
+     in this Agreement as distributable, as incorporated in
+     object code format into a software application that meets
+     the distribution requirements indicated in this Agreement.
+
+
+1.1.2.
Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. 
Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. 
You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. 
(iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. 
Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. 
Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, 
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so,
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component
+
+NVIDIA JPEG Library
+
+Linux
+
+libnvjpeg.so, libnvjpeg_static.a
+
+Component
+
+Internal common library required for statically linking to
+cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+Mac OSX
+
+libculibos.a
+
+Linux
+
+libculibos.a
+
+Component
+
+NVIDIA Runtime Compilation Library and Header
+
+All
+
+nvrtc.h
+
+Windows
+
+nvrtc.dll, nvrtc-builtins.dll
+
+Mac OSX
+
+libnvrtc.dylib, libnvrtc-builtins.dylib
+
+Linux
+
+libnvrtc.so, libnvrtc-builtins.so
+
+Component
+
+NVIDIA Optimizing Compiler Library
+
+Windows
+
+nvvm.dll
+
+Mac OSX
+
+libnvvm.dylib
+
+Linux
+
+libnvvm.so
+
+Component
+
+NVIDIA Common Device Math Functions Library
+
+Windows
+
+libdevice.10.bc
+
+Mac OSX
+
+libdevice.10.bc
+
+Linux
+
+libdevice.10.bc
+
+Component
+
+CUDA Occupancy Calculation Header Library
+
+All
+
+cuda_occupancy.h
+
+Component
+
+CUDA Half Precision Headers
+
+All
+
+cuda_fp16.h, cuda_fp16.hpp
+
+Component
+
+CUDA Profiling Tools Interface (CUPTI) Library
+
+Windows
+
+cupti.dll
+
+Mac OSX
+
+libcupti.dylib
+
+Linux
+
+libcupti.so
+
+Component
+
+NVIDIA Tools Extension Library
+
+Windows
+
+nvToolsExt.dll, nvToolsExt.lib
+
+Mac OSX
+
+libnvToolsExt.dylib
+
+Linux
+
+libnvToolsExt.so
+
+Component
+
+NVIDIA CUDA Driver Libraries
+
+Linux
+
+libcuda.so, libnvidia-fatbinaryloader.so,
+libnvidia-ptxjitcompiler.so
+
+The NVIDIA CUDA Driver Libraries are only distributable in
+applications that meet these criteria:
+
+  1. The application was developed starting from a NVIDIA CUDA
+     container obtained from Docker Hub or the NVIDIA GPU
+     Cloud, and
+
+  2. The resulting application is packaged as a Docker
+     container and distributed to users on Docker Hub or the
+     NVIDIA GPU Cloud only.
+
+
+2.7. Attachment B
+
+
+Additional Licensing Obligations
+
+The following third party components included in the SOFTWARE
+are licensed to Licensee pursuant to the following terms and
+conditions:
+
+  1. Licensee's use of the GDB third party component is
+     subject to the terms and conditions of GNU GPL v3:
+
+     This product includes copyrighted third-party software licensed
+     under the terms of the GNU General Public License v3 ("GPL v3").
+     All third-party software packages are copyright by their respective
+     authors. GPL v3 terms and conditions are hereby incorporated into
+     the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+     Consistent with these licensing requirements, the software
+     listed below is provided under the terms of the specified
+     open source software licenses. To obtain source code for
+     software provided under licenses that require
+     redistribution of source code, including the GNU General
+     Public License (GPL) and GNU Lesser General Public License
+     (LGPL), contact oss-requests@nvidia.com. This offer is
+     valid for a period of three (3) years from the date of the
+     distribution of this product by NVIDIA CORPORATION.
+
+     Component          License
+     CUDA-GDB           GPL v3
+
+  2. Licensee represents and warrants that any and all third
+     party licensing and/or royalty payment obligations in
+     connection with Licensee's use of the H.264 video codecs
+     are solely the responsibility of Licensee.
+
+  3. Licensee's use of the Thrust library is subject to the
+     terms and conditions of the Apache License Version 2.0.
+     All third-party software packages are copyright by their
+     respective authors.
Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. + + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. 
Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. 
Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. 
Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. 
The NPP library uses code from the Boost Math Toolkit,
+      and is subject to the following license:
+
+      Boost Software License - Version 1.0 - August 17th, 2003
+      . . . .
+
+      Permission is hereby granted, free of charge, to any person or
+      organization obtaining a copy of the software and accompanying
+      documentation covered by this license (the "Software") to use,
+      reproduce, display, distribute, execute, and transmit the Software,
+      and to prepare derivative works of the Software, and to permit
+      third-parties to whom the Software is furnished to do so, all
+      subject to the following:
+
+      The copyright notices in the Software and this entire statement,
+      including the above license grant, this restriction and the following
+      disclaimer, must be included in all copies of the Software, in whole
+      or in part, and all derivative works of the Software, unless such
+      copies or derivative works are solely in the form of machine-executable
+      object code generated by a source language processor.
+
+      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+      EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+      NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+      ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+      OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+      FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+      OTHER DEALINGS IN THE SOFTWARE.
+
+  17. Portions of the Nsight Eclipse Edition are subject to the
+      following license:
+
+      The Eclipse Foundation makes available all content in this plug-in
+      ("Content"). Unless otherwise indicated below, the Content is provided
+      to you under the terms and conditions of the Eclipse Public License
+      Version 1.0 ("EPL"). A copy of the EPL is available at http://
+      www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
+      will mean the Content.
+
+      If you did not receive this Content directly from the Eclipse
+      Foundation, the Content is being redistributed by another party
+      ("Redistributor") and different terms and conditions may apply to your
+      use of any object code in the Content. Check the Redistributor's
+      license that was provided with the Content. If no such license exists,
+      contact the Redistributor. Unless otherwise indicated below, the terms
+      and conditions of the EPL still apply to any source code in the
+      Content and such source code may be obtained at http://www.eclipse.org.
+
+  18. Some of the cuBLAS library routines use code from
+      OpenAI, which is subject to the following license:
+
+      License URL
+      https://github.com/openai/openai-gemm/blob/master/LICENSE
+
+      License Text
+      The MIT License
+
+      Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
+
+      Permission is hereby granted, free of charge, to any person obtaining a copy
+      of this software and associated documentation files (the "Software"), to deal
+      in the Software without restriction, including without limitation the rights
+      to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+      copies of the Software, and to permit persons to whom the Software is
+      furnished to do so, subject to the following conditions:
+
+      The above copyright notice and this permission notice shall be included in
+      all copies or substantial portions of the Software.
+
+      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+      IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+      FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+      AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+      LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+      OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+      THE SOFTWARE.
+
+  19. Licensee's use of the Visual Studio Setup Configuration
+      Samples is subject to the following license:
+
+      The MIT License (MIT)
+      Copyright (C) Microsoft Corporation. All rights reserved.
+
+      Permission is hereby granted, free of charge, to any person
+      obtaining a copy of this software and associated documentation
+      files (the "Software"), to deal in the Software without restriction,
+      including without limitation the rights to use, copy, modify, merge,
+      publish, distribute, sublicense, and/or sell copies of the Software,
+      and to permit persons to whom the Software is furnished to do so,
+      subject to the following conditions:
+
+      The above copyright notice and this permission notice shall be included
+      in all copies or substantial portions of the Software.
+
+      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+      FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+      AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+      LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+      OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+  20. Licensee's use of the linmath.h header for CPU functions for
+      GL vector/matrix operations from LunarG is subject to the
+      Apache License Version 2.0.
+
+  21. The DX12-CUDA sample uses the d3dx12.h header, which is
+      subject to the MIT license.
+ +----------------- diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..468bc77c74622511aeaf7188c5f42b82819ce831 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/METADATA @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: nvidia-cuda-nvrtc-cu12 +Version: 12.1.105 +Summary: NVRTC native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt + +NVRTC native runtime libraries diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..74a7af8f265dc7185a048f89cae5dae98cc6c48f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/RECORD @@ -0,0 +1,17 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cuda_nvrtc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc,, +nvidia/cuda_nvrtc/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cuda_nvrtc/include/nvrtc.h,sha256=iNclpPXz5S0Gu4RUfVpW30spNLF9VcuhNLg-JS3gxKs,34200 +nvidia/cuda_nvrtc/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1,sha256=bFY5zjl6n1uCzSd0MtFGNwZ0NYM0pM4NM_qaXKCQrIo,6842248 +nvidia/cuda_nvrtc/lib/libnvrtc.so.12,sha256=g-ya13deifYoAoa6EeudKMr-ScL3d6PgUbzIgd50Sfw,56875328 +nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 
+nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/METADATA,sha256=GunIC2DggSlw3gc15QJPreXg5CdAN6HOqdvSLF9Y62s,1507 +nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/RECORD,, +nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu12-12.1.105.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..209eaf3505f087ed72d58b083175395b2a62036f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ad6a3576fcdcd77c8d8cb83a6aebf332ab423ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69892f19246303da6e056c65e5be143f4dc09a57 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/sacrebleu.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/sacrebleu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e27a54d1bc3e6c2af7d869ad5d8815a2def6573 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/sacrebleu.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/significance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/significance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f093e4c12cdbba5d02a194cfb4bd001eebbb64e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/significance.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c98c5006de3ba2f9be93749f5e0a04084b9e810f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..247839551770ff8ae24a7f51c282d1e07fbe8d77 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py @@ -0,0 +1,1961 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + + +# This defines data locations. +# Right below are test sets. +# Beneath each test set, we define the location to download the test data. +# The other keys are each language pair contained in the tarball, and the respective locations of the source and reference data within each. +# Many of these are *.sgm files, which are processed to produced plain text that can be used by this script. +# The canonical location of unpacked, processed data is $SACREBLEU_DIR/$TEST/$SOURCE-$TARGET.{$SOURCE,$TARGET} +from .fake_sgml import FakeSGMLDataset, WMTAdditionDataset +from .iwslt_xml import IWSLTXMLDataset +from .plain_text import PlainTextDataset +from .tsv import TSVDataset +from .wmt_xml import WMTXMLDataset + +# Detailed document metadata annotation in form DocumentID -> CountryCode - Domain - OptionalFinegrainedCountryCode +# While the annotation is subjective with many unclear cases, it may provide useful insights +# when applied on large data (TODO: annotate all documents from recent WMT years, at least for origlang=en, consider renaming "world" to "other"). 
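+# For example, in the wmt18 string below, "rt.com.68098=US-crime" tags document
+# rt.com.68098 with country code US and domain crime, and
+# "timemagazine.75207=OTHER-world-ID" additionally carries the optional
+# fine-grained country code ID. SUBSETS (defined after _SUBSETS) parses each
+# whitespace-separated "DocumentID=annotation" pair into a per-test-set dict;
+# COUNTRIES and DOMAINS collect the sorted unique first and second annotation
+# fields seen in the wmt19 subset.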
+_SUBSETS = { + "wmt18": "rt.com.68098=US-crime guardian.181611=US-politics bbc.310963=GB-sport washpost.116881=US-politics scotsman.104228=GB-sport timemagazine.75207=OTHER-world-ID " + "euronews-en.117981=OTHER-crime-AE smh.com.au.242810=US-crime msnbc.53726=US-politics euronews-en.117983=US-politics msnbc.53894=US-crime theglobeandmail.com.62700=US-business " + "bbc.310870=OTHER-world-AF reuters.196698=US-politics latimes.231739=US-sport thelocal.51929=OTHER-world-SE cbsnews.198694=US-politics reuters.196718=OTHER-sport-RU " + "abcnews.255599=EU-sport nytimes.127256=US-entertainment scotsman.104225=GB-politics dailymail.co.uk.233026=GB-scitech independent.181088=GB-entertainment " + "brisbanetimes.com.au.181614=OTHER-business-AU washpost.116837=US-politics dailymail.co.uk.232928=GB-world thelocal.51916=OTHER-politics-IT bbc.310871=US-crime " + "nytimes.127392=EU-business-DE euronews-en.118001=EU-scitech-FR washpost.116866=OTHER-crime-MX dailymail.co.uk.233025=OTHER-scitech-CA latimes.231829=US-crime " + "guardian.181662=US-entertainment msnbc.53731=US-crime rt.com.68127=OTHER-sport-RU latimes.231782=US-business latimes.231840=US-sport reuters.196711=OTHER-scitech " + "guardian.181666=GB-entertainment novinite.com.24019=US-politics smh.com.au.242750=OTHER-scitech guardian.181610=US-politics telegraph.364393=OTHER-crime-ZA " + "novinite.com.23995=EU-world dailymail.co.uk.233028=GB-scitech independent.181071=GB-sport telegraph.364538=GB-scitech timemagazine.75193=US-politics " + "independent.181096=US-entertainment upi.140602=OTHER-world-AF bbc.310946=GB-business independent.181052=EU-sport ", + "wmt19": "bbc.381790=GB-politics rt.com.91337=OTHER-politics-MK nytimes.184853=US-world upi.176266=US-crime guardian.221754=GB-business dailymail.co.uk.298595=GB-business " + "cnbc.com.6790=US-politics nytimes.184837=OTHER-world-ID upi.176249=GB-sport euronews-en.153835=OTHER-world-ID dailymail.co.uk.298732=GB-crime telegraph.405401=GB-politics " + "newsweek.51331=OTHER-crime-CN abcnews.306815=US-world cbsnews.248384=US-politics reuters.218882=GB-politics cbsnews.248387=US-crime abcnews.306764=OTHER-world-MX " + "reuters.218888=EU-politics bbc.381780=GB-crime bbc.381746=GB-sport euronews-en.153800=EU-politics bbc.381679=GB-crime bbc.381735=GB-crime newsweek.51338=US-world " + "bbc.381765=GB-crime cnn.304489=US-politics reuters.218863=OTHER-world-ID nytimes.184860=OTHER-world-ID cnn.304404=US-crime bbc.381647=US-entertainment " + "abcnews.306758=OTHER-politics-MX cnbc.com.6772=US-business reuters.218932=OTHER-politics-MK upi.176251=GB-sport reuters.218921=US-sport cnn.304447=US-politics " + "guardian.221679=GB-politics scotsman.133765=GB-sport scotsman.133804=GB-entertainment guardian.221762=OTHER-politics-BO cnbc.com.6769=US-politics " + "dailymail.co.uk.298692=EU-entertainment scotsman.133744=GB-world reuters.218911=US-sport newsweek.51310=US-politics independent.226301=US-sport reuters.218923=EU-sport " + "reuters.218861=US-politics dailymail.co.uk.298759=US-world scotsman.133791=GB-sport cbsnews.248484=EU-scitech dailymail.co.uk.298630=US-scitech " + "newsweek.51329=US-entertainment bbc.381701=GB-crime dailymail.co.uk.298738=GB-entertainment bbc.381669=OTHER-world-CN foxnews.94512=US-politics " + "guardian.221718=GB-entertainment dailymail.co.uk.298686=GB-politics cbsnews.248471=US-politics newsweek.51318=US-entertainment rt.com.91335=US-politics " + "newsweek.51300=US-politics cnn.304478=US-politics upi.176275=US-politics telegraph.405422=OTHER-world-ID reuters.218933=US-politics 
newsweek.51328=US-politics " + "newsweek.51307=US-business bbc.381692=GB-world independent.226346=GB-entertainment bbc.381646=GB-sport reuters.218914=US-sport scotsman.133758=EU-sport " + "rt.com.91350=EU-world scotsman.133773=GB-scitech rt.com.91334=EU-crime bbc.381680=GB-politics guardian.221756=US-politics scotsman.133783=GB-politics cnn.304521=US-sport " + "dailymail.co.uk.298622=GB-politics bbc.381789=GB-sport dailymail.co.uk.298644=GB-business dailymail.co.uk.298602=GB-world scotsman.133753=GB-sport " + "independent.226317=GB-entertainment nytimes.184862=US-politics thelocal.65969=OTHER-world-SY nytimes.184825=US-politics cnbc.com.6784=US-politics nytimes.184804=US-politics " + "nytimes.184830=US-politics scotsman.133801=GB-sport cnbc.com.6770=US-business bbc.381760=GB-crime reuters.218865=OTHER-world-ID newsweek.51339=US-crime " + "euronews-en.153797=OTHER-world-ID abcnews.306774=US-crime dailymail.co.uk.298696=GB-politics abcnews.306755=US-politics reuters.218909=US-crime " + "independent.226349=OTHER-sport-RU newsweek.51330=US-politics bbc.381705=GB-sport newsweek.51340=OTHER-world-ID cbsnews.248411=OTHER-world-FM abcnews.306776=US-crime " + "bbc.381694=GB-entertainment rt.com.91356=US-world telegraph.405430=GB-entertainment telegraph.405404=EU-world bbc.381749=GB-world telegraph.405413=US-politics " + "bbc.381736=OTHER-politics-KP cbsnews.248394=US-politics nytimes.184822=US-world telegraph.405408=US-politics euronews-en.153799=OTHER-politics-SY " + "euronews-en.153826=EU-sport cnn.304400=US-world", +} + +SUBSETS = { + k: {d.split("=")[0]: d.split("=")[1] for d in v.split()} + for (k, v) in _SUBSETS.items() +} +COUNTRIES = sorted(list({v.split("-")[0] for v in SUBSETS["wmt19"].values()})) +DOMAINS = sorted(list({v.split("-")[1] for v in SUBSETS["wmt19"].values()})) + +DATASETS = { + # wmt + "wmt23": WMTXMLDataset( + "wmt23", + data=["https://github.com/wmt-conference/wmt23-news-systems/archive/refs/tags/v.0.1.tar.gz"], + description="Official evaluation and system data for WMT23.", + md5=["63576405e4ce07130a19ad76ba7eb75b"], + langpairs={ + "cs-uk": ["wmt23-news-systems-v.0.1/xml/wmttest2023.cs-uk.all.xml"], + "de-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.de-en.all.xml"], + "en-cs": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-cs.all.xml"], + "en-de": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-de.all.xml"], + "en-he": { + "path": "wmt23-news-systems-v.0.1/xml/wmttest2023.en-he.all.xml", + "refs": ["refB"], + }, + "en-ja": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-ja.all.xml"], + "en-ru": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-ru.all.xml"], + "en-uk": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-uk.all.xml"], + "en-zh": ["wmt23-news-systems-v.0.1/xml/wmttest2023.en-zh.all.xml"], + "he-en": { + "path": "wmt23-news-systems-v.0.1/xml/wmttest2023.he-en.all.xml", + "refs": ["refB"], + }, + "ja-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.ja-en.all.xml"], + "ru-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.ru-en.all.xml"], + "uk-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.uk-en.all.xml"], + "zh-en": ["wmt23-news-systems-v.0.1/xml/wmttest2023.zh-en.all.xml"], + }, + refs=["refA"], + ), + "wmt22": WMTXMLDataset( + "wmt22", + data=["https://github.com/wmt-conference/wmt22-news-systems/archive/refs/tags/v1.1.tar.gz"], + description="Official evaluation and system data for WMT22.", + md5=["0840978b9b50b9ac3b2b081e37d620b9"], + langpairs={ + "cs-en": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.cs-en.all.xml", + "refs": ["B"], + }, + "cs-uk": 
["wmt22-news-systems-1.1/xml/wmttest2022.cs-uk.all.xml"], + "de-en": ["wmt22-news-systems-1.1/xml/wmttest2022.de-en.all.xml"], + "de-fr": ["wmt22-news-systems-1.1/xml/wmttest2022.de-fr.all.xml"], + "en-cs": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.en-cs.all.xml", + "refs": ["B"], + }, + "en-de": ["wmt22-news-systems-1.1/xml/wmttest2022.en-de.all.xml"], + "en-hr": ["wmt22-news-systems-1.1/xml/wmttest2022.en-hr.all.xml"], + "en-ja": ["wmt22-news-systems-1.1/xml/wmttest2022.en-ja.all.xml"], + "en-liv": ["wmt22-news-systems-1.1/xml/wmttest2022.en-liv.all.xml"], + "en-ru": ["wmt22-news-systems-1.1/xml/wmttest2022.en-ru.all.xml"], + "en-uk": ["wmt22-news-systems-1.1/xml/wmttest2022.en-uk.all.xml"], + "en-zh": ["wmt22-news-systems-1.1/xml/wmttest2022.en-zh.all.xml"], + "fr-de": ["wmt22-news-systems-1.1/xml/wmttest2022.fr-de.all.xml"], + "ja-en": ["wmt22-news-systems-1.1/xml/wmttest2022.ja-en.all.xml"], + "liv-en": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.liv-en.all.xml", + # no translator because data is English-original + "refs": [""], + }, + "ru-en": ["wmt22-news-systems-1.1/xml/wmttest2022.ru-en.all.xml"], + "ru-sah": { + "path": "wmt22-news-systems-1.1/xml/wmttest2022.ru-sah.all.xml", + # no translator because data is Yakut-original + "refs": [""], + }, + "sah-ru": ["wmt22-news-systems-1.1/xml/wmttest2022.sah-ru.all.xml"], + "uk-cs": ["wmt22-news-systems-1.1/xml/wmttest2022.uk-cs.all.xml"], + "uk-en": ["wmt22-news-systems-1.1/xml/wmttest2022.uk-en.all.xml"], + "zh-en": ["wmt22-news-systems-1.1/xml/wmttest2022.zh-en.all.xml"], + }, + # the default reference to use with this dataset + refs=["A"], + ), + "wmt21/systems": WMTXMLDataset( + "wmt21/systems", + data=["https://github.com/wmt-conference/wmt21-news-systems/archive/refs/tags/v1.3.tar.gz"], + description="WMT21 system output.", + md5=["a6aee4099da58f98f71eb3fac1694237"], + langpairs={ + "de-fr": ["wmt21-news-systems-1.3/xml/newstest2021.de-fr.all.xml"], + "en-de": ["wmt21-news-systems-1.3/xml/newstest2021.en-de.all.xml"], + "en-ha": ["wmt21-news-systems-1.3/xml/newstest2021.en-ha.all.xml"], + "en-is": ["wmt21-news-systems-1.3/xml/newstest2021.en-is.all.xml"], + "en-ja": ["wmt21-news-systems-1.3/xml/newstest2021.en-ja.all.xml"], + "fr-de": ["wmt21-news-systems-1.3/xml/newstest2021.fr-de.all.xml"], + "ha-en": ["wmt21-news-systems-1.3/xml/newstest2021.ha-en.all.xml"], + "is-en": ["wmt21-news-systems-1.3/xml/newstest2021.is-en.all.xml"], + "ja-en": ["wmt21-news-systems-1.3/xml/newstest2021.ja-en.all.xml"], + "zh-en": ["wmt21-news-systems-1.3/xml/newstest2021.zh-en.all.xml"], + "en-zh": ["wmt21-news-systems-1.3/xml/newstest2021.en-zh.all.xml"], + "cs-en": ["wmt21-news-systems-1.3/xml/newstest2021.cs-en.all.xml"], + "de-en": ["wmt21-news-systems-1.3/xml/newstest2021.de-en.all.xml"], + "en-cs": ["wmt21-news-systems-1.3/xml/newstest2021.en-cs.all.xml"], + "en-ru": ["wmt21-news-systems-1.3/xml/newstest2021.en-ru.all.xml"], + "ru-en": ["wmt21-news-systems-1.3/xml/newstest2021.ru-en.all.xml"], + "bn-hi": ["wmt21-news-systems-1.3/xml/florestest2021.bn-hi.all.xml"], + "hi-bn": ["wmt21-news-systems-1.3/xml/florestest2021.hi-bn.all.xml"], + "xh-zu": ["wmt21-news-systems-1.3/xml/florestest2021.xh-zu.all.xml"], + "zu-xh": ["wmt21-news-systems-1.3/xml/florestest2021.zu-xh.all.xml"], + }, + # the reference to use with this dataset + refs=["A"], + ), + "wmt21": WMTXMLDataset( + "wmt21", + data=["https://data.statmt.org/wmt21/translation-task/test.tgz"], + description="Official evaluation data for WMT21.", + 
md5=["32e7ab995bc318414375d60f0269af92"],
+        langpairs={
+            "de-fr": ["test/newstest2021.de-fr.xml"],
+            "en-de": ["test/newstest2021.en-de.xml"],
+            "en-ha": ["test/newstest2021.en-ha.xml"],
+            "en-is": ["test/newstest2021.en-is.xml"],
+            "en-ja": ["test/newstest2021.en-ja.xml"],
+            "fr-de": ["test/newstest2021.fr-de.xml"],
+            "ha-en": ["test/newstest2021.ha-en.xml"],
+            "is-en": ["test/newstest2021.is-en.xml"],
+            "ja-en": ["test/newstest2021.ja-en.xml"],
+            "zh-en": ["test/newstest2021.zh-en.xml"],
+            "en-zh": ["test/newstest2021.en-zh.xml"],
+            "cs-en": ["test/newstest2021.cs-en.xml"],
+            "de-en": ["test/newstest2021.de-en.xml"],
+            "en-cs": ["test/newstest2021.en-cs.xml"],
+            "en-ru": ["test/newstest2021.en-ru.xml"],
+            "ru-en": ["test/newstest2021.ru-en.xml"],
+            "bn-hi": ["test/florestest2021.bn-hi.xml"],
+            "hi-bn": ["test/florestest2021.hi-bn.xml"],
+            "xh-zu": ["test/florestest2021.xh-zu.xml"],
+            "zu-xh": ["test/florestest2021.zu-xh.xml"],
+        },
+        # the reference to use with this dataset
+        refs=["A"],
+    ),
+    "wmt21/B": WMTXMLDataset(
+        "wmt21/B",
+        data=["https://data.statmt.org/wmt21/translation-task/test.tgz"],
+        description="Official evaluation data for WMT21 with reference B.",
+        md5=["32e7ab995bc318414375d60f0269af92"],
+        langpairs={
+            "cs-en": ["test/newstest2021.cs-en.xml"],
+            "de-en": ["test/newstest2021.de-en.xml"],
+            "en-cs": ["test/newstest2021.en-cs.xml"],
+            "en-ru": ["test/newstest2021.en-ru.xml"],
+            "en-zh": ["test/newstest2021.en-zh.xml"],
+            "ru-en": ["test/newstest2021.ru-en.xml"],
+        },
+        # the reference to use with this dataset
+        refs=["B"],
+    ),
+    "wmt21/AB": WMTXMLDataset(
+        "wmt21/AB",
+        data=["https://data.statmt.org/wmt21/translation-task/test.tgz"],
+        description="Official evaluation data for WMT21 with references A and B.",
+        md5=["32e7ab995bc318414375d60f0269af92"],
+        langpairs={
+            "cs-en": ["test/newstest2021.cs-en.xml"],
+            "de-en": ["test/newstest2021.de-en.xml"],
+            "en-de": ["test/newstest2021.en-de.xml"],
+            "en-cs": ["test/newstest2021.en-cs.xml"],
+            "en-ru": ["test/newstest2021.en-ru.xml"],
+            "en-zh": ["test/newstest2021.en-zh.xml"],
+            "ru-en": ["test/newstest2021.ru-en.xml"],
+        },
+        # the reference to use with this dataset
+        refs=["A", "B"],
+    ),
+    "wmt21/C": WMTXMLDataset(
+        "wmt21/C",
+        data=["https://data.statmt.org/wmt21/translation-task/test.tgz"],
+        description="Official evaluation data for WMT21 with reference C.",
+        md5=["32e7ab995bc318414375d60f0269af92"],
+        langpairs={
+            "en-de": ["test/newstest2021.en-de.xml"],
+        },
+        # the reference to use with this dataset
+        refs=["C"],
+    ),
+    "wmt21/AC": WMTXMLDataset(
+        "wmt21/AC",
+        data=["https://data.statmt.org/wmt21/translation-task/test.tgz"],
+        description="Official evaluation data for WMT21 with references A and C.",
+        md5=["32e7ab995bc318414375d60f0269af92"],
+        langpairs={
+            "en-de": ["test/newstest2021.en-de.xml"],
+        },
+        # the reference to use with this dataset
+        refs=["A", "C"],
+    ),
+    "wmt21/D": WMTXMLDataset(
+        "wmt21/D",
+        data=["https://data.statmt.org/wmt21/translation-task/test.tgz"],
+        description="Official evaluation data for WMT21 with reference D.",
+        md5=["32e7ab995bc318414375d60f0269af92"],
+        langpairs={
+            "en-de": ["test/newstest2021.en-de.xml"],
+        },
+        # the reference to use with this dataset
+        refs=["D"],
+    ),
+    "wmt21/dev": WMTXMLDataset(
+        "wmt21/dev",
+        data=["https://data.statmt.org/wmt21/translation-task/dev.tgz"],
+        description="Development data for WMT21; if multiple references are available, the first one is used.",
+        md5=["165da59ac8dfb5b7cafd7e90b1cac672"],
+        langpairs={
+            "en-ha":
["dev/xml/newsdev2021.en-ha.xml"], + "ha-en": ["dev/xml/newsdev2021.ha-en.xml"], + "en-is": ["dev/xml/newsdev2021.en-is.xml"], + "is-en": ["dev/xml/newsdev2021.is-en.xml"], + }, + # datasets are bidirectional in origin, so use both refs + refs=["A", ""], + ), + "wmt20/tworefs": FakeSGMLDataset( + "wmt20/tworefs", + data=["https://data.statmt.org/wmt20/translation-task/test.tgz"], + description="WMT20 news test sets with two references", + md5=["3b1f777cfd2fb15ccf66e9bfdb2b1699"], + langpairs={ + "de-en": [ + "sgm/newstest2020-deen-src.de.sgm", + "sgm/newstest2020-deen-ref.en.sgm", + "sgm/newstestB2020-deen-ref.en.sgm", + ], + "en-de": [ + "sgm/newstest2020-ende-src.en.sgm", + "sgm/newstest2020-ende-ref.de.sgm", + "sgm/newstestB2020-ende-ref.de.sgm", + ], + "en-zh": [ + "sgm/newstest2020-enzh-src.en.sgm", + "sgm/newstest2020-enzh-ref.zh.sgm", + "sgm/newstestB2020-enzh-ref.zh.sgm", + ], + "ru-en": [ + "sgm/newstest2020-ruen-src.ru.sgm", + "sgm/newstest2020-ruen-ref.en.sgm", + "sgm/newstestB2020-ruen-ref.en.sgm", + ], + "zh-en": [ + "sgm/newstest2020-zhen-src.zh.sgm", + "sgm/newstest2020-zhen-ref.en.sgm", + "sgm/newstestB2020-zhen-ref.en.sgm", + ], + }, + ), + "wmt20": FakeSGMLDataset( + "wmt20", + data=["https://data.statmt.org/wmt20/translation-task/test.tgz"], + description="Official evaluation data for WMT20", + md5=["3b1f777cfd2fb15ccf66e9bfdb2b1699"], + langpairs={ + "cs-en": [ + "sgm/newstest2020-csen-src.cs.sgm", + "sgm/newstest2020-csen-ref.en.sgm", + ], + "de-en": [ + "sgm/newstest2020-deen-src.de.sgm", + "sgm/newstest2020-deen-ref.en.sgm", + ], + "de-fr": [ + "sgm/newstest2020-defr-src.de.sgm", + "sgm/newstest2020-defr-ref.fr.sgm", + ], + "en-cs": [ + "sgm/newstest2020-encs-src.en.sgm", + "sgm/newstest2020-encs-ref.cs.sgm", + ], + "en-de": [ + "sgm/newstest2020-ende-src.en.sgm", + "sgm/newstest2020-ende-ref.de.sgm", + ], + "en-iu": [ + "sgm/newstest2020-eniu-src.en.sgm", + "sgm/newstest2020-eniu-ref.iu.sgm", + ], + "en-ja": [ + "sgm/newstest2020-enja-src.en.sgm", + "sgm/newstest2020-enja-ref.ja.sgm", + ], + "en-km": [ + "sgm/newstest2020-enkm-src.en.sgm", + "sgm/newstest2020-enkm-ref.km.sgm", + ], + "en-pl": [ + "sgm/newstest2020-enpl-src.en.sgm", + "sgm/newstest2020-enpl-ref.pl.sgm", + ], + "en-ps": [ + "sgm/newstest2020-enps-src.en.sgm", + "sgm/newstest2020-enps-ref.ps.sgm", + ], + "en-ru": [ + "sgm/newstest2020-enru-src.en.sgm", + "sgm/newstest2020-enru-ref.ru.sgm", + ], + "en-ta": [ + "sgm/newstest2020-enta-src.en.sgm", + "sgm/newstest2020-enta-ref.ta.sgm", + ], + "en-zh": [ + "sgm/newstest2020-enzh-src.en.sgm", + "sgm/newstest2020-enzh-ref.zh.sgm", + ], + "fr-de": [ + "sgm/newstest2020-frde-src.fr.sgm", + "sgm/newstest2020-frde-ref.de.sgm", + ], + "iu-en": [ + "sgm/newstest2020-iuen-src.iu.sgm", + "sgm/newstest2020-iuen-ref.en.sgm", + ], + "ja-en": [ + "sgm/newstest2020-jaen-src.ja.sgm", + "sgm/newstest2020-jaen-ref.en.sgm", + ], + "km-en": [ + "sgm/newstest2020-kmen-src.km.sgm", + "sgm/newstest2020-kmen-ref.en.sgm", + ], + "pl-en": [ + "sgm/newstest2020-plen-src.pl.sgm", + "sgm/newstest2020-plen-ref.en.sgm", + ], + "ps-en": [ + "sgm/newstest2020-psen-src.ps.sgm", + "sgm/newstest2020-psen-ref.en.sgm", + ], + "ru-en": [ + "sgm/newstest2020-ruen-src.ru.sgm", + "sgm/newstest2020-ruen-ref.en.sgm", + ], + "ta-en": [ + "sgm/newstest2020-taen-src.ta.sgm", + "sgm/newstest2020-taen-ref.en.sgm", + ], + "zh-en": [ + "sgm/newstest2020-zhen-src.zh.sgm", + "sgm/newstest2020-zhen-ref.en.sgm", + ], + }, + ), + "wmt20/dev": FakeSGMLDataset( + "wmt20/dev", + 
data=["https://data.statmt.org/wmt20/translation-task/dev.tgz"], + description="Development data for tasks new to 2020.", + md5=["037f2b37aab74febbb1b2307dc2afb54"], + langpairs={ + "iu-en": [ + "dev/newsdev2020-iuen-src.iu.sgm", + "dev/newsdev2020-iuen-ref.en.sgm", + ], + "en-iu": [ + "dev/newsdev2020-eniu-src.en.sgm", + "dev/newsdev2020-eniu-ref.iu.sgm", + ], + "ja-en": [ + "dev/newsdev2020-jaen-src.ja.sgm", + "dev/newsdev2020-jaen-ref.en.sgm", + ], + "en-ja": [ + "dev/newsdev2020-enja-src.en.sgm", + "dev/newsdev2020-enja-ref.ja.sgm", + ], + "pl-en": [ + "dev/newsdev2020-plen-src.pl.sgm", + "dev/newsdev2020-plen-ref.en.sgm", + ], + "en-pl": [ + "dev/newsdev2020-enpl-src.en.sgm", + "dev/newsdev2020-enpl-ref.pl.sgm", + ], + "ta-en": [ + "dev/newsdev2020-taen-src.ta.sgm", + "dev/newsdev2020-taen-ref.en.sgm", + ], + "en-ta": [ + "dev/newsdev2020-enta-src.en.sgm", + "dev/newsdev2020-enta-ref.ta.sgm", + ], + }, + ), + "wmt20/robust/set1": PlainTextDataset( + "wmt20/robust/set1", + data=["https://data.statmt.org/wmt20/robustness-task/robustness20-3-sets.zip"], + md5=["a12ac9ebe89b72195041518dffc4a9d5"], + description="WMT20 robustness task, set 1", + langpairs={ + "en-ja": [ + "robustness20-3-sets/robustness20-set1-enja.en", + "robustness20-3-sets/robustness20-set1-enja.ja", + ], + "en-de": [ + "robustness20-3-sets/robustness20-set1-ende.en", + "robustness20-3-sets/robustness20-set1-ende.de", + ], + }, + ), + "wmt20/robust/set2": PlainTextDataset( + "wmt20/robust/set2", + data=["https://data.statmt.org/wmt20/robustness-task/robustness20-3-sets.zip"], + md5=["a12ac9ebe89b72195041518dffc4a9d5"], + description="WMT20 robustness task, set 2", + langpairs={ + "en-ja": [ + "robustness20-3-sets/robustness20-set2-enja.en", + "robustness20-3-sets/robustness20-set2-enja.ja", + ], + "ja-en": [ + "robustness20-3-sets/robustness20-set2-jaen.ja", + "robustness20-3-sets/robustness20-set2-jaen.en", + ], + }, + ), + "wmt20/robust/set3": PlainTextDataset( + "wmt20/robust/set3", + data=["https://data.statmt.org/wmt20/robustness-task/robustness20-3-sets.zip"], + md5=["a12ac9ebe89b72195041518dffc4a9d5"], + description="WMT20 robustness task, set 3", + langpairs={ + "de-en": [ + "robustness20-3-sets/robustness20-set3-deen.de", + "robustness20-3-sets/robustness20-set3-deen.en", + ], + }, + ), + "wmt19": FakeSGMLDataset( + "wmt19", + data=["https://data.statmt.org/wmt19/translation-task/test.tgz"], + description="Official evaluation data.", + md5=["84de7162d158e28403103b01aeefc39a"], + citation=r"""@proceedings{ws-2019-machine, + title = "Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers)", + editor = "Bojar, Ond{\v{r}}ej and + Chatterjee, Rajen and + Federmann, Christian and + Fishel, Mark and + Graham, Yvette and + Haddow, Barry and + Huck, Matthias and + Yepes, Antonio Jimeno and + Koehn, Philipp and + Martins, Andr{\'e} and + Monz, Christof and + Negri, Matteo and + N{\'e}v{\'e}ol, Aur{\'e}lie and + Neves, Mariana and + Post, Matt and + Turchi, Marco and + Verspoor, Karin", + month = aug, + year = "2019", + address = "Florence, Italy", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/W19-5200", +}""", + langpairs={ + "cs-de": [ + "sgm/newstest2019-csde-src.cs.sgm", + "sgm/newstest2019-csde-ref.de.sgm", + ], + "de-cs": [ + "sgm/newstest2019-decs-src.de.sgm", + "sgm/newstest2019-decs-ref.cs.sgm", + ], + "de-en": [ + "sgm/newstest2019-deen-src.de.sgm", + "sgm/newstest2019-deen-ref.en.sgm", + ], + "de-fr": [ + 
"sgm/newstest2019-defr-src.de.sgm", + "sgm/newstest2019-defr-ref.fr.sgm", + ], + "en-cs": [ + "sgm/newstest2019-encs-src.en.sgm", + "sgm/newstest2019-encs-ref.cs.sgm", + ], + "en-de": [ + "sgm/newstest2019-ende-src.en.sgm", + "sgm/newstest2019-ende-ref.de.sgm", + ], + "en-fi": [ + "sgm/newstest2019-enfi-src.en.sgm", + "sgm/newstest2019-enfi-ref.fi.sgm", + ], + "en-gu": [ + "sgm/newstest2019-engu-src.en.sgm", + "sgm/newstest2019-engu-ref.gu.sgm", + ], + "en-kk": [ + "sgm/newstest2019-enkk-src.en.sgm", + "sgm/newstest2019-enkk-ref.kk.sgm", + ], + "en-lt": [ + "sgm/newstest2019-enlt-src.en.sgm", + "sgm/newstest2019-enlt-ref.lt.sgm", + ], + "en-ru": [ + "sgm/newstest2019-enru-src.en.sgm", + "sgm/newstest2019-enru-ref.ru.sgm", + ], + "en-zh": [ + "sgm/newstest2019-enzh-src.en.sgm", + "sgm/newstest2019-enzh-ref.zh.sgm", + ], + "fi-en": [ + "sgm/newstest2019-fien-src.fi.sgm", + "sgm/newstest2019-fien-ref.en.sgm", + ], + "fr-de": [ + "sgm/newstest2019-frde-src.fr.sgm", + "sgm/newstest2019-frde-ref.de.sgm", + ], + "gu-en": [ + "sgm/newstest2019-guen-src.gu.sgm", + "sgm/newstest2019-guen-ref.en.sgm", + ], + "kk-en": [ + "sgm/newstest2019-kken-src.kk.sgm", + "sgm/newstest2019-kken-ref.en.sgm", + ], + "lt-en": [ + "sgm/newstest2019-lten-src.lt.sgm", + "sgm/newstest2019-lten-ref.en.sgm", + ], + "ru-en": [ + "sgm/newstest2019-ruen-src.ru.sgm", + "sgm/newstest2019-ruen-ref.en.sgm", + ], + "zh-en": [ + "sgm/newstest2019-zhen-src.zh.sgm", + "sgm/newstest2019-zhen-ref.en.sgm", + ], + }, + ), + "wmt19/dev": FakeSGMLDataset( + "wmt19/dev", + data=["https://data.statmt.org/wmt19/translation-task/dev.tgz"], + description="Development data for tasks new to 2019.", + md5=["f2ec7af5947c19e0cacb3882eb208002"], + langpairs={ + "lt-en": [ + "dev/newsdev2019-lten-src.lt.sgm", + "dev/newsdev2019-lten-ref.en.sgm", + ], + "en-lt": [ + "dev/newsdev2019-enlt-src.en.sgm", + "dev/newsdev2019-enlt-ref.lt.sgm", + ], + "gu-en": [ + "dev/newsdev2019-guen-src.gu.sgm", + "dev/newsdev2019-guen-ref.en.sgm", + ], + "en-gu": [ + "dev/newsdev2019-engu-src.en.sgm", + "dev/newsdev2019-engu-ref.gu.sgm", + ], + "kk-en": [ + "dev/newsdev2019-kken-src.kk.sgm", + "dev/newsdev2019-kken-ref.en.sgm", + ], + "en-kk": [ + "dev/newsdev2019-enkk-src.en.sgm", + "dev/newsdev2019-enkk-ref.kk.sgm", + ], + }, + ), + "wmt19/google/ar": WMTAdditionDataset( + "wmt19/google/ar", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-ar.ref", + ], + description="Additional high-quality reference for WMT19/en-de.", + md5=["84de7162d158e28403103b01aeefc39a", "d66d9e91548ced0ac476f2390e32e2de"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_ar.wmt19-ende-ar.ref"], + }, + ), + "wmt19/google/arp": WMTAdditionDataset( + "wmt19/google/arp", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-arp.ref", + ], + description="Additional paraphrase of wmt19/google/ar.", + md5=["84de7162d158e28403103b01aeefc39a", "c70ea808cf2bff621ad7a8fddd4deca9"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n 
author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_arp.wmt19-ende-arp.ref"], + }, + ), + "wmt19/google/wmtp": WMTAdditionDataset( + "wmt19/google/wmtp", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-wmtp.ref", + ], + description="Additional paraphrase of the official WMT19 reference.", + md5=["84de7162d158e28403103b01aeefc39a", "587c660ee5fd44727f0db025b71c6a82"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_wmtp.wmt19-ende-wmtp.ref"], + }, + ), + "wmt19/google/hqr": WMTAdditionDataset( + "wmt19/google/hqr", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-hqr.ref", + ], + description="Best human selected-reference between wmt19 and wmt19/google/ar.", + md5=["84de7162d158e28403103b01aeefc39a", "d9221135f62d7152de041f5bfc8efaea"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_hqr.wmt19-ende-hqr.ref"], + }, + ), + "wmt19/google/hqp": WMTAdditionDataset( + "wmt19/google/hqp", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-hqp.ref", + ], + description="Best human-selected reference between wmt19/google/arp and wmt19/google/wmtp.", + md5=["84de7162d158e28403103b01aeefc39a", "b7c3a07a59c8eccea5367e9ec5417a8a"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_hqp.wmt19-ende-hqp.ref"], + }, + ), + "wmt19/google/hqall": WMTAdditionDataset( + "wmt19/google/hqall", + data=[ + "https://data.statmt.org/wmt19/translation-task/test.tgz", + "https://raw.githubusercontent.com/google/wmt19-paraphrased-references/master/wmt19/ende/wmt19-ende-hqall.ref", + ], + description="Best human-selected reference among original official reference and the Google reference and paraphrases.", + md5=["84de7162d158e28403103b01aeefc39a", "edecf10ced59e10b703a6fbcf1fa9dfa"], + citation="@misc{freitag2020bleu,\n title={{BLEU} might be Guilty but References are not Innocent},\n author={Markus Freitag and David Grangier and Isaac Caswell},\n year={2020},\n eprint={2004.06063},\n archivePrefix={arXiv},\n primaryClass={cs.CL}", + langpairs={ + "en-de": ["sgm/newstest2019-ende-src.en.sgm", "wmt19_google_hqall.wmt19-ende-hqall.ref"], + }, + ), + "wmt18": FakeSGMLDataset( + "wmt18", + data=["https://data.statmt.org/wmt18/translation-task/test.tgz"], + 
md5=["f996c245ecffea23d0006fa4c34e9064"], + description="Official evaluation data.", + citation='@inproceedings{bojar-etal-2018-findings,\n title = "Findings of the 2018 Conference on Machine Translation ({WMT}18)",\n author = "Bojar, Ond{\v{r}}ej and\n Federmann, Christian and\n Fishel, Mark and\n Graham, Yvette and\n Haddow, Barry and\n Koehn, Philipp and\n Monz, Christof",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Shared Task Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6401",\n pages = "272--303",\n}', + langpairs={ + "cs-en": [ + "test/newstest2018-csen-src.cs.sgm", + "test/newstest2018-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2018-deen-src.de.sgm", + "test/newstest2018-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2018-encs-src.en.sgm", + "test/newstest2018-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2018-ende-src.en.sgm", + "test/newstest2018-ende-ref.de.sgm", + ], + "en-et": [ + "test/newstest2018-enet-src.en.sgm", + "test/newstest2018-enet-ref.et.sgm", + ], + "en-fi": [ + "test/newstest2018-enfi-src.en.sgm", + "test/newstest2018-enfi-ref.fi.sgm", + ], + "en-ru": [ + "test/newstest2018-enru-src.en.sgm", + "test/newstest2018-enru-ref.ru.sgm", + ], + "et-en": [ + "test/newstest2018-eten-src.et.sgm", + "test/newstest2018-eten-ref.en.sgm", + ], + "fi-en": [ + "test/newstest2018-fien-src.fi.sgm", + "test/newstest2018-fien-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2018-ruen-src.ru.sgm", + "test/newstest2018-ruen-ref.en.sgm", + ], + "en-tr": [ + "test/newstest2018-entr-src.en.sgm", + "test/newstest2018-entr-ref.tr.sgm", + ], + "tr-en": [ + "test/newstest2018-tren-src.tr.sgm", + "test/newstest2018-tren-ref.en.sgm", + ], + "en-zh": [ + "test/newstest2018-enzh-src.en.sgm", + "test/newstest2018-enzh-ref.zh.sgm", + ], + "zh-en": [ + "test/newstest2018-zhen-src.zh.sgm", + "test/newstest2018-zhen-ref.en.sgm", + ], + }, + ), + "wmt18/test-ts": FakeSGMLDataset( + "wmt18/test-ts", + data=["https://data.statmt.org/wmt18/translation-task/test-ts.tgz"], + md5=["5c621a34d512cc2dd74162ae7d00b320"], + description="Official evaluation sources with extra test sets interleaved.", + langpairs={ + "cs-en": ["test-ts/newstest2018-csen-src-ts.cs.sgm", "test-ts/newstest2018-csen-ref-ts.en.sgm"], + "de-en": ["test-ts/newstest2018-deen-src-ts.de.sgm", "test-ts/newstest2018-deen-ref-ts.en.sgm"], + "en-cs": ["test-ts/newstest2018-encs-src-ts.en.sgm", "test-ts/newstest2018-encs-ref-ts.cs.sgm"], + "en-de": ["test-ts/newstest2018-ende-src-ts.en.sgm", "test-ts/newstest2018-ende-ref-ts.de.sgm"], + "en-et": ["test-ts/newstest2018-enet-src-ts.en.sgm", "test-ts/newstest2018-enet-ref-ts.et.sgm"], + "en-fi": ["test-ts/newstest2018-enfi-src-ts.en.sgm", "test-ts/newstest2018-enfi-ref-ts.fi.sgm"], + "en-ru": ["test-ts/newstest2018-enru-src-ts.en.sgm", "test-ts/newstest2018-enru-ref-ts.ru.sgm"], + "et-en": ["test-ts/newstest2018-eten-src-ts.et.sgm", "test-ts/newstest2018-eten-ref-ts.en.sgm"], + "fi-en": ["test-ts/newstest2018-fien-src-ts.fi.sgm", "test-ts/newstest2018-fien-ref-ts.en.sgm"], + "ru-en": ["test-ts/newstest2018-ruen-src-ts.ru.sgm", "test-ts/newstest2018-ruen-ref-ts.en.sgm"], + "en-tr": ["test-ts/newstest2018-entr-src-ts.en.sgm", "test-ts/newstest2018-entr-ref-ts.tr.sgm"], + "tr-en": ["test-ts/newstest2018-tren-src-ts.tr.sgm", "test-ts/newstest2018-tren-ref-ts.en.sgm"], + "en-zh": ["test-ts/newstest2018-enzh-src-ts.en.sgm", 
"test-ts/newstest2018-enzh-ref-ts.zh.sgm"], + "zh-en": ["test-ts/newstest2018-zhen-src-ts.zh.sgm", "test-ts/newstest2018-zhen-ref-ts.en.sgm"], + }, + ), + "wmt18/dev": FakeSGMLDataset( + "wmt18/dev", + data=["https://data.statmt.org/wmt18/translation-task/dev.tgz"], + md5=["486f391da54a7a3247f02ebd25996f24"], + description="Development data (Estonian<>English).", + langpairs={ + "et-en": [ + "dev/newsdev2018-eten-src.et.sgm", + "dev/newsdev2018-eten-ref.en.sgm", + ], + "en-et": [ + "dev/newsdev2018-enet-src.en.sgm", + "dev/newsdev2018-enet-ref.et.sgm", + ], + }, + ), + "wmt17": FakeSGMLDataset( + "wmt17", + data=["https://data.statmt.org/wmt17/translation-task/test.tgz"], + md5=["86a1724c276004aa25455ae2a04cef26"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}", + langpairs={ + "cs-en": [ + "test/newstest2017-csen-src.cs.sgm", + "test/newstest2017-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2017-deen-src.de.sgm", + "test/newstest2017-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2017-encs-src.en.sgm", + "test/newstest2017-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2017-ende-src.en.sgm", + "test/newstest2017-ende-ref.de.sgm", + ], + "en-fi": [ + "test/newstest2017-enfi-src.en.sgm", + "test/newstest2017-enfi-ref.fi.sgm", + ], + "en-lv": [ + "test/newstest2017-enlv-src.en.sgm", + "test/newstest2017-enlv-ref.lv.sgm", + ], + "en-ru": [ + "test/newstest2017-enru-src.en.sgm", + "test/newstest2017-enru-ref.ru.sgm", + ], + "en-tr": [ + "test/newstest2017-entr-src.en.sgm", + "test/newstest2017-entr-ref.tr.sgm", + ], + "en-zh": [ + "test/newstest2017-enzh-src.en.sgm", + "test/newstest2017-enzh-ref.zh.sgm", + ], + "fi-en": [ + "test/newstest2017-fien-src.fi.sgm", + "test/newstest2017-fien-ref.en.sgm", + ], + "lv-en": [ + "test/newstest2017-lven-src.lv.sgm", + "test/newstest2017-lven-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2017-ruen-src.ru.sgm", + "test/newstest2017-ruen-ref.en.sgm", + ], + "tr-en": [ + "test/newstest2017-tren-src.tr.sgm", + "test/newstest2017-tren-ref.en.sgm", + ], + "zh-en": [ + "test/newstest2017-zhen-src.zh.sgm", + "test/newstest2017-zhen-ref.en.sgm", + ], + }, + ), + "wmt17/B": FakeSGMLDataset( + "wmt17/B", + data=["https://data.statmt.org/wmt17/translation-task/test.tgz"], + md5=["86a1724c276004aa25455ae2a04cef26"], + description="Additional reference for EN-FI and FI-EN.", + langpairs={ + "en-fi": [ + "test/newstestB2017-enfi-src.en.sgm", + "test/newstestB2017-enfi-ref.fi.sgm", + ], + }, + ), + "wmt17/tworefs": FakeSGMLDataset( + "wmt17/tworefs", + data=["https://data.statmt.org/wmt17/translation-task/test.tgz"], + md5=["86a1724c276004aa25455ae2a04cef26"], + description="Systems with two references.", + langpairs={ + "en-fi": [ + "test/newstest2017-enfi-src.en.sgm", + "test/newstest2017-enfi-ref.fi.sgm", + 
"test/newstestB2017-enfi-ref.fi.sgm", + ], + }, + ), + "wmt17/improved": FakeSGMLDataset( + "wmt17/improved", + data=["https://data.statmt.org/wmt17/translation-task/test-update-1.tgz"], + md5=["91dbfd5af99bc6891a637a68e04dfd41"], + description="Improved zh-en and en-zh translations.", + langpairs={ + "en-zh": ["newstest2017-enzh-src.en.sgm", "newstest2017-enzh-ref.zh.sgm"], + "zh-en": ["newstest2017-zhen-src.zh.sgm", "newstest2017-zhen-ref.en.sgm"], + }, + ), + "wmt17/dev": FakeSGMLDataset( + "wmt17/dev", + data=["https://data.statmt.org/wmt17/translation-task/dev.tgz"], + md5=["9b1aa63c1cf49dccdd20b962fe313989"], + description="Development sets released for new languages in 2017.", + langpairs={ + "en-lv": [ + "dev/newsdev2017-enlv-src.en.sgm", + "dev/newsdev2017-enlv-ref.lv.sgm", + ], + "en-zh": [ + "dev/newsdev2017-enzh-src.en.sgm", + "dev/newsdev2017-enzh-ref.zh.sgm", + ], + "lv-en": [ + "dev/newsdev2017-lven-src.lv.sgm", + "dev/newsdev2017-lven-ref.en.sgm", + ], + "zh-en": [ + "dev/newsdev2017-zhen-src.zh.sgm", + "dev/newsdev2017-zhen-ref.en.sgm", + ], + }, + ), + "wmt17/ms": WMTAdditionDataset( + "wmt17/ms", + data=[ + "https://github.com/MicrosoftTranslator/Translator-HumanParityData/archive/master.zip", + "https://data.statmt.org/wmt17/translation-task/test-update-1.tgz", + ], + md5=["18fdaa7a3c84cf6ef688da1f6a5fa96f", "91dbfd5af99bc6891a637a68e04dfd41"], + description="Additional Chinese-English references from Microsoft Research.", + citation="@inproceedings{achieving-human-parity-on-automatic-chinese-to-english-news-translation,\n author = {Hassan Awadalla, Hany and Aue, Anthony and Chen, Chang and Chowdhary, Vishal and Clark, Jonathan and Federmann, Christian and Huang, Xuedong and Junczys-Dowmunt, Marcin and Lewis, Will and Li, Mu and Liu, Shujie and Liu, Tie-Yan and Luo, Renqian and Menezes, Arul and Qin, Tao and Seide, Frank and Tan, Xu and Tian, Fei and Wu, Lijun and Wu, Shuangzhi and Xia, Yingce and Zhang, Dongdong and Zhang, Zhirui and Zhou, Ming},\n title = {Achieving Human Parity on Automatic Chinese to English News Translation},\n booktitle = {},\n year = {2018},\n month = {March},\n abstract = {Machine translation has made rapid advances in recent years. Millions of people are using it today in online translation systems and mobile applications in order to communicate across language barriers. The question naturally arises whether such systems can approach or achieve parity with human translations. In this paper, we first address the problem of how to define and accurately measure human parity in translation. We then describe Microsoft’s machine translation system and measure the quality of its translations on the widely used WMT 2017 news translation task from Chinese to English. We find that our latest neural machine translation system has reached a new state-of-the-art, and that the translation quality is at human parity when compared to professional human translations. 
We also find that it significantly exceeds the quality of crowd-sourced non-professional translations.},\n publisher = {},\n url = {https://www.microsoft.com/en-us/research/publication/achieving-human-parity-on-automatic-chinese-to-english-news-translation/},\n address = {},\n pages = {},\n journal = {},\n volume = {},\n chapter = {},\n isbn = {},\n}", + langpairs={ + "zh-en": [ + "newstest2017-zhen-src.zh.sgm", + "newstest2017-zhen-ref.en.sgm", + "Translator-HumanParityData-master/Translator-HumanParityData/References/Translator-HumanParityData-Reference-HT.txt", + "Translator-HumanParityData-master/Translator-HumanParityData/References/Translator-HumanParityData-Reference-PE.txt", + ], + }, + ), + "wmt16": FakeSGMLDataset( + "wmt16", + data=["https://data.statmt.org/wmt16/translation-task/test.tgz"], + md5=["3d809cd0c2c86adb2c67034d15c4e446"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2016:WMT1,\n author = {Bojar, Ond\\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huck, Matthias and Jimeno Yepes, Antonio and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Neveol, Aurelie and Neves, Mariana and Popel, Martin and Post, Matt and Rubino, Raphael and Scarton, Carolina and Specia, Lucia and Turchi, Marco and Verspoor, Karin and Zampieri, Marcos},\n title = {Findings of the 2016 Conference on Machine Translation},\n booktitle = {Proceedings of the First Conference on Machine Translation},\n month = {August},\n year = {2016},\n address = {Berlin, Germany},\n publisher = {Association for Computational Linguistics},\n pages = {131--198},\n url = {http://www.aclweb.org/anthology/W/W16/W16-2301}\n}", + langpairs={ + "cs-en": [ + "test/newstest2016-csen-src.cs.sgm", + "test/newstest2016-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2016-deen-src.de.sgm", + "test/newstest2016-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2016-encs-src.en.sgm", + "test/newstest2016-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2016-ende-src.en.sgm", + "test/newstest2016-ende-ref.de.sgm", + ], + "en-fi": [ + "test/newstest2016-enfi-src.en.sgm", + "test/newstest2016-enfi-ref.fi.sgm", + ], + "en-ro": [ + "test/newstest2016-enro-src.en.sgm", + "test/newstest2016-enro-ref.ro.sgm", + ], + "en-ru": [ + "test/newstest2016-enru-src.en.sgm", + "test/newstest2016-enru-ref.ru.sgm", + ], + "en-tr": [ + "test/newstest2016-entr-src.en.sgm", + "test/newstest2016-entr-ref.tr.sgm", + ], + "fi-en": [ + "test/newstest2016-fien-src.fi.sgm", + "test/newstest2016-fien-ref.en.sgm", + ], + "ro-en": [ + "test/newstest2016-roen-src.ro.sgm", + "test/newstest2016-roen-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2016-ruen-src.ru.sgm", + "test/newstest2016-ruen-ref.en.sgm", + ], + "tr-en": [ + "test/newstest2016-tren-src.tr.sgm", + "test/newstest2016-tren-ref.en.sgm", + ], + }, + ), + "wmt16/B": FakeSGMLDataset( + "wmt16/B", + data=["https://data.statmt.org/wmt16/translation-task/test.tgz"], + md5=["3d809cd0c2c86adb2c67034d15c4e446"], + description="Additional reference for EN-FI.", + langpairs={ + "en-fi": [ + "test/newstest2016-enfi-src.en.sgm", + "test/newstestB2016-enfi-ref.fi.sgm", + ], + }, + ), + "wmt16/tworefs": FakeSGMLDataset( + "wmt16/tworefs", + data=["https://data.statmt.org/wmt16/translation-task/test.tgz"], + md5=["3d809cd0c2c86adb2c67034d15c4e446"], + description="EN-FI with two references.", + langpairs={ + "en-fi": [ + "test/newstest2016-enfi-src.en.sgm", + "test/newstest2016-enfi-ref.fi.sgm", 
+ "test/newstestB2016-enfi-ref.fi.sgm", + ], + }, + ), + "wmt16/dev": FakeSGMLDataset( + "wmt16/dev", + data=["https://data.statmt.org/wmt16/translation-task/dev.tgz"], + md5=["4a3dc2760bb077f4308cce96b06e6af6"], + description="Development sets released for new languages in 2016.", + langpairs={ + "en-ro": [ + "dev/newsdev2016-enro-src.en.sgm", + "dev/newsdev2016-enro-ref.ro.sgm", + ], + "en-tr": [ + "dev/newsdev2016-entr-src.en.sgm", + "dev/newsdev2016-entr-ref.tr.sgm", + ], + "ro-en": [ + "dev/newsdev2016-roen-src.ro.sgm", + "dev/newsdev2016-roen-ref.en.sgm", + ], + "tr-en": [ + "dev/newsdev2016-tren-src.tr.sgm", + "dev/newsdev2016-tren-ref.en.sgm", + ], + }, + ), + "wmt15": FakeSGMLDataset( + "wmt15", + data=["https://statmt.org/wmt15/test.tgz"], + md5=["67e3beca15e69fe3d36de149da0a96df"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2015:WMT,\n author = {Bojar, Ond\\v{r}ej and Chatterjee, Rajen and Federmann, Christian and Haddow, Barry and Huck, Matthias and Hokamp, Chris and Koehn, Philipp and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Scarton, Carolina and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2015 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Tenth Workshop on Statistical Machine Translation},\n month = {September},\n year = {2015},\n address = {Lisbon, Portugal},\n publisher = {Association for Computational Linguistics},\n pages = {1--46},\n url = {http://aclweb.org/anthology/W15-3001}\n}", + langpairs={ + "en-fr": [ + "test/newsdiscusstest2015-enfr-src.en.sgm", + "test/newsdiscusstest2015-enfr-ref.fr.sgm", + ], + "fr-en": [ + "test/newsdiscusstest2015-fren-src.fr.sgm", + "test/newsdiscusstest2015-fren-ref.en.sgm", + ], + "cs-en": [ + "test/newstest2015-csen-src.cs.sgm", + "test/newstest2015-csen-ref.en.sgm", + ], + "de-en": [ + "test/newstest2015-deen-src.de.sgm", + "test/newstest2015-deen-ref.en.sgm", + ], + "en-cs": [ + "test/newstest2015-encs-src.en.sgm", + "test/newstest2015-encs-ref.cs.sgm", + ], + "en-de": [ + "test/newstest2015-ende-src.en.sgm", + "test/newstest2015-ende-ref.de.sgm", + ], + "en-fi": [ + "test/newstest2015-enfi-src.en.sgm", + "test/newstest2015-enfi-ref.fi.sgm", + ], + "en-ru": [ + "test/newstest2015-enru-src.en.sgm", + "test/newstest2015-enru-ref.ru.sgm", + ], + "fi-en": [ + "test/newstest2015-fien-src.fi.sgm", + "test/newstest2015-fien-ref.en.sgm", + ], + "ru-en": [ + "test/newstest2015-ruen-src.ru.sgm", + "test/newstest2015-ruen-ref.en.sgm", + ], + }, + ), + "wmt14": FakeSGMLDataset( + "wmt14", + data=["https://statmt.org/wmt14/test-filtered.tgz"], + md5=["84c597844c1542e29c2aff23aaee4310"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2014:W14-33,\n author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale\\v{s}},\n title = {Findings of the 2014 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation},\n month = {June},\n year = {2014},\n address = {Baltimore, Maryland, USA},\n publisher = {Association for Computational Linguistics},\n pages = {12--58},\n url = {http://www.aclweb.org/anthology/W/W14/W14-3302}\n}", + langpairs={ + "cs-en": [ + "test/newstest2014-csen-src.cs.sgm", + "test/newstest2014-csen-ref.en.sgm", + ], + 
"en-cs": [ + "test/newstest2014-csen-src.en.sgm", + "test/newstest2014-csen-ref.cs.sgm", + ], + "de-en": [ + "test/newstest2014-deen-src.de.sgm", + "test/newstest2014-deen-ref.en.sgm", + ], + "en-de": [ + "test/newstest2014-deen-src.en.sgm", + "test/newstest2014-deen-ref.de.sgm", + ], + "en-fr": [ + "test/newstest2014-fren-src.en.sgm", + "test/newstest2014-fren-ref.fr.sgm", + ], + "fr-en": [ + "test/newstest2014-fren-src.fr.sgm", + "test/newstest2014-fren-ref.en.sgm", + ], + "en-hi": [ + "test/newstest2014-hien-src.en.sgm", + "test/newstest2014-hien-ref.hi.sgm", + ], + "hi-en": [ + "test/newstest2014-hien-src.hi.sgm", + "test/newstest2014-hien-ref.en.sgm", + ], + "en-ru": [ + "test/newstest2014-ruen-src.en.sgm", + "test/newstest2014-ruen-ref.ru.sgm", + ], + "ru-en": [ + "test/newstest2014-ruen-src.ru.sgm", + "test/newstest2014-ruen-ref.en.sgm", + ], + }, + ), + "wmt14/full": FakeSGMLDataset( + "wmt14/full", + data=["https://statmt.org/wmt14/test-full.tgz"], + md5=["a8cd784e006feb32ac6f3d9ec7eb389a"], + description="Evaluation data released after official evaluation for further research.", + langpairs={ + "cs-en": [ + "test-full/newstest2014-csen-src.cs.sgm", + "test-full/newstest2014-csen-ref.en.sgm", + ], + "en-cs": [ + "test-full/newstest2014-csen-src.en.sgm", + "test-full/newstest2014-csen-ref.cs.sgm", + ], + "de-en": [ + "test-full/newstest2014-deen-src.de.sgm", + "test-full/newstest2014-deen-ref.en.sgm", + ], + "en-de": [ + "test-full/newstest2014-deen-src.en.sgm", + "test-full/newstest2014-deen-ref.de.sgm", + ], + "en-fr": [ + "test-full/newstest2014-fren-src.en.sgm", + "test-full/newstest2014-fren-ref.fr.sgm", + ], + "fr-en": [ + "test-full/newstest2014-fren-src.fr.sgm", + "test-full/newstest2014-fren-ref.en.sgm", + ], + "en-hi": [ + "test-full/newstest2014-hien-src.en.sgm", + "test-full/newstest2014-hien-ref.hi.sgm", + ], + "hi-en": [ + "test-full/newstest2014-hien-src.hi.sgm", + "test-full/newstest2014-hien-ref.en.sgm", + ], + "en-ru": [ + "test-full/newstest2014-ruen-src.en.sgm", + "test-full/newstest2014-ruen-ref.ru.sgm", + ], + "ru-en": [ + "test-full/newstest2014-ruen-src.ru.sgm", + "test-full/newstest2014-ruen-ref.en.sgm", + ], + }, + ), + "wmt13": FakeSGMLDataset( + "wmt13", + data=["https://statmt.org/wmt13/test.tgz"], + md5=["48eca5d02f637af44e85186847141f67"], + description="Official evaluation data.", + citation="@InProceedings{bojar-EtAl:2013:WMT,\n author = {Bojar, Ond\\v{r}ej and Buck, Christian and Callison-Burch, Chris and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Monz, Christof and Post, Matt and Soricut, Radu and Specia, Lucia},\n title = {Findings of the 2013 {Workshop on Statistical Machine Translation}},\n booktitle = {Proceedings of the Eighth Workshop on Statistical Machine Translation},\n month = {August},\n year = {2013},\n address = {Sofia, Bulgaria},\n publisher = {Association for Computational Linguistics},\n pages = {1--44},\n url = {http://www.aclweb.org/anthology/W13-2201}\n}", + langpairs={ + "cs-en": ["test/newstest2013-src.cs.sgm", "test/newstest2013-src.en.sgm"], + "en-cs": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.cs.sgm"], + "de-en": ["test/newstest2013-src.de.sgm", "test/newstest2013-src.en.sgm"], + "en-de": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.de.sgm"], + "es-en": ["test/newstest2013-src.es.sgm", "test/newstest2013-src.en.sgm"], + "en-es": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.es.sgm"], + "fr-en": ["test/newstest2013-src.fr.sgm", "test/newstest2013-src.en.sgm"], + "en-fr": 
["test/newstest2013-src.en.sgm", "test/newstest2013-src.fr.sgm"], + "ru-en": ["test/newstest2013-src.ru.sgm", "test/newstest2013-src.en.sgm"], + "en-ru": ["test/newstest2013-src.en.sgm", "test/newstest2013-src.ru.sgm"], + }, + ), + "wmt12": FakeSGMLDataset( + "wmt12", + data=["https://statmt.org/wmt12/test.tgz"], + md5=["608232d34ebc4ba2ff70fead45674e47"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2012:WMT,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Post, Matt and Soricut, Radu and Specia, Lucia},\n title = {Findings of the 2012 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Seventh Workshop on Statistical Machine Translation},\n month = {June},\n year = {2012},\n address = {Montr{'e}al, Canada},\n publisher = {Association for Computational Linguistics},\n pages = {10--51},\n url = {http://www.aclweb.org/anthology/W12-3102}\n}", + langpairs={ + "cs-en": ["test/newstest2012-src.cs.sgm", "test/newstest2012-src.en.sgm"], + "en-cs": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.cs.sgm"], + "de-en": ["test/newstest2012-src.de.sgm", "test/newstest2012-src.en.sgm"], + "en-de": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.de.sgm"], + "es-en": ["test/newstest2012-src.es.sgm", "test/newstest2012-src.en.sgm"], + "en-es": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.es.sgm"], + "fr-en": ["test/newstest2012-src.fr.sgm", "test/newstest2012-src.en.sgm"], + "en-fr": ["test/newstest2012-src.en.sgm", "test/newstest2012-src.fr.sgm"], + }, + ), + "wmt11": FakeSGMLDataset( + "wmt11", + data=["https://statmt.org/wmt11/test.tgz"], + md5=["b0c9680adf32d394aefc2b24e3a5937e"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2011:WMT,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Zaidan, Omar},\n title = {Findings of the 2011 Workshop on Statistical Machine Translation},\n booktitle = {Proceedings of the Sixth Workshop on Statistical Machine Translation},\n month = {July},\n year = {2011},\n address = {Edinburgh, Scotland},\n publisher = {Association for Computational Linguistics},\n pages = {22--64},\n url = {http://www.aclweb.org/anthology/W11-2103}\n}", + langpairs={ + "cs-en": ["newstest2011-src.cs.sgm", "newstest2011-src.en.sgm"], + "en-cs": ["newstest2011-src.en.sgm", "newstest2011-src.cs.sgm"], + "de-en": ["newstest2011-src.de.sgm", "newstest2011-src.en.sgm"], + "en-de": ["newstest2011-src.en.sgm", "newstest2011-src.de.sgm"], + "fr-en": ["newstest2011-src.fr.sgm", "newstest2011-src.en.sgm"], + "en-fr": ["newstest2011-src.en.sgm", "newstest2011-src.fr.sgm"], + "es-en": ["newstest2011-src.es.sgm", "newstest2011-src.en.sgm"], + "en-es": ["newstest2011-src.en.sgm", "newstest2011-src.es.sgm"], + }, + ), + "wmt10": FakeSGMLDataset( + "wmt10", + data=["https://statmt.org/wmt10/test.tgz"], + md5=["491cb885a355da5a23ea66e7b3024d5c"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2010:WMT,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Peterson, Kay and Przybocki, Mark and Zaidan, Omar},\n title = {Findings of the 2010 Joint Workshop on Statistical Machine Translation and Metrics for Machine Translation},\n booktitle = {Proceedings of the Joint Fifth Workshop on Statistical Machine Translation and MetricsMATR},\n month = {July},\n year = {2010},\n address = {Uppsala, Sweden},\n publisher = {Association for Computational Linguistics},\n 
pages = {17--53},\n note = {Revised August 2010},\n url = {http://www.aclweb.org/anthology/W10-1703}\n}", + langpairs={ + "cs-en": ["test/newstest2010-src.cz.sgm", "test/newstest2010-src.en.sgm"], + "en-cs": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.cz.sgm"], + "de-en": ["test/newstest2010-src.de.sgm", "test/newstest2010-src.en.sgm"], + "en-de": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.de.sgm"], + "es-en": ["test/newstest2010-src.es.sgm", "test/newstest2010-src.en.sgm"], + "en-es": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.es.sgm"], + "fr-en": ["test/newstest2010-src.fr.sgm", "test/newstest2010-src.en.sgm"], + "en-fr": ["test/newstest2010-src.en.sgm", "test/newstest2010-src.fr.sgm"], + }, + ), + "wmt09": FakeSGMLDataset( + "wmt09", + data=["https://statmt.org/wmt09/test.tgz"], + md5=["da227abfbd7b666ec175b742a0d27b37"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2009:WMT-09,\n author = {Callison-Burch, Chris and Koehn, Philipp and Monz, Christof and Schroeder, Josh},\n title = {Findings of the 2009 {W}orkshop on {S}tatistical {M}achine {T}ranslation},\n booktitle = {Proceedings of the Fourth Workshop on Statistical Machine Translation},\n month = {March},\n year = {2009},\n address = {Athens, Greece},\n publisher = {Association for Computational Linguistics},\n pages = {1--28},\n url = {http://www.aclweb.org/anthology/W/W09/W09-0401}\n}", + langpairs={ + "cs-en": ["test/newstest2009-src.cz.sgm", "test/newstest2009-src.en.sgm"], + "en-cs": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.cz.sgm"], + "de-en": ["test/newstest2009-src.de.sgm", "test/newstest2009-src.en.sgm"], + "en-de": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.de.sgm"], + "es-en": ["test/newstest2009-src.es.sgm", "test/newstest2009-src.en.sgm"], + "en-es": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.es.sgm"], + "fr-en": ["test/newstest2009-src.fr.sgm", "test/newstest2009-src.en.sgm"], + "en-fr": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.fr.sgm"], + "hu-en": ["test/newstest2009-src.hu.sgm", "test/newstest2009-src.en.sgm"], + "en-hu": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.hu.sgm"], + "it-en": ["test/newstest2009-src.it.sgm", "test/newstest2009-src.en.sgm"], + "en-it": ["test/newstest2009-src.en.sgm", "test/newstest2009-src.it.sgm"], + }, + ), + "wmt08": FakeSGMLDataset( + "wmt08", + data=["https://statmt.org/wmt08/test.tgz"], + md5=["0582e4e894a3342044059c894e1aea3d"], + description="Official evaluation data.", + citation="@InProceedings{callisonburch-EtAl:2008:WMT,\n author = {Callison-Burch, Chris and Fordyce, Cameron and Koehn, Philipp and Monz, Christof and Schroeder, Josh},\n title = {Further Meta-Evaluation of Machine Translation},\n booktitle = {Proceedings of the Third Workshop on Statistical Machine Translation},\n month = {June},\n year = {2008},\n address = {Columbus, Ohio},\n publisher = {Association for Computational Linguistics},\n pages = {70--106},\n url = {http://www.aclweb.org/anthology/W/W08/W08-0309}\n}", + langpairs={ + "cs-en": ["test/newstest2008-src.cz.sgm", "test/newstest2008-src.en.sgm"], + "en-cs": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.cz.sgm"], + "de-en": ["test/newstest2008-src.de.sgm", "test/newstest2008-src.en.sgm"], + "en-de": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.de.sgm"], + "es-en": ["test/newstest2008-src.es.sgm", "test/newstest2008-src.en.sgm"], + "en-es": ["test/newstest2008-src.en.sgm", 
"test/newstest2008-src.es.sgm"], + "fr-en": ["test/newstest2008-src.fr.sgm", "test/newstest2008-src.en.sgm"], + "en-fr": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.fr.sgm"], + "hu-en": ["test/newstest2008-src.hu.sgm", "test/newstest2008-src.en.sgm"], + "en-hu": ["test/newstest2008-src.en.sgm", "test/newstest2008-src.hu.sgm"], + }, + ), + "wmt08/nc": FakeSGMLDataset( + "wmt08/nc", + data=["https://statmt.org/wmt08/test.tgz"], + md5=["0582e4e894a3342044059c894e1aea3d"], + description="Official evaluation data (news commentary).", + langpairs={ + "cs-en": ["test/nc-test2008-src.cz.sgm", "test/nc-test2008-src.en.sgm"], + "en-cs": ["test/nc-test2008-src.en.sgm", "test/nc-test2008-src.cz.sgm"], + }, + ), + "wmt08/europarl": FakeSGMLDataset( + "wmt08/europarl", + data=["https://statmt.org/wmt08/test.tgz"], + md5=["0582e4e894a3342044059c894e1aea3d"], + description="Official evaluation data (Europarl).", + langpairs={ + "de-en": ["test/test2008-src.de.sgm", "test/test2008-src.en.sgm"], + "en-de": ["test/test2008-src.en.sgm", "test/test2008-src.de.sgm"], + "es-en": ["test/test2008-src.es.sgm", "test/test2008-src.en.sgm"], + "en-es": ["test/test2008-src.en.sgm", "test/test2008-src.es.sgm"], + "fr-en": ["test/test2008-src.fr.sgm", "test/test2008-src.en.sgm"], + "en-fr": ["test/test2008-src.en.sgm", "test/test2008-src.fr.sgm"], + }, + ), + # iwslt + "iwslt17": IWSLTXMLDataset( + "iwslt17", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/ar/en-ar.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/ar/en/ar-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/ja/en-ja.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/ja/en/ja-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/ko/en-ko.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/ko/en/ko-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/zh/en/zh-en.tgz", + ], + md5=[ + "1849bcc3b006dc0642a8843b11aa7192", + "79bf7a2ef02d226875f55fb076e7e473", + "b68e7097b179491f6c466ef41ad72b9b", + "e3f5b2a075a2da1a395c8b60bf1e9be1", + "ecdc6bc4ab4c8984e919444f3c05183a", + "4b5141d14b98706c081371e2f8afe0ca", + "d957ee79de1f33c89077d37c5a2c5b06", + "c213e8bb918ebf843543fe9fd2e33db2", + "59f6a81c707378176e9ad8bb8d811f5f", + "7e580af973bb389ec1d1378a1850742f", + "975a858783a0ebec8c57d83ddd5bd381", + "cc51d9b7fe1ff2af858c6a0dd80b8815", + ], + description="Official evaluation data for IWSLT.", + citation="@InProceedings{iwslt2017,\n author = {Cettolo, Mauro and Federico, Marcello and Bentivogli, Luisa and Niehues, Jan and Stüker, Sebastian and Sudoh, Katsuitho and Yoshino, Koichiro and Federmann, Christian},\n title = {Overview of the IWSLT 2017 Evaluation Campaign},\n booktitle = {14th International Workshop on Spoken Language 
Translation},\n month = {December},\n year = {2017},\n address = {Tokyo, Japan},\n pages = {2--14},\n url = {http://workshop2017.iwslt.org/downloads/iwslt2017_proceeding_v2.pdf}\n}", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2017.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2017.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2017.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2017.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2017.en-de.en.xml", + "de-en/IWSLT17.TED.tst2017.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2017.de-en.de.xml", + "en-de/IWSLT17.TED.tst2017.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2017.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2017.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2017.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2017.en-zh.en.xml", + ], + "en-ar": [ + "en-ar/IWSLT17.TED.tst2017.en-ar.en.xml", + "ar-en/IWSLT17.TED.tst2017.ar-en.ar.xml", + ], + "ar-en": [ + "ar-en/IWSLT17.TED.tst2017.ar-en.ar.xml", + "en-ar/IWSLT17.TED.tst2017.en-ar.en.xml", + ], + "en-ja": [ + "en-ja/IWSLT17.TED.tst2017.en-ja.en.xml", + "ja-en/IWSLT17.TED.tst2017.ja-en.ja.xml", + ], + "ja-en": [ + "ja-en/IWSLT17.TED.tst2017.ja-en.ja.xml", + "en-ja/IWSLT17.TED.tst2017.en-ja.en.xml", + ], + "en-ko": [ + "en-ko/IWSLT17.TED.tst2017.en-ko.en.xml", + "ko-en/IWSLT17.TED.tst2017.ko-en.ko.xml", + ], + "ko-en": [ + "ko-en/IWSLT17.TED.tst2017.ko-en.ko.xml", + "en-ko/IWSLT17.TED.tst2017.en-ko.en.xml", + ], + }, + ), + "iwslt17/tst2016": IWSLTXMLDataset( + "iwslt17/tst2016", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-ted-test/texts/zh/en/zh-en.tgz", + ], + md5=[ + "1849bcc3b006dc0642a8843b11aa7192", + "79bf7a2ef02d226875f55fb076e7e473", + "b68e7097b179491f6c466ef41ad72b9b", + "e3f5b2a075a2da1a395c8b60bf1e9be1", + "975a858783a0ebec8c57d83ddd5bd381", + "cc51d9b7fe1ff2af858c6a0dd80b8815", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2016.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2016.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2016.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2016.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2016.en-de.en.xml", + "de-en/IWSLT17.TED.tst2016.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2016.de-en.de.xml", + "en-de/IWSLT17.TED.tst2016.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2016.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2016.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2016.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2016.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2015": IWSLTXMLDataset( + "iwslt17/tst2015", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + 
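# Both directions of each pair are fetched, since the reference for one
+            # direction apparently comes from the reverse direction's file (see langpairs).
+            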
"https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2015.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2015.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2015.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2015.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2015.en-de.en.xml", + "de-en/IWSLT17.TED.tst2015.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2015.de-en.de.xml", + "en-de/IWSLT17.TED.tst2015.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2015.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2015.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2015.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2015.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2014": IWSLTXMLDataset( + "iwslt17/tst2014", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2014.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2014.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2014.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2014.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2014.en-de.en.xml", + "de-en/IWSLT17.TED.tst2014.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2014.de-en.de.xml", + "en-de/IWSLT17.TED.tst2014.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2014.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2014.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2014.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2014.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2013": IWSLTXMLDataset( + "iwslt17/tst2013", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + 
"https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2013.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2013.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2013.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2013.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2013.en-de.en.xml", + "de-en/IWSLT17.TED.tst2013.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2013.de-en.de.xml", + "en-de/IWSLT17.TED.tst2013.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2013.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2013.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2013.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2013.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2012": IWSLTXMLDataset( + "iwslt17/tst2012", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2012.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2012.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2012.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2012.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2012.en-de.en.xml", + "de-en/IWSLT17.TED.tst2012.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2012.de-en.de.xml", + "en-de/IWSLT17.TED.tst2012.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2012.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2012.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2012.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2012.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2011": IWSLTXMLDataset( + "iwslt17/tst2011", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + 
"575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2011.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2011.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2011.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2011.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2011.en-de.en.xml", + "de-en/IWSLT17.TED.tst2011.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2011.de-en.de.xml", + "en-de/IWSLT17.TED.tst2011.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2011.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2011.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2011.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2011.en-zh.en.xml", + ], + }, + ), + "iwslt17/tst2010": IWSLTXMLDataset( + "iwslt17/tst2010", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.tst2010.en-fr.en.xml", + "fr-en/IWSLT17.TED.tst2010.fr-en.fr.xml", + ], + "fr-en": [ + "fr-en/IWSLT17.TED.tst2010.fr-en.fr.xml", + "en-fr/IWSLT17.TED.tst2010.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.tst2010.en-de.en.xml", + "de-en/IWSLT17.TED.tst2010.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.tst2010.de-en.de.xml", + "en-de/IWSLT17.TED.tst2010.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.tst2010.en-zh.en.xml", + "zh-en/IWSLT17.TED.tst2010.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.tst2010.zh-en.zh.xml", + "en-zh/IWSLT17.TED.tst2010.en-zh.en.xml", + ], + }, + ), + "iwslt17/dev2010": IWSLTXMLDataset( + "iwslt17/dev2010", + data=[ + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/de/en-de.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/de/en/de-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/fr/en-fr.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/fr/en/fr-en.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/en/zh/en-zh.tgz", + "https://raw.githubusercontent.com/hlt-mt/WIT3/master/archive/2017-01-trnted/texts/zh/en/zh-en.tgz", + ], + md5=[ + "d8a32cfc002a4f12b17429cfa78050e6", + "ca2b94d694150d4d6c5dc64c200fa589", + "3cf07ebe305312b12f7f1a4d5f8f8377", + "19927da9de0f40348cad9c0fc61642ac", + "575b788dad6c5b9c5cee636f9ac1094a", + "1c0ae40171d52593df8a6963d3828116", + ], + description="Development data for IWSLT 2017.", + langpairs={ + "en-fr": [ + "en-fr/IWSLT17.TED.dev2010.en-fr.en.xml", + "fr-en/IWSLT17.TED.dev2010.fr-en.fr.xml", + ], + "fr-en": [ + 
"fr-en/IWSLT17.TED.dev2010.fr-en.fr.xml", + "en-fr/IWSLT17.TED.dev2010.en-fr.en.xml", + ], + "en-de": [ + "en-de/IWSLT17.TED.dev2010.en-de.en.xml", + "de-en/IWSLT17.TED.dev2010.de-en.de.xml", + ], + "de-en": [ + "de-en/IWSLT17.TED.dev2010.de-en.de.xml", + "en-de/IWSLT17.TED.dev2010.en-de.en.xml", + ], + "en-zh": [ + "en-zh/IWSLT17.TED.dev2010.en-zh.en.xml", + "zh-en/IWSLT17.TED.dev2010.zh-en.zh.xml", + ], + "zh-en": [ + "zh-en/IWSLT17.TED.dev2010.zh-en.zh.xml", + "en-zh/IWSLT17.TED.dev2010.en-zh.en.xml", + ], + }, + ), + # mtedx + "mtedx/valid": PlainTextDataset( + "mtedx/valid", + data=[ + "https://raw.githubusercontent.com/esalesky/mtedx-eval/main/valid.tar.gz" + ], + description="mTEDx evaluation data, valid: http://openslr.org/100", + citation="@misc{salesky2021multilingual,\n title={The Multilingual TEDx Corpus for Speech Recognition and Translation}, \n author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. Oard and Matt Post},\n year={2021},\n eprint={2102.01757},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", + md5=["40618171614c50e6cbb5e5bbceee0635"], + langpairs={ + "el-en": ["valid/mtedx-valid-elen.el", "valid/mtedx-valid-elen.en"], + "es-en": ["valid/mtedx-valid-esen.es", "valid/mtedx-valid-esen.en"], + "es-fr": ["valid/mtedx-valid-esfr.es", "valid/mtedx-valid-esfr.fr"], + "es-it": ["valid/mtedx-valid-esit.es", "valid/mtedx-valid-esit.it"], + "es-pt": ["valid/mtedx-valid-espt.es", "valid/mtedx-valid-espt.pt"], + "fr-en": ["valid/mtedx-valid-fren.fr", "valid/mtedx-valid-fren.en"], + "fr-es": ["valid/mtedx-valid-fres.fr", "valid/mtedx-valid-fres.es"], + "fr-pt": ["valid/mtedx-valid-frpt.fr", "valid/mtedx-valid-frpt.pt"], + "it-en": ["valid/mtedx-valid-iten.it", "valid/mtedx-valid-iten.en"], + "it-es": ["valid/mtedx-valid-ites.it", "valid/mtedx-valid-ites.es"], + "pt-en": ["valid/mtedx-valid-pten.pt", "valid/mtedx-valid-pten.en"], + "pt-es": ["valid/mtedx-valid-ptes.pt", "valid/mtedx-valid-ptes.es"], + "ru-en": ["valid/mtedx-valid-ruen.ru", "valid/mtedx-valid-ruen.en"], + }, + ), + "mtedx/test": PlainTextDataset( + "mtedx/test", + data=["https://raw.githubusercontent.com/esalesky/mtedx-eval/main/test.tar.gz"], + description="mTEDx evaluation data, test: http://openslr.org/100", + citation="@misc{salesky2021multilingual,\n title={The Multilingual TEDx Corpus for Speech Recognition and Translation}, \n author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. 
Oard and Matt Post},\n year={2021},\n eprint={2102.01757},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", + md5=["fa4cb1548c210ec424d7d6bc9a3675a7"], + langpairs={ + "el-en": ["test/mtedx-test-elen.el", "test/mtedx-test-elen.en"], + "es-en": ["test/mtedx-test-esen.es", "test/mtedx-test-esen.en"], + "es-fr": ["test/mtedx-test-esfr.es", "test/mtedx-test-esfr.fr"], + "es-it": ["test/mtedx-test-esit.es", "test/mtedx-test-esit.it"], + "es-pt": ["test/mtedx-test-espt.es", "test/mtedx-test-espt.pt"], + "fr-en": ["test/mtedx-test-fren.fr", "test/mtedx-test-fren.en"], + "fr-es": ["test/mtedx-test-fres.fr", "test/mtedx-test-fres.es"], + "fr-pt": ["test/mtedx-test-frpt.fr", "test/mtedx-test-frpt.pt"], + "it-en": ["test/mtedx-test-iten.it", "test/mtedx-test-iten.en"], + "it-es": ["test/mtedx-test-ites.it", "test/mtedx-test-ites.es"], + "pt-en": ["test/mtedx-test-pten.pt", "test/mtedx-test-pten.en"], + "pt-es": ["test/mtedx-test-ptes.pt", "test/mtedx-test-ptes.es"], + "ru-en": ["test/mtedx-test-ruen.ru", "test/mtedx-test-ruen.en"], + }, + ), + # multi30k + "multi30k/2016": PlainTextDataset( + "multi30k/2016", + data=[ + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/multi30k_test_sets_d3ec2a38.tar.gz" + ], + md5=["9cf8f22d57fee2ca2af3c682dfdc525b"], + description="2016 flickr test set of Multi30k dataset", + citation='@InProceedings{elliott-etal-2016-multi30k,\n title = "{M}ulti30{K}: Multilingual {E}nglish-{G}erman Image Descriptions",\n author = "Elliott, Desmond and Frank, Stella and Sima{\'}an, Khalil and Specia, Lucia",\n booktitle = "Proceedings of the 5th Workshop on Vision and Language",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W16-3210",\n doi = "10.18653/v1/W16-3210",\n pages = "70--74",\n}', + langpairs={ + "en-fr": ["test_2016_flickr.en", "test_2016_flickr.fr"], + "en-de": ["test_2016_flickr.en", "test_2016_flickr.de"], + "en-cs": ["test_2016_flickr.en", "test_2016_flickr.cs"], + }, + ), + "multi30k/2017": PlainTextDataset( + "multi30k/2017", + data=[ + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/multi30k_test_sets_d3ec2a38.tar.gz" + ], + md5=["9cf8f22d57fee2ca2af3c682dfdc525b"], + description="2017 flickr test set of Multi30k dataset", + citation='@InProceedings{elliott-etal-2016-multi30k,\n title = "{M}ulti30{K}: Multilingual {E}nglish-{G}erman Image Descriptions",\n author = "Elliott, Desmond and Frank, Stella and Sima{\'}an, Khalil and Specia, Lucia",\n booktitle = "Proceedings of the 5th Workshop on Vision and Language",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W16-3210",\n doi = "10.18653/v1/W16-3210",\n pages = "70--74",\n}\n\n@InProceedings{elliott-etal-2017-findings,\n title = "Findings of the Second Shared Task on Multimodal Machine Translation and Multilingual Image Description",\n author = {Elliott, Desmond and Frank, Stella and Barrault, Lo{\\"\\i}c and Bougares, Fethi and Specia, Lucia},\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W17-4718",\n doi = "10.18653/v1/W17-4718",\n pages = "215--233",\n}\n', + langpairs={ + "en-fr": ["test_2017_flickr.en", "test_2017_flickr.fr"], + "en-de": 
["test_2017_flickr.en", "test_2017_flickr.de"], + }, + ), + "multi30k/2018": PlainTextDataset( + "multi30k/2018", + data=[ + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/multi30k_test_sets_d3ec2a38.tar.gz", + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.cs.gz", + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.de.gz", + "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.fr.gz", + ], + md5=[ + "9cf8f22d57fee2ca2af3c682dfdc525b", + "4c6b6490e58107b2e397c5e3e1690abc", + "87e00327083dd69feaa029a8f7c1a047", + "a64563e986438ed731a6713027c36bfd", + ], + description="2018 flickr test set of Multi30k dataset. See https://competitions.codalab.org/competitions/19917 for evaluation.", + citation='@InProceedings{elliott-etal-2016-multi30k,\n title = "{M}ulti30{K}: Multilingual {E}nglish-{G}erman Image Descriptions",\n author = "Elliott, Desmond and Frank, Stella and Sima{\'}an, Khalil and Specia, Lucia",\n booktitle = "Proceedings of the 5th Workshop on Vision and Language",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W16-3210",\n doi = "10.18653/v1/W16-3210",\n pages = "70--74",\n}\n\n@InProceedings{barrault-etal-2018-findings,\n title = "Findings of the Third Shared Task on Multimodal Machine Translation",\n author = {Barrault, Lo{\\"\\i}c and Bougares, Fethi and Specia, Lucia and Lala, Chiraag and Elliott, Desmond and Frank, Stella},\n booktitle = "Proceedings of the Third Conference on Machine Translation: Shared Task Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6402",\n doi = "10.18653/v1/W18-6402",\n pages = "304--323",\n}\n', + langpairs={ + "en-fr": ["test_2018_flickr.en", "multi30k_2018.test_2018_flickr.fr.gz"], + "en-de": ["test_2018_flickr.en", "multi30k_2018.test_2018_flickr.de.gz"], + "en-cs": ["test_2018_flickr.en", "multi30k_2018.test_2018_flickr.cs.gz"], + }, + ), + # mtnt + "mtnt2019": TSVDataset( + "mtnt2019", + data=["https://pmichel31415.github.io/hosting/MTNT2019.tar.gz"], + description="Test set for the WMT 19 robustness shared task", + md5=["78a672e1931f106a8549023c0e8af8f6"], + langpairs={ + "en-fr": ["2:MTNT2019/en-fr.final.tsv", "3:MTNT2019/en-fr.final.tsv"], + "fr-en": ["2:MTNT2019/fr-en.final.tsv", "3:MTNT2019/fr-en.final.tsv"], + "en-ja": ["2:MTNT2019/en-ja.final.tsv", "3:MTNT2019/en-ja.final.tsv"], + "ja-en": ["2:MTNT2019/ja-en.final.tsv", "3:MTNT2019/ja-en.final.tsv"], + }, + ), + "mtnt1.1/test": TSVDataset( + "mtnt1.1/test", + data=[ + "https://github.com/pmichel31415/mtnt/releases/download/v1.1/MTNT.1.1.tar.gz" + ], + description="Test data for the Machine Translation of Noisy Text task: http://www.cs.cmu.edu/~pmichel1/mtnt/", + citation='@InProceedings{michel2018a:mtnt,\n author = "Michel, Paul and Neubig, Graham",\n title = "MTNT: A Testbed for Machine Translation of Noisy Text",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n pages = "543--553",\n location = "Brussels, Belgium",\n url = "http://aclweb.org/anthology/D18-1050"\n}', + md5=["8ce1831ac584979ba8cdcd9d4be43e1d"], + langpairs={ + "en-fr": ["1:MTNT/test/test.en-fr.tsv", 
"2:MTNT/test/test.en-fr.tsv"], + "fr-en": ["1:MTNT/test/test.fr-en.tsv", "2:MTNT/test/test.fr-en.tsv"], + "en-ja": ["1:MTNT/test/test.en-ja.tsv", "2:MTNT/test/test.en-ja.tsv"], + "ja-en": ["1:MTNT/test/test.ja-en.tsv", "2:MTNT/test/test.ja-en.tsv"], + }, + ), + "mtnt1.1/valid": TSVDataset( + "mtnt1.1/valid", + data=[ + "https://github.com/pmichel31415/mtnt/releases/download/v1.1/MTNT.1.1.tar.gz" + ], + description="Validation data for the Machine Translation of Noisy Text task: http://www.cs.cmu.edu/~pmichel1/mtnt/", + citation='@InProceedings{michel2018a:mtnt,\n author = "Michel, Paul and Neubig, Graham",\n title = "MTNT: A Testbed for Machine Translation of Noisy Text",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n pages = "543--553",\n location = "Brussels, Belgium",\n url = "http://aclweb.org/anthology/D18-1050"\n}', + md5=["8ce1831ac584979ba8cdcd9d4be43e1d"], + langpairs={ + "en-fr": ["1:MTNT/valid/valid.en-fr.tsv", "2:MTNT/valid/valid.en-fr.tsv"], + "fr-en": ["1:MTNT/valid/valid.fr-en.tsv", "2:MTNT/valid/valid.fr-en.tsv"], + "en-ja": ["1:MTNT/valid/valid.en-ja.tsv", "2:MTNT/valid/valid.en-ja.tsv"], + "ja-en": ["1:MTNT/valid/valid.ja-en.tsv", "2:MTNT/valid/valid.ja-en.tsv"], + }, + ), + "mtnt1.1/train": TSVDataset( + "mtnt1.1/train", + data=[ + "https://github.com/pmichel31415/mtnt/releases/download/v1.1/MTNT.1.1.tar.gz" + ], + description="Validation data for the Machine Translation of Noisy Text task: http://www.cs.cmu.edu/~pmichel1/mtnt/", + citation='@InProceedings{michel2018a:mtnt,\n author = "Michel, Paul and Neubig, Graham",\n title = "MTNT: A Testbed for Machine Translation of Noisy Text",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n pages = "543--553",\n location = "Brussels, Belgium",\n url = "http://aclweb.org/anthology/D18-1050"\n}', + md5=["8ce1831ac584979ba8cdcd9d4be43e1d"], + langpairs={ + "en-fr": ["1:MTNT/train/train.en-fr.tsv", "2:MTNT/train/train.en-fr.tsv"], + "fr-en": ["1:MTNT/train/train.fr-en.tsv", "2:MTNT/train/train.fr-en.tsv"], + "en-ja": ["1:MTNT/train/train.en-ja.tsv", "2:MTNT/train/train.en-ja.tsv"], + "ja-en": ["1:MTNT/train/train.ja-en.tsv", "2:MTNT/train/train.ja-en.tsv"], + }, + ), +} diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py new file mode 100644 index 0000000000000000000000000000000000000000..d1f638123e8742bb22e2f671c4af5bd0e556f685 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py @@ -0,0 +1,116 @@ +import os +import re + +from ..utils import smart_open +from .base import Dataset + + +class FakeSGMLDataset(Dataset): + """ + The fake SGML format used by WMT prior to 2021. Can't be properly parsed. + Source and reference(s) in separate files. + """ + + def _convert_format(self, input_file_path, output_filep_path): + """ + Extract data from raw file and convert to raw txt format. + """ + with smart_open(input_file_path) as fin, smart_open( + output_filep_path, "wt" + ) as fout: + for line in fin: + if line.startswith("(.*).*?", "\\1", line)) + print(line, file=fout) + + def _convert_meta(self, input_file_path, field, output_filep_path): + """ + Extract metadata from document tags, projects across segments. 
+ """ + with smart_open(input_file_path) as fin, smart_open( + output_filep_path, "wt" + ) as fout: + value = "" + for line in fin: + if line.startswith("= 2 + ), f"Each language pair in {self.name} must have at least 2 fields." + + fields = ["src"] + + if length == 2: + fields.append("ref") + else: + for i, _ in enumerate(meta[langpair][1:]): + fields.append(f"ref:{i}") + + if not self.name.startswith("wmt08"): + fields += ["docid", "genre", "origlang"] + + return fields + + +class WMTAdditionDataset(FakeSGMLDataset): + """ + Handle special case of WMT Google addition dataset. + """ + + def _convert_format(self, input_file_path, output_filep_path): + if input_file_path.endswith(".sgm"): + return super()._convert_format(input_file_path, output_filep_path) + else: + with smart_open(input_file_path) as fin: + with smart_open(output_filep_path, "wt") as fout: + for line in fin: + print(line.rstrip(), file=fout) diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py new file mode 100644 index 0000000000000000000000000000000000000000..011bf20dd7e894a705d0327c44ceeb4faea5cc6b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py @@ -0,0 +1,61 @@ +import os + +from ..utils import smart_open +from .base import Dataset + + +class TSVDataset(Dataset): + """ + The format used by the MTNT datasets. Data is in a single TSV file. + """ + + @staticmethod + def _split_index_and_filename(meta, field): + """ + Splits the index and filename from a metadata string. + + e.g. meta="3:en-de.tsv", filed=[Any value] -> (3, "en-de.tsv") + "en-de.tsv", filed="src" -> (1, "en-de.tsv") + "en-de.tsv", filed="tgt" -> (2, "en-de.tsv") + """ + arr = meta.split(":") + if len(arr) == 2: + try: + index = int(arr[0]) + except ValueError: + raise Exception(f"Invalid meta for TSVDataset: {meta}") + return index, arr[1] + + else: + index = 0 if field == "src" else 1 + return index, meta + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. 
+ """ + # ensure that the dataset is downloaded + self.maybe_download() + langpairs = self._get_langpair_metadata(langpair) + + for langpair in langpairs: + fieldnames = self.fieldnames(langpair) + origin_files = [ + os.path.join(self._rawdir, path) for path in langpairs[langpair] + ] + + for field, origin_file, meta in zip( + fieldnames, origin_files, langpairs[langpair] + ): + index, origin_file = self._split_index_and_filename(meta, field) + + origin_file = os.path.join(self._rawdir, origin_file) + output_file = self._get_txt_file_path(langpair, field) + + with smart_open(origin_file) as fin: + with smart_open(output_file, "wt") as fout: + for line in fin: + # be careful with empty source or reference lines + # MTNT2019/ja-en.final.tsv:632 `'1033\t718\t\t\n'` + print(line.rstrip("\n").split("\t")[index], file=fout) diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..4f78bcc3ccb3447c178c1cf2e7d3aebe4e50ed1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py @@ -0,0 +1,207 @@ +import os + +import lxml.etree as ET + +from ..utils import smart_open +from .base import Dataset + +from collections import defaultdict + + +def _get_field_by_translator(translator): + if not translator: + return "ref" + else: + return f"ref:{translator}" + +class WMTXMLDataset(Dataset): + """ + The 2021+ WMT dataset format. Everything is contained in a single file. + Can be parsed with the lxml parser. + """ + @staticmethod + def _unwrap_wmt21_or_later(raw_file): + """ + Unwraps the XML file from wmt21 or later. + This script is adapted from https://github.com/wmt-conference/wmt-format-tools + + :param raw_file: The raw xml file to unwrap. + :return: Dictionary which contains the following fields: + - `src`: The source sentences. + - `docid`: ID indicating which document the sentences belong to. + - `origlang`: The original language of the document. + - `ref:{translator}`: The references produced by each translator. + - `ref`: An alias for the references from the first translator. 
+ """ + tree = ET.parse(raw_file) + # Find and check the documents (src, ref, hyp) + src_langs, ref_langs, translators = set(), set(), set() + for src_doc in tree.getroot().findall(".//src"): + src_langs.add(src_doc.get("lang")) + + for ref_doc in tree.getroot().findall(".//ref"): + ref_langs.add(ref_doc.get("lang")) + translator = ref_doc.get("translator") + translators.add(translator) + + assert ( + len(src_langs) == 1 + ), f"Multiple source languages found in the file: {raw_file}" + assert ( + len(ref_langs) == 1 + ), f"Found {len(ref_langs)} reference languages found in the file: {raw_file}" + + src = [] + docids = [] + orig_langs = [] + domains = [] + + refs = { _get_field_by_translator(translator): [] for translator in translators } + + systems = defaultdict(list) + + src_sent_count, doc_count = 0, 0 + for doc in tree.getroot().findall(".//doc"): + docid = doc.attrib["id"] + origlang = doc.attrib["origlang"] + # present wmt22++ + domain = doc.attrib.get("domain", None) + + # Skip the testsuite + if "testsuite" in doc.attrib: + continue + + doc_count += 1 + src_sents = { + int(seg.get("id")): seg.text for seg in doc.findall(".//src//seg") + } + + def get_sents(doc): + return { + int(seg.get("id")): seg.text if seg.text else "" + for seg in doc.findall(".//seg") + } + + ref_docs = doc.findall(".//ref") + + trans_to_ref = { + ref_doc.get("translator"): get_sents(ref_doc) for ref_doc in ref_docs + } + + hyp_docs = doc.findall(".//hyp") + hyps = { + hyp_doc.get("system"): get_sents(hyp_doc) for hyp_doc in hyp_docs + } + + for seg_id in sorted(src_sents.keys()): + # no ref translation is available for this segment + if not any([value.get(seg_id, "") for value in trans_to_ref.values()]): + continue + for translator in translators: + refs[_get_field_by_translator(translator)].append( + trans_to_ref.get(translator, {translator: {}}).get(seg_id, "") + ) + src.append(src_sents[seg_id]) + for system_name in hyps.keys(): + systems[system_name].append(hyps[system_name][seg_id]) + docids.append(docid) + orig_langs.append(origlang) + if domain is not None: + domains.append(domain) + src_sent_count += 1 + + data = {"src": src, **refs, "docid": docids, "origlang": orig_langs, **systems} + if len(domains): + data["domain"] = domains + + return data + + def _get_langpair_path(self, langpair): + """ + Returns the path for this language pair. + This is useful because in WMT22, the language-pair data structure can be a dict, + in order to allow for overriding which test set to use. + """ + langpair_data = self._get_langpair_metadata(langpair)[langpair] + rel_path = langpair_data["path"] if isinstance(langpair_data, dict) else langpair_data[0] + return os.path.join(self._rawdir, rel_path) + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. 
+ """ + # ensure that the dataset is downloaded + self.maybe_download() + + for langpair in sorted(self._get_langpair_metadata(langpair).keys()): + # The data type can be a list of paths, or a dict, containing the "path" + # and an override on which labeled reference to use (key "refs") + rawfile = self._get_langpair_path(langpair) + + with smart_open(rawfile) as fin: + fields = self._unwrap_wmt21_or_later(fin) + + for fieldname in fields: + textfile = self._get_txt_file_path(langpair, fieldname) + + # skip if the file already exists + if os.path.exists(textfile) and os.path.getsize(textfile) > 0: + continue + + with smart_open(textfile, "w") as fout: + for line in fields[fieldname]: + print(self._clean(line), file=fout) + + def _get_langpair_allowed_refs(self, langpair): + """ + Returns the preferred references for this language pair. + This can be set in the language pair block (as in WMT22), and backs off to the + test-set-level default, or nothing. + + There is one exception. In the metadata, sometimes there is no translator field + listed (e.g., wmt22:liv-en). In this case, the reference is set to "", and the + field "ref" is returned. + """ + defaults = self.kwargs.get("refs", []) + langpair_data = self._get_langpair_metadata(langpair)[langpair] + if isinstance(langpair_data, dict): + allowed_refs = langpair_data.get("refs", defaults) + else: + allowed_refs = defaults + allowed_refs = [_get_field_by_translator(ref) for ref in allowed_refs] + + return allowed_refs + + def get_reference_files(self, langpair): + """ + Returns the requested reference files. + This is defined as a default at the test-set level, and can be overridden per language. + """ + # Iterate through the (label, file path) pairs, looking for permitted labels + allowed_refs = self._get_langpair_allowed_refs(langpair) + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + ref_files = [ + f for f, field in zip(all_files, all_fields) if field in allowed_refs + ] + return ref_files + + def fieldnames(self, langpair): + """ + Return a list of all the field names. For most source, this is just + the source and the reference. For others, it might include the document + ID for each line, or the original language (origLang). + + get_files() should return the same number of items as this. 
+
+        :param langpair: The language pair (e.g., "de-en")
+        :return: a list of field names
+        """
+        self.maybe_download()
+        rawfile = self._get_langpair_path(langpair)
+
+        with smart_open(rawfile) as fin:
+            fields = self._unwrap_wmt21_or_later(fin)
+
+        return list(fields.keys())
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7010a6796eb561a913aad7a458a25f3b0c75c371
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce2c3a3570938f0a85c711350a095f228bc01b26
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77305dca95839b7282ae2908f7a8221c94706c99
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3497a0f333aab78a6f293b94ee39919af7bc07d8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/San_Luis b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/San_Luis
new file mode 100644
index 0000000000000000000000000000000000000000..0a81cbddfa2813041472b716baa91c35a8451379
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/San_Luis differ