diff --git a/env-llmeval/lib/python3.10/site-packages/aiohttp-3.9.4.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/aiohttp-3.9.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiohttp-3.9.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/aiohttp-3.9.4.dist-info/LICENSE.txt b/env-llmeval/lib/python3.10/site-packages/aiohttp-3.9.4.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..e497a322f2091d022983b9c5c043082ab61d1a8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiohttp-3.9.4.dist-info/LICENSE.txt @@ -0,0 +1,13 @@ + Copyright aio-libs contributors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/env-llmeval/lib/python3.10/site-packages/attr/__init__.py b/env-llmeval/lib/python3.10/site-packages/attr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9226258a2d58777f1d5536c5695bbf1b4a635991 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/__init__.py @@ -0,0 +1,134 @@ +# SPDX-License-Identifier: MIT + +""" +Classes Without Boilerplate +""" + +from functools import partial +from typing import Callable + +from . import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._compat import Protocol +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) +from ._next_gen import define, field, frozen, mutable +from ._version_info import VersionInfo + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + + +class AttrsInstance(Protocol): + pass + + +__all__ = [ + "Attribute", + "AttrsInstance", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "get_run_validators", + "has", + "ib", + "make_class", + "mutable", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + + +def _make_getattr(mod_name: str) -> Callable: + """ + Create a metadata proxy for packaging information that uses *mod_name* in + its warnings and errors. 
+ """ + + def __getattr__(name: str) -> str: + dunder_to_metadata = { + "__title__": "Name", + "__copyright__": "", + "__version__": "version", + "__version_info__": "version", + "__description__": "summary", + "__uri__": "", + "__url__": "", + "__author__": "", + "__email__": "", + "__license__": "license", + } + if name not in dunder_to_metadata: + msg = f"module {mod_name} has no attribute {name}" + raise AttributeError(msg) + + import sys + import warnings + + if sys.version_info < (3, 8): + from importlib_metadata import metadata + else: + from importlib.metadata import metadata + + if name not in ("__version__", "__version_info__"): + warnings.warn( + f"Accessing {mod_name}.{name} is deprecated and will be " + "removed in a future release. Use importlib.metadata directly " + "to query for attrs's packaging metadata.", + DeprecationWarning, + stacklevel=2, + ) + + meta = metadata("attrs") + if name == "__license__": + return "MIT" + if name == "__copyright__": + return "Copyright (c) 2015 Hynek Schlawack" + if name in ("__uri__", "__url__"): + return meta["Project-URL"].split(" ", 1)[-1] + if name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + if name == "__author__": + return meta["Author-email"].rsplit(" ", 1)[0] + if name == "__email__": + return meta["Author-email"].rsplit("<", 1)[1][:-1] + + return meta[dunder_to_metadata[name]] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_cmp.py b/env-llmeval/lib/python3.10/site-packages/attr/_cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..a4a35e08fc9d9b078a11edc3236d7e27027cd28e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_cmp.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import _make_ne + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, + and ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if at least + one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + :param Optional[callable] eq: `callable` used to evaluate equality of two + objects. + :param Optional[callable] lt: `callable` used to evaluate whether one + object is less than another object. + :param Optional[callable] le: `callable` used to evaluate whether one + object is less than or equal to another object. + :param Optional[callable] gt: `callable` used to evaluate whether one + object is greater than another object. + :param Optional[callable] ge: `callable` used to evaluate whether one + object is greater than or equal to another object. + + :param bool require_same_type: When `True`, equality and ordering methods + will return `NotImplemented` if objects are not of the same type. + + :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. 
+ num_order_functions = 0
+ has_eq_function = False
+
+ if eq is not None:
+ has_eq_function = True
+ body["__eq__"] = _make_operator("eq", eq)
+ body["__ne__"] = _make_ne()
+
+ if lt is not None:
+ num_order_functions += 1
+ body["__lt__"] = _make_operator("lt", lt)
+
+ if le is not None:
+ num_order_functions += 1
+ body["__le__"] = _make_operator("le", le)
+
+ if gt is not None:
+ num_order_functions += 1
+ body["__gt__"] = _make_operator("gt", gt)
+
+ if ge is not None:
+ num_order_functions += 1
+ body["__ge__"] = _make_operator("ge", ge)
+
+ type_ = types.new_class(
+ class_name, (object,), {}, lambda ns: ns.update(body)
+ )
+
+ # Add same type requirement.
+ if require_same_type:
+ type_._requirements.append(_check_same_type)
+
+ # Add total ordering if at least one operation was defined.
+ if 0 < num_order_functions < 4:
+ if not has_eq_function:
+ # functools.total_ordering requires __eq__ to be defined,
+ # so raise an early error here to keep a nice stack.
+ msg = "eq must be defined in order to complete ordering from lt, le, gt, ge."
+ raise ValueError(msg)
+ type_ = functools.total_ordering(type_)
+
+ return type_
+
+
+def _make_init():
+ """
+ Create __init__ method.
+ """
+
+ def __init__(self, value):
+ """
+ Initialize object with *value*.
+ """
+ self.value = value
+
+ return __init__
+
+
+def _make_operator(name, func):
+ """
+ Create operator method.
+ """
+
+ def method(self, other):
+ if not self._is_comparable_to(other):
+ return NotImplemented
+
+ result = func(self.value, other.value)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return result
+
+ method.__name__ = f"__{name}__"
+ method.__doc__ = (
+ f"Return a {_operation_names[name]} b. Computed by attrs."
+ )
+
+ return method
+
+
+def _is_comparable_to(self, other):
+ """
+ Check whether `other` is comparable to `self`.
+ """
+ return all(func(self, other) for func in self._requirements)
+
+
+def _check_same_type(self, other):
+ """
+ Return True if *self* and *other* are of the same type, False otherwise.
+ """
+ return other.value.__class__ is self.value.__class__
diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_funcs.py b/env-llmeval/lib/python3.10/site-packages/attr/_funcs.py
new file mode 100644
index 0000000000000000000000000000000000000000..a888991d98fdac72abb6e5ce8ac6d620a8f0e54b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/attr/_funcs.py
@@ -0,0 +1,483 @@
+# SPDX-License-Identifier: MIT
+
+
+import copy
+
+from ._compat import PY_3_9_PLUS, get_generic_base
+from ._make import NOTHING, _obj_setattr, fields
+from .exceptions import AttrsAttributeNotFoundError
+
+
+def asdict(
+ inst,
+ recurse=True,
+ filter=None,
+ dict_factory=dict,
+ retain_collection_types=False,
+ value_serializer=None,
+):
+ """
+ Return the *attrs* attribute values of *inst* as a dict.
+
+ Optionally recurse into other *attrs*-decorated classes.
+
+ :param inst: Instance of an *attrs*-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ *attrs*-decorated.
+ :param callable filter: A callable whose return code determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attrs.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable dict_factory: A callable to produce dictionaries from. For
+ example, to produce ordered dictionaries instead of normal Python
+ dictionaries, pass in ``collections.OrderedDict``.
+ :param bool retain_collection_types: Do not convert to ``list`` when
+ encountering an attribute whose type is ``tuple`` or ``set``. Only
+ meaningful if ``recurse`` is ``True``.
+ :param Optional[callable] value_serializer: A hook that is called for every
+ attribute or dict key/value. It receives the current instance, field
+ and value and must return the (updated) value. The hook is run *after*
+ the optional *filter* has been applied.
+
+ :rtype: return type of *dict_factory*
+
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. versionadded:: 16.0.0 *dict_factory*
+ .. versionadded:: 16.1.0 *retain_collection_types*
+ .. versionadded:: 20.3.0 *value_serializer*
+ .. versionadded:: 21.3.0 If a dict has a collection for a key, it is
+ serialized as a tuple.
+ """
+ attrs = fields(inst.__class__)
+ rv = dict_factory()
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+
+ if value_serializer is not None:
+ v = value_serializer(inst, a, v)
+
+ if recurse is True:
+ if has(v.__class__):
+ rv[a.name] = asdict(
+ v,
+ recurse=True,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain_collection_types is True else list
+ items = [
+ _asdict_anything(
+ i,
+ is_key=False,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ for i in v
+ ]
+ try:
+ rv[a.name] = cf(items)
+ except TypeError:
+ if not issubclass(cf, tuple):
+ raise
+ # Workaround for TypeError: cf.__new__() missing 1 required
+ # positional argument (which appears, for a namedtuple)
+ rv[a.name] = cf(*items)
+ elif isinstance(v, dict):
+ df = dict_factory
+ rv[a.name] = df(
+ (
+ _asdict_anything(
+ kk,
+ is_key=True,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ is_key=False,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ )
+ for kk, vv in v.items()
+ )
+ else:
+ rv[a.name] = v
+ else:
+ rv[a.name] = v
+ return rv
+
+
+def _asdict_anything(
+ val,
+ is_key,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+):
+ """
+ ``asdict`` only works on attrs instances; this works on anything.
+ """
+ if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+ # Attrs class.
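+ # The value is itself an *attrs* instance, so serialize it with the
+ # same settings as the enclosing call.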
+ rv = asdict(
+ val,
+ recurse=True,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ elif isinstance(val, (tuple, list, set, frozenset)):
+ if retain_collection_types is True:
+ cf = val.__class__
+ elif is_key:
+ cf = tuple
+ else:
+ cf = list
+
+ rv = cf(
+ [
+ _asdict_anything(
+ i,
+ is_key=False,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ for i in val
+ ]
+ )
+ elif isinstance(val, dict):
+ df = dict_factory
+ rv = df(
+ (
+ _asdict_anything(
+ kk,
+ is_key=True,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ is_key=False,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ )
+ for kk, vv in val.items()
+ )
+ else:
+ rv = val
+ if value_serializer is not None:
+ rv = value_serializer(None, None, rv)
+
+ return rv
+
+
+def astuple(
+ inst,
+ recurse=True,
+ filter=None,
+ tuple_factory=tuple,
+ retain_collection_types=False,
+):
+ """
+ Return the *attrs* attribute values of *inst* as a tuple.
+
+ Optionally recurse into other *attrs*-decorated classes.
+
+ :param inst: Instance of an *attrs*-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ *attrs*-decorated.
+ :param callable filter: A callable whose return code determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attrs.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable tuple_factory: A callable to produce tuples from. For
+ example, pass in ``list`` to produce lists instead of tuples.
+ :param bool retain_collection_types: Do not convert to ``list``
+ or ``dict`` when encountering an attribute whose type is
+ ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
+ ``True``.
+
+ :rtype: return type of *tuple_factory*
+
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. versionadded:: 16.2.0
+ """
+ attrs = fields(inst.__class__)
+ rv = []
+ retain = retain_collection_types # Very long.
:/
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+ if recurse is True:
+ if has(v.__class__):
+ rv.append(
+ astuple(
+ v,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain is True else list
+ items = [
+ astuple(
+ j,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(j.__class__)
+ else j
+ for j in v
+ ]
+ try:
+ rv.append(cf(items))
+ except TypeError:
+ if not issubclass(cf, tuple):
+ raise
+ # Workaround for TypeError: cf.__new__() missing 1 required
+ # positional argument (which appears, for a namedtuple)
+ rv.append(cf(*items))
+ elif isinstance(v, dict):
+ df = v.__class__ if retain is True else dict
+ rv.append(
+ df(
+ (
+ astuple(
+ kk,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(kk.__class__)
+ else kk,
+ astuple(
+ vv,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(vv.__class__)
+ else vv,
+ )
+ for kk, vv in v.items()
+ )
+ )
+ else:
+ rv.append(v)
+ else:
+ rv.append(v)
+
+ return rv if tuple_factory is list else tuple_factory(rv)
+
+
+def has(cls):
+ """
+ Check whether *cls* is a class with *attrs* attributes.
+
+ :param type cls: Class to introspect.
+ :raise TypeError: If *cls* is not a class.
+
+ :rtype: bool
+ """
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is not None:
+ return True
+
+ # No attrs, maybe it's a specialized generic (A[str])?
+ generic_base = get_generic_base(cls)
+ if generic_base is not None:
+ generic_attrs = getattr(generic_base, "__attrs_attrs__", None)
+ if generic_attrs is not None:
+ # Stick it on here for speed next time.
+ cls.__attrs_attrs__ = generic_attrs
+ return generic_attrs is not None
+ return False
+
+
+def assoc(inst, **changes):
+ """
+ Copy *inst* and apply *changes*.
+
+ This is different from `evolve`, which applies the changes to the
+ arguments that create the new instance.
+
+ `evolve`'s behavior is preferable, but there are `edge cases`_ where it
+ doesn't work. Therefore `assoc` is deprecated, but will not be removed.
+
+ .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251
+
+ :param inst: Instance of a class with *attrs* attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+ :raise attrs.exceptions.AttrsAttributeNotFoundError: If *attr_name*
+ couldn't be found on *cls*.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. deprecated:: 17.1.0
+ Use `attrs.evolve` instead if you can.
+ This function will not be removed due to the slightly different approach
+ compared to `attrs.evolve`.
+ """
+ new = copy.copy(inst)
+ attrs = fields(inst.__class__)
+ for k, v in changes.items():
+ a = getattr(attrs, k, NOTHING)
+ if a is NOTHING:
+ msg = f"{k} is not an attrs attribute on {new.__class__}."
+ raise AttrsAttributeNotFoundError(msg)
+ _obj_setattr(new, k, v)
+ return new
+
+
+def evolve(*args, **changes):
+ """
+ Create a new instance, based on the first positional argument with
+ *changes* applied.
+
+ :param inst: Instance of a class with *attrs* attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+ :raise TypeError: If *attr_name* couldn't be found in the class
+ ``__init__``.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. versionadded:: 17.1.0
+ .. deprecated:: 23.1.0
+ It is now deprecated to pass the instance using the keyword argument
+ *inst*. It will raise a warning until at least April 2024, after which
+ it will become an error. Always pass the instance as a positional
+ argument.
+ """
+ # Try to get instance by positional argument first.
+ # Use changes otherwise and warn it'll break.
+ if args:
+ try:
+ (inst,) = args
+ except ValueError:
+ msg = f"evolve() takes 1 positional argument, but {len(args)} were given"
+ raise TypeError(msg) from None
+ else:
+ try:
+ inst = changes.pop("inst")
+ except KeyError:
+ msg = "evolve() missing 1 required positional argument: 'inst'"
+ raise TypeError(msg) from None
+
+ import warnings
+
+ warnings.warn(
+ "Passing the instance per keyword argument is deprecated and "
+ "will stop working in, or after, April 2024.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ cls = inst.__class__
+ attrs = fields(cls)
+ for a in attrs:
+ if not a.init:
+ continue
+ attr_name = a.name # To deal with private attributes.
+ init_name = a.alias
+ if init_name not in changes:
+ changes[init_name] = getattr(inst, attr_name)
+
+ return cls(**changes)
+
+
+def resolve_types(
+ cls, globalns=None, localns=None, attribs=None, include_extras=True
+):
+ """
+ Resolve any strings and forward annotations in type annotations.
+
+ This is only required if you need concrete types in `Attribute`'s *type*
+ field. In other words, you don't need to resolve your types if you only
+ use them for static type checking.
+
+ With no arguments, names will be looked up in the module in which the class
+ was created. If this is not what you want, e.g. if the name only exists
+ inside a method, you may pass *globalns* or *localns* to specify other
+ dictionaries in which to look up these names. See the docs of
+ `typing.get_type_hints` for more details.
+
+ :param type cls: Class to resolve.
+ :param Optional[dict] globalns: Dictionary containing global variables.
+ :param Optional[dict] localns: Dictionary containing local variables.
+ :param Optional[list] attribs: List of attribs for the given class.
+ This is necessary when calling from inside a ``field_transformer``
+ since *cls* is not an *attrs* class yet.
+ :param bool include_extras: Resolve more accurately, if possible.
+ Pass ``include_extras`` to ``typing.get_type_hints``, if supported by
+ the typing module. On supported Python versions (3.9+), this resolves
+ the types more accurately.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class and you didn't pass any attribs.
+ :raise NameError: If types cannot be resolved because of missing variables.
+
+ :returns: *cls* so you can use this function also as a class decorator.
+ Please note that you have to apply it **after** `attrs.define`. That
+ means the decorator has to come in the line **before** `attrs.define`.
+
+ .. versionadded:: 20.1.0
+ .. versionadded:: 21.1.0 *attribs*
+ .. versionadded:: 23.1.0 *include_extras*
+
+ """
+ # Since calling get_type_hints is expensive, we cache whether we've
+ # done it already.
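+ # Illustrative usage (a sketch; ``Node`` is a hypothetical class).
+ # Per the docstring above, resolve_types is applied *after*, i.e. on the
+ # line above, the define decorator:
+ #
+ # @attr.resolve_types
+ # @attr.define
+ # class Node:
+ # next: "Node | None" = None
+ #
+ # Afterwards, ``attr.fields(Node).next.type`` is the resolved typing
+ # object rather than the raw string. (The ``__attrs_types_resolved__``
+ # check below provides the cache.)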
+ if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + kwargs = {"globalns": globalns, "localns": localns} + + if PY_3_9_PLUS: + kwargs["include_extras"] = include_extras + + hints = typing.get_type_hints(cls, **kwargs) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _obj_setattr(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too. + return cls diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_make.py b/env-llmeval/lib/python3.10/site-packages/attr/_make.py new file mode 100644 index 0000000000000000000000000000000000000000..10b4eca779621c819060c4564379fa2c098c36d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_make.py @@ -0,0 +1,3119 @@ +# SPDX-License-Identifier: MIT + +import contextlib +import copy +import enum +import functools +import inspect +import itertools +import linecache +import sys +import types +import typing + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + PY310, + PY_3_8_PLUS, + _AnnotationExtractor, + get_generic_base, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. +_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_%s" +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when ``None`` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. 
+ """ + + def __reduce__(self, _none_constructor=type(None), _args=()): # noqa: B008 + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with `attr.s` + / `attrs.define` / and so on! + + Please consider using `attrs.field` in new code (``attr.ib`` will *never* + go away, though). + + :param default: A value that is used if an *attrs*-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` will be + raised. + + The default can also be set using decorator notation as shown below. + + .. seealso:: `defaults` + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by *attrs*-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must all + pass. + + Validators can be globally disabled and re-enabled using + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. + + The validator can also be set using decorator notation as shown below. + + .. seealso:: :ref:`validators` + + :type validator: `callable` or a `list` of `callable`\\ s. + + :param repr: Include this attribute in the generated ``__repr__`` method. + If ``True``, include the attribute; if ``False``, omit it. By default, + the built-in ``repr()`` function is used. To override how the attribute + value is formatted, pass a ``callable`` that takes a single value and + returns a string. Note that the resulting string is used as-is, i.e. it + will be used directly *instead* of calling ``repr()`` (the default). + :type repr: a `bool` or a `callable` to use a custom function. + + :param eq: If ``True`` (default), include this attribute in the generated + ``__eq__`` and ``__ne__`` methods that check two instances for + equality. To override how the attribute value is compared, pass a + ``callable`` that takes a single value and returns the value to be + compared. + + .. seealso:: `comparison` + :type eq: a `bool` or a `callable`. + + :param order: If ``True`` (default), include this attributes in the + generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. To + override how the attribute value is ordered, pass a ``callable`` that + takes a single value and returns the value to be ordered. + + .. seealso:: `comparison` + :type order: a `bool` or a `callable`. + + :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the + same value. Must not be mixed with *eq* or *order*. + + .. seealso:: `comparison` + :type cmp: a `bool` or a `callable`. + + :param bool | None hash: Include this attribute in the generated + ``__hash__`` method. 
If ``None`` (default), mirror *eq*'s value. This
+ is the correct behavior according to the Python spec. Setting this
+ value to anything other than ``None`` is *discouraged*.
+
+ .. seealso:: `hashing`
+ :param bool init: Include this attribute in the generated ``__init__``
+ method. It is possible to set this to ``False`` and set a default
+ value. In that case this attribute is unconditionally initialized
+ with the specified default value or factory.
+
+ .. seealso:: `init`
+ :param callable converter: `callable` that is called by *attrs*-generated
+ ``__init__`` methods to convert the attribute's value to the desired
+ format. It is given the passed-in value, and the returned value will
+ be used as the new value of the attribute. The value is converted
+ before being passed to the validator, if any.
+
+ .. seealso:: :ref:`converters`
+ :param dict | None metadata: An arbitrary mapping, to be used by
+ third-party components. See `extending-metadata`.
+
+ :param type: The type of the attribute. Nowadays, the preferred method to
+ specify the type is using a variable annotation (see :pep:`526`). This
+ argument is provided for backward compatibility. Regardless of the
+ approach used, the type will be stored on ``Attribute.type``.
+
+ Please note that *attrs* doesn't do anything with this metadata by
+ itself. You can use it as part of your own code or for `static type
+ checking `.
+ :param bool kw_only: Make this attribute keyword-only in the generated
+ ``__init__`` (if ``init`` is ``False``, this parameter is ignored).
+ :param on_setattr: Allows overwriting the *on_setattr* setting from
+ `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
+ Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
+ attribute -- regardless of the setting in `attr.s`.
+ :type on_setattr: `callable`, or a list of callables, or `None`, or
+ `attrs.setters.NO_OP`
+ :param str | None alias: Override this attribute's parameter name in the
+ generated ``__init__`` method. If left `None`, default to ``name``
+ stripped of leading underscores. See `private-attributes`.
+
+ .. versionadded:: 15.2.0 *convert*
+ .. versionadded:: 16.3.0 *metadata*
+ .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+ .. versionchanged:: 17.1.0
+ *hash* is ``None`` and therefore mirrors *eq* by default.
+ .. versionadded:: 17.3.0 *type*
+ .. deprecated:: 17.4.0 *convert*
+ .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
+ *convert* to achieve consistency with other noun-based arguments.
+ .. versionadded:: 18.1.0
+ ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionchanged:: 19.2.0 *convert* keyword argument removed.
+ .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
+ .. versionchanged:: 21.1.0
+ *eq*, *order*, and *cmp* also accept a custom callable
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
+ .. versionadded:: 22.2.0 *alias*
+ """
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq, order, True
+ )
+
+ if hash is not None and hash is not True and hash is not False:
+ msg = "Invalid value for hash. Must be True, False, or None."
+ raise TypeError(msg)
+
+ if factory is not None:
+ if default is not NOTHING:
+ msg = (
+ "The `default` and `factory` arguments are mutually exclusive."
+ )
+ raise ValueError(msg)
+ if not callable(factory):
+ msg = "The `factory` argument must be a callable."
+ raise ValueError(msg)
+ default = Factory(factory)
+
+ if metadata is None:
+ metadata = {}
+
+ # Apply syntactic sugar by auto-wrapping.
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ if validator and isinstance(validator, (list, tuple)):
+ validator = and_(*validator)
+
+ if converter and isinstance(converter, (list, tuple)):
+ converter = pipe(*converter)
+
+ return _CountingAttr(
+ default=default,
+ validator=validator,
+ repr=repr,
+ cmp=None,
+ hash=hash,
+ init=init,
+ converter=converter,
+ metadata=metadata,
+ type=type,
+ kw_only=kw_only,
+ eq=eq,
+ eq_key=eq_key,
+ order=order,
+ order_key=order_key,
+ on_setattr=on_setattr,
+ alias=alias,
+ )
+
+
+def _compile_and_eval(script, globs, locs=None, filename=""):
+ """
+ "Exec" the script with the given global (globs) and local (locs) variables.
+ """
+ bytecode = compile(script, filename, "exec")
+ eval(bytecode, globs, locs)
+
+
+def _make_method(name, script, filename, globs):
+ """
+ Create the method with the script given and return the method object.
+ """
+ locs = {}
+
+ # In order for debuggers like PDB to be able to step through the code,
+ # we add a fake linecache entry.
+ count = 1
+ base_filename = filename
+ while True:
+ linecache_tuple = (
+ len(script),
+ None,
+ script.splitlines(True),
+ filename,
+ )
+ old_val = linecache.cache.setdefault(filename, linecache_tuple)
+ if old_val == linecache_tuple:
+ break
+
+ filename = f"{base_filename[:-1]}-{count}>"
+ count += 1
+
+ _compile_and_eval(script, globs, locs, filename)
+
+ return locs[name]
+
+
+def _make_attr_tuple_class(cls_name, attr_names):
+ """
+ Create a tuple subclass to hold `Attribute`s for an `attrs` class.
+
+ The subclass is a bare tuple with properties for names.
+
+ class MyClassAttributes(tuple):
+ __slots__ = ()
+ x = property(itemgetter(0))
+ """
+ attr_class_name = f"{cls_name}Attributes"
+ attr_class_template = [
+ f"class {attr_class_name}(tuple):",
+ " __slots__ = ()",
+ ]
+ if attr_names:
+ for i, attr_name in enumerate(attr_names):
+ attr_class_template.append(
+ f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))"
+ )
+ else:
+ attr_class_template.append(" pass")
+ globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
+ _compile_and_eval("\n".join(attr_class_template), globs)
+ return globs[attr_class_name]
+
+
+# Tuple class for extracted attributes from a class definition.
+# `base_attrs` is a subset of `attrs`.
+_Attributes = _make_attr_tuple_class(
+ "_Attributes",
+ [
+ # all attributes to build dunder methods for
+ "attrs",
+ # attributes that have been inherited
+ "base_attrs",
+ # map inherited attributes to their originating classes
+ "base_attrs_map",
+ ],
+)
+
+
+def _is_class_var(annot):
+ """
+ Check whether *annot* is a typing.ClassVar.
+
+ The string comparison hack is used to avoid evaluating all string
+ annotations, which would put attrs-based classes at a performance
+ disadvantage compared to plain old classes.
+ """
+ annot = str(annot)
+
+ # Annotation can be quoted.
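+ # e.g. under ``from __future__ import annotations`` the source text is
+ # preserved, so ``x: "ClassVar[int]"`` arrives here with its quotes intact.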
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
+ annot = annot[1:-1]
+
+ return annot.startswith(_classvar_prefixes)
+
+
+def _has_own_attribute(cls, attrib_name):
+ """
+ Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
+ """
+ attr = getattr(cls, attrib_name, _sentinel)
+ if attr is _sentinel:
+ return False
+
+ for base_cls in cls.__mro__[1:]:
+ a = getattr(base_cls, attrib_name, None)
+ if attr is a:
+ return False
+
+ return True
+
+
+def _get_annotations(cls):
+ """
+ Get annotations for *cls*.
+ """
+ if _has_own_attribute(cls, "__annotations__"):
+ return cls.__annotations__
+
+ return {}
+
+
+def _collect_base_attrs(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in reversed(cls.__mro__[1:-1]):
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.inherited or a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True) # noqa: PLW2901
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ # For each name, only keep the freshest definition, i.e. the furthest at
+ # the back. base_attr_map is fine because it gets overwritten with every
+ # new instance.
+ filtered = []
+ seen = set()
+ for a in reversed(base_attrs):
+ if a.name in seen:
+ continue
+ filtered.insert(0, a)
+ seen.add(a.name)
+
+ return filtered, base_attr_map
+
+
+def _collect_base_attrs_broken(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+
+ N.B. *taken_attr_names* will be mutated.
+
+ Adhere to the old incorrect behavior.
+
+ Notably it collects from the front and considers inherited attributes,
+ which leads to the buggy behavior reported in #428.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in cls.__mro__[1:-1]:
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True) # noqa: PLW2901
+ taken_attr_names.add(a.name)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ return base_attrs, base_attr_map
+
+
+def _transform_attrs(
+ cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
+):
+ """
+ Transform all `_CountingAttr`s on a class into `Attribute`s.
+
+ If *these* is passed, use that and don't look for them on the class.
+
+ If *collect_by_mro* is True, collect them in the correct MRO order;
+ otherwise use the old -- incorrect -- order. See #428.
+
+ Return an `_Attributes`.
+ """
+ cd = cls.__dict__
+ anns = _get_annotations(cls)
+
+ if these is not None:
+ ca_list = list(these.items())
+ elif auto_attribs is True:
+ ca_names = {
+ name
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ }
+ ca_list = []
+ annot_names = set()
+ for attr_name, type in anns.items():
+ if _is_class_var(type):
+ continue
+ annot_names.add(attr_name)
+ a = cd.get(attr_name, NOTHING)
+
+ if not isinstance(a, _CountingAttr):
+ a = attrib() if a is NOTHING else attrib(default=a)
+ ca_list.append((attr_name, a))
+
+ unannotated = ca_names - annot_names
+ if len(unannotated) > 0:
+ raise UnannotatedAttributeError(
+ "The following `attr.ib`s lack a type annotation: "
+ + ", ".join(
+ sorted(unannotated, key=lambda n: cd.get(n).counter)
+ )
+ + "."
+ ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + msg = f"No mandatory attributes allowed after an attribute with a default value or factory. Attribute in question: {a!r}" + raise ValueError(msg) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. + attrs = [ + a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a + for a in attrs + ] + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +def _make_cached_property_getattr( + cached_properties, + original_getattr, + cls, +): + lines = [ + # Wrapped to get `__class__` into closure cell for super() + # (It will be replaced with the newly constructed class after construction). + "def wrapper():", + " __class__ = _cls", + " def __getattr__(self, item, cached_properties=cached_properties, original_getattr=original_getattr, _cached_setattr_get=_cached_setattr_get):", + " func = cached_properties.get(item)", + " if func is not None:", + " result = func(self)", + " _setter = _cached_setattr_get(self)", + " _setter(item, result)", + " return result", + ] + if original_getattr is not None: + lines.append( + " return original_getattr(self, item)", + ) + else: + lines.extend( + [ + " if hasattr(super(), '__getattr__'):", + " return super().__getattr__(item)", + " original_error = f\"'{self.__class__.__name__}' object has no attribute '{item}'\"", + " raise AttributeError(original_error)", + ] + ) + + lines.extend( + [ + " return __getattr__", + "__getattr__ = wrapper()", + ] + ) + + unique_filename = _generate_unique_filename(cls, "getattr") + + glob = { + "cached_properties": cached_properties, + "_cached_setattr_get": _obj_setattr.__get__, + "_cls": cls, + "original_getattr": original_getattr, + } + + return _make_method( + "__getattr__", + "\n".join(lines), + unique_filename, + glob, + ) + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. 
+ """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder: + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_pre_init_has_args", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._pre_init_has_args = False + if self._has_pre_init: + # Check if the pre init method has more arguments than just `self` + # We want to pass arguments if pre init expects arguments + pre_init_func = cls.__attrs_pre_init__ + pre_init_signature = inspect.signature(pre_init_func) + self._pre_init_has_args = len(pre_init_signature.parameters) > 1 + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + if PY310: + import abc + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. 
+ """ + if self._slots is True: + return self._create_slots_class() + + return self.abc.update_abstractmethods( + self._patch_original_class() + ) + + else: + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + # An AttributeError can happen if a base class defines a + # class variable and we want to set an attribute with the + # same name by using only a type annotation. + with contextlib.suppress(AttributeError): + delattr(cls, name) + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _obj_setattr + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in (*tuple(self._attr_names), "__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _obj_setattr + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = {} + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + if PY_3_8_PLUS: + cached_properties = { + name: cached_property.func + for name, cached_property in cd.items() + if isinstance(cached_property, functools.cached_property) + } + else: + # `functools.cached_property` was introduced in 3.8. + # So can't be used before this. + cached_properties = {} + + # Collect methods with a `__class__` reference that are shadowed in the new class. + # To know to update them. 
+ additional_closure_functions_to_update = [] + if cached_properties: + # Add cached properties to names for slotting. + names += tuple(cached_properties.keys()) + + for name in cached_properties: + # Clear out function from class to avoid clashing. + del cd[name] + + class_annotations = _get_annotations(self._cls) + for name, func in cached_properties.items(): + annotation = inspect.signature(func).return_annotation + if annotation is not inspect.Parameter.empty: + class_annotations[name] = annotation + + original_getattr = cd.get("__getattr__") + if original_getattr is not None: + additional_closure_functions_to_update.append(original_getattr) + + cd["__getattr__"] = _make_cached_property_getattr( + cached_properties, original_getattr, self._cls + ) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in existing_slots.items() + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_hash_cache_field) + + cd["__slots__"] = tuple(slot_names) + + cd["__qualname__"] = self._cls.__qualname__ + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . + # If a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in itertools.chain( + cls.__dict__.values(), additional_closure_functions_to_update + ): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # noqa: PERF203 + # ValueError: Cell is empty + pass + else: + if match: + cell.cell_contents = cls + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + msg = "__str__ can only be generated if a __repr__ exists." + raise ValueError(msg) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. 
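+ # It is therefore excluded from the pickled state below; weak references
+ # are simply not restored on unpickling.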
+ state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return {name: getattr(self, name) for name in state_attr_names} + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. + if hash_caching_enabled: + __bound_setattr(_hash_cache_field, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + msg = "Can't combine custom __setattr__ with on_setattr hooks." 
+ raise ValueError(msg)
+
+ # docstring comes from _add_method_dunders
+ def __setattr__(self, name, val):
+ try:
+ a, hook = sa_attrs[name]
+ except KeyError:
+ nval = val
+ else:
+ nval = hook(self, a, val)
+
+ _obj_setattr(self, name, nval)
+
+ self._cls_dict["__attrs_own_setattr__"] = True
+ self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
+ self._wrote_own_setattr = True
+
+ return self
+
+ def _add_method_dunders(self, method):
+ """
+ Add __module__ and __qualname__ to a *method* if possible.
+ """
+ with contextlib.suppress(AttributeError):
+ method.__module__ = self._cls.__module__
+
+ with contextlib.suppress(AttributeError):
+ method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}"
+
+ with contextlib.suppress(AttributeError):
+ method.__doc__ = (
+ "Method generated by attrs for class "
+ f"{self._cls.__qualname__}."
+ )
+
+ return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+ msg = "Don't mix `cmp` with `eq` and `order`."
+ raise ValueError(msg)
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ return cmp, cmp
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq = default_eq
+
+ if order is None:
+ order = eq
+
+ if eq is False and order is True:
+ msg = "`order` can only be True if `eq` is True too."
+ raise ValueError(msg)
+
+ return eq, order
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+ msg = "Don't mix `cmp` with `eq` and `order`."
+ raise ValueError(msg)
+
+ def decide_callable_or_boolean(value):
+ """
+ Decide whether a key function is used.
+ """
+ if callable(value):
+ value, key = True, value
+ else:
+ key = None
+ return value, key
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ cmp, cmp_key = decide_callable_or_boolean(cmp)
+ return cmp, cmp_key, cmp, cmp_key
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq, eq_key = default_eq, None
+ else:
+ eq, eq_key = decide_callable_or_boolean(eq)
+
+ if order is None:
+ order, order_key = eq, eq_key
+ else:
+ order, order_key = decide_callable_or_boolean(order)
+
+ if eq is False and order is True:
+ msg = "`order` can only be True if `eq` is True too."
+ raise ValueError(msg)
+
+ return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+ cls, flag, auto_detect, dunders, default=True
+):
+ """
+ Check whether we should implement a set of methods for *cls*.
+
+ *flag* is the argument passed into @attr.s like 'init', *auto_detect* the
+ same as passed into @attr.s and *dunders* is a tuple of attribute names
+ whose presence signals that the user has implemented it themselves.
+
+ Return *default* if no reason either for or against is found.
+ """
+ if flag is True or flag is False:
+ return flag
+
+ if flag is None and auto_detect is False:
+ return default
+
+ # Logically, flag is None and auto_detect is True here.
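+ # e.g. with @attr.s(auto_detect=True), a class that defines its own
+ # __repr__ is detected below and keeps it (repr is deduced to False).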
+    for dunder in dunders:
+        if _has_own_attribute(cls, dunder):
+            return False
+
+    return default
+
+
+def attrs(
+    maybe_cls=None,
+    these=None,
+    repr_ns=None,
+    repr=None,
+    cmp=None,
+    hash=None,
+    init=None,
+    slots=False,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=False,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=False,
+    eq=None,
+    order=None,
+    auto_detect=False,
+    collect_by_mro=False,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+    unsafe_hash=None,
+):
+    r"""
+    A class decorator that adds :term:`dunder methods` according to the
+    specified attributes using `attr.ib` or the *these* argument.
+
+    Please consider using `attrs.define` / `attrs.frozen` in new code
+    (``attr.s`` will *never* go away, though).
+
+    :param these: A dictionary of name to `attr.ib` mappings. This is useful
+        to avoid the definition of your attributes within the class body
+        because you can't (e.g. if you want to add ``__repr__`` methods to
+        Django models) or don't want to.
+
+        If *these* is not ``None``, *attrs* will *not* search the class body
+        for attributes and will *not* remove any attributes from it.
+
+        The order is deduced from the order of the attributes inside *these*.
+
+    :type these: `dict` of `str` to `attr.ib`
+
+    :param str repr_ns: When using nested classes, there's no way in Python 2
+        to automatically detect that. Therefore it's possible to set the
+        namespace explicitly for a more meaningful ``repr`` output.
+    :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+        *order*, and *hash* arguments explicitly, assume they are set to
+        ``True`` **unless any** of the involved methods for one of the
+        arguments is implemented in the *current* class (i.e. it is *not*
+        inherited from some base class).
+
+        So for example by implementing ``__eq__`` on a class yourself, *attrs*
+        will deduce ``eq=False`` and will create *neither* ``__eq__`` *nor*
+        ``__ne__`` (but Python classes come with a sensible ``__ne__`` by
+        default, so it *should* be enough to only implement ``__eq__`` in most
+        cases).
+
+        .. warning::
+
+           If you prevent *attrs* from creating the ordering methods for you
+           (``order=False``, e.g. by implementing ``__le__``), it becomes
+           *your* responsibility to make sure its ordering is sound. The best
+           way is to use the `functools.total_ordering` decorator.
+
+
+        Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, *cmp*,
+        or *hash* overrides whatever *auto_detect* would determine.
+
+    :param bool repr: Create a ``__repr__`` method with a human-readable
+        representation of *attrs* attributes.
+    :param bool str: Create a ``__str__`` method that is identical to
+        ``__repr__``. This is usually not necessary except for `Exception`\ s.
+    :param bool | None eq: If ``True`` or ``None`` (default), add ``__eq__``
+        and ``__ne__`` methods that check two instances for equality.
+
+        They compare the instances as if they were tuples of their *attrs*
+        attributes if and only if the types of both classes are *identical*!
+
+        .. seealso:: `comparison`
+    :param bool | None order: If ``True``, add ``__lt__``, ``__le__``,
+        ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+        allow instances to be ordered. If ``None`` (default) mirror value of
+        *eq*.
+
+        .. seealso:: `comparison`
+    :param bool | None cmp: Setting *cmp* is equivalent to setting *eq* and
+        *order* to the same value. Must not be mixed with *eq* or *order*.
+
+        .. seealso:: `comparison`
+    :param bool | None unsafe_hash: If ``None`` (default), the ``__hash__``
+        method is generated according to how *eq* and *frozen* are set.
+
+        1. If *both* are True, *attrs* will generate a ``__hash__`` for you.
+        2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
+           None, marking it unhashable (which it is).
+        3. If *eq* is False, ``__hash__`` will be left untouched meaning the
+           ``__hash__`` method of the base class will be used (if base class
+           is ``object``, this means it will fall back to id-based hashing).
+
+        Although not recommended, you can decide for yourself and force
+        *attrs* to create one (e.g. if the class is immutable even though you
+        didn't freeze it programmatically) by passing ``True`` or not. Both of
+        these cases are rather special and should be used carefully.
+
+        .. seealso::
+
+            - Our documentation on `hashing`,
+            - Python's documentation on `object.__hash__`,
+            - and the `GitHub issue that led to the default \
+              behavior <https://github.com/python-attrs/attrs/issues/136>`_
+              for more details.
+
+    :param bool | None hash: Alias for *unsafe_hash*. *unsafe_hash* takes
+        precedence.
+    :param bool init: Create an ``__init__`` method that initializes the
+        *attrs* attributes. Leading underscores are stripped for the argument
+        name. If a ``__attrs_pre_init__`` method exists on the class, it will
+        be called before the class is initialized. If a
+        ``__attrs_post_init__`` method exists on the class, it will be called
+        after the class is fully initialized.
+
+        If ``init`` is ``False``, an ``__attrs_init__`` method will be
+        injected instead. This allows you to define a custom ``__init__``
+        method that can do pre-init work such as ``super().__init__()``, and
+        then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+
+        .. seealso:: `init`
+    :param bool slots: Create a :term:`slotted class <slotted classes>` that's
+        more memory-efficient. Slotted classes are generally superior to the
+        default dict classes, but have some gotchas you should know about, so
+        we encourage you to read the :term:`glossary entry <slotted classes>`.
+    :param bool frozen: Make instances immutable after initialization. If
+        someone attempts to modify a frozen instance,
+        `attrs.exceptions.FrozenInstanceError` is raised.
+
+        .. note::
+
+            1. This is achieved by installing a custom ``__setattr__`` method
+               on your class, so you can't implement your own.
+
+            2. True immutability is impossible in Python.
+
+            3. This *does* have a minor runtime performance `impact
+               <how-frozen>` when initializing new instances. In other words:
+               ``__init__`` is slightly slower with ``frozen=True``.
+
+            4. If a class is frozen, you cannot modify ``self`` in
+               ``__attrs_post_init__`` or a self-written ``__init__``. You can
+               circumvent that limitation by using ``object.__setattr__(self,
+               "attribute_name", value)``.
+
+            5. Subclasses of a frozen class are frozen too.
+
+    :param bool weakref_slot: Make instances weak-referenceable. This has no
+        effect unless ``slots`` is also enabled.
+    :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
+        attributes from the class body.
+
+        In this case, you **must** annotate every field. If *attrs* encounters
+        a field that is set to an `attr.ib` but lacks a type annotation, an
+        `attr.exceptions.UnannotatedAttributeError` is raised. Use
+        ``field_name: typing.Any = attr.ib(...)`` if you don't want to set a
+        type.
+
+        If you assign a value to those attributes (e.g. ``x: int = 42``), that
+        value becomes the default value as if it were passed using
+        ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
+        works as expected in most cases (see warning below).
+
+        Attributes annotated as `typing.ClassVar`, and attributes that are
+        neither annotated nor set to an `attr.ib` are **ignored**.
+
+        .. warning::
+            For features that use the attribute name to create decorators
+            (e.g. :ref:`validators <validators>`), you still *must* assign
+            `attr.ib` to them. Otherwise Python will either not find the name
+            or try to use the default value to call e.g. ``validator`` on it.
+
+            These errors can be quite confusing and are probably the most
+            common bug report on our bug tracker.
+
+    :param bool kw_only: Make all attributes keyword-only in the generated
+        ``__init__`` (if ``init`` is ``False``, this parameter is ignored).
+    :param bool cache_hash: Ensure that the object's hash code is computed
+        only once and stored on the object. If this is set to ``True``,
+        hashing must be either explicitly or implicitly enabled for this
+        class. If the hash code is cached, avoid any reassignments of fields
+        involved in hash code computation or mutations of the objects those
+        fields point to after object creation. If such changes occur, the
+        behavior of the object's hash code is undefined.
+    :param bool auto_exc: If the class subclasses `BaseException` (which
+        implicitly includes any subclass of any exception), the following
+        happens to behave like a well-behaved Python exception class:
+
+        - the values for *eq*, *order*, and *hash* are ignored and the
+          instances compare and hash by the instance's ids (N.B. *attrs* will
+          *not* remove existing implementations of ``__hash__`` or the
+          equality methods; it just won't add its own.),
+        - all attributes that are either passed into ``__init__`` or have a
+          default value are additionally available as a tuple in the ``args``
+          attribute,
+        - the value of *str* is ignored leaving ``__str__`` to base classes.
+    :param bool collect_by_mro: Setting this to `True` fixes the way *attrs*
+        collects attributes from base classes. The default behavior is
+        incorrect in certain cases of multiple inheritance. It should be on by
+        default but is kept off for backward-compatibility.
+
+        .. seealso::
+            Issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_
+
+    :param bool | None getstate_setstate:
+        .. note::
+            This is usually only interesting for slotted classes and you
+            should probably just set *auto_detect* to `True`.
+
+        If `True`, ``__getstate__`` and ``__setstate__`` are generated and
+        attached to the class. This is necessary for slotted classes to be
+        pickleable. If left `None`, it's `True` by default for slotted classes
+        and ``False`` for dict classes.
+
+        If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+        and **either** ``__getstate__`` or ``__setstate__`` is detected
+        directly on the class (i.e. not inherited), it is set to `False` (this
+        is usually what you want).
+
+    :param on_setattr: A callable that is run whenever the user attempts to
+        set an attribute (either by assignment like ``i.x = 42`` or by using
+        `setattr` like ``setattr(i, "x", 42)``). It receives the same
+        arguments as validators: the instance, the attribute that is being
+        modified, and the new value.
+
+        If no exception is raised, the attribute is set to the return value of
+        the callable.
+
+        If a list of callables is passed, they're automatically wrapped in an
+        `attrs.setters.pipe`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+
+    :param callable | None field_transformer:
+        A function that is called with the original class object and all
+        fields right before *attrs* finalizes the class. You can use this,
+        e.g., to automatically add converters or validators to fields based on
+        their types.
+
+        .. seealso:: `transform-fields`
+
+    :param bool match_args:
+        If `True` (default), set ``__match_args__`` on the class to support
+        :pep:`634` (Structural Pattern Matching). It is a tuple of all
+        non-keyword-only ``__init__`` parameter names on Python 3.10 and
+        later. Ignored on older Python versions.
+
+    .. versionadded:: 16.0.0 *slots*
+    .. versionadded:: 16.1.0 *frozen*
+    .. versionadded:: 16.3.0 *str*
+    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+    .. versionchanged:: 17.1.0
+       *hash* supports ``None`` as value which is also the default now.
+    .. versionadded:: 17.3.0 *auto_attribs*
+    .. versionchanged:: 18.1.0
+       If *these* is passed, no attributes are deleted from the class body.
+    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+    .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+       `DeprecationWarning` if the classes compared are subclasses of each
+       other. ``__eq__`` and ``__ne__`` never tried to compare subclasses to
+       each other.
+    .. versionchanged:: 19.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+       subclasses comparable anymore.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionadded:: 18.2.0 *cache_hash*
+    .. versionadded:: 19.1.0 *auto_exc*
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
+    .. versionadded:: 20.1.0 *auto_detect*
+    .. versionadded:: 20.1.0 *collect_by_mro*
+    .. versionadded:: 20.1.0 *getstate_setstate*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionadded:: 20.3.0 *field_transformer*
+    .. versionchanged:: 21.1.0
+       ``init=False`` injects ``__attrs_init__``
+    .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+    .. versionchanged:: 21.1.0 *cmp* undeprecated
+    .. versionadded:: 21.3.0 *match_args*
+    .. versionadded:: 22.2.0
+       *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
+    """
+    eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
+
+    # unsafe_hash takes precedence due to PEP 681.
+    if unsafe_hash is not None:
+        hash = unsafe_hash
+
+    if isinstance(on_setattr, (list, tuple)):
+        on_setattr = setters.pipe(*on_setattr)
+
+    def wrap(cls):
+        is_frozen = frozen or _has_frozen_base_class(cls)
+        is_exc = auto_exc is True and issubclass(cls, BaseException)
+        has_own_setattr = auto_detect and _has_own_attribute(
+            cls, "__setattr__"
+        )
+
+        if has_own_setattr and is_frozen:
+            msg = "Can't freeze a class with a custom __setattr__."
+ raise ValueError(msg) + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + nonlocal hash + if ( + hash is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, init must be True." + raise TypeError(msg) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ is _frozen_setattrs + + +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. 
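+    (Editor's note, illustrative: the result looks like
+    "<attrs generated hash mymodule.C>" and is used as the filename under
+    which the generated source is registered, so tracebacks can point at it.)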
+ """ + return ( + f"" + ) + + +def _make_hash(cls, attrs, frozen, cache_hash): + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + unique_filename = _generate_unique_filename(cls, "hash") + type_hash = hash(unique_filename) + # If eq is custom generated, we need to include the functions in globs + globs = {} + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + hash_def += ", *" + + hash_def += ", _cache_wrapper=__import__('attr._make')._make._CacheHashWrapper):" + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + f" {type_hash},", + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + f"if self.{_hash_cache_field} is None:") + if frozen: + append_hash_computation_lines( + f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + f"self.{_hash_cache_field} = ", tab * 2 + ) + method_lines.append(tab + f"return self.{_hash_cache_field}") + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return _make_method("__hash__", script, unique_filename, globs) + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) + return cls + + +def _make_ne(): + """ + Create __ne__ method. + """ + + def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + return __ne__ + + +def _make_eq(cls, attrs): + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + unique_filename = _generate_unique_filename(cls, "eq") + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. + globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append(f" {cmp_name}(self.{a.name}),") + others.append(f" {cmp_name}(other.{a.name}),") + else: + lines.append(f" self.{a.name},") + others.append(f" other.{a.name},") + + lines += [*others, " )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. 
+ """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." + name if i else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of *attrs* attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. 
+ + :rtype: tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. + """ + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + + attrs = getattr(cls, "__attrs_attrs__", None) + + if attrs is None: + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. + cls.__attrs_attrs__ = attrs + return attrs + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of *attrs* attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + :rtype: dict + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + return {a.name: a for a in attrs} + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + :param inst: Instance of a class with *attrs* attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + pre_init_has_args, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. 
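+        # Editor's sketch (illustrative, not part of attrs): this matters for
+        # string annotations. Given a hypothetical ``x: "Point" = attr.ib()``
+        # on a class C in module m, resolving
+        #
+        #     typing.get_type_hints(m.C.__init__)
+        #
+        # can only turn the string "Point" into the class if m's namespace is
+        # visible from __init__'s globals, hence the merge below.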
+ globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _obj_setattr.__get__ + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr('%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. + # Note _setattr will be used again below if cache_hash is True + "_setattr = _cached_setattr_get(self)" + ) + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. 
+ # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + maybe_self = "self" if has_factory and a.default.takes_self else "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat % (a.name,) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + elif a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _init_factory_pat % (a.name,) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and a.converter is None: + annotations[arg_name] = a.type + elif a.converter is not None: + # Try to get the type from the converter. + t = _AnnotationExtractor(a.converter).get_first_param_type() + if t: + annotations[arg_name] = t + + if attrs_to_validate: # we can skip this if there are no validators. 
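+        # Editor's sketch (illustrative): for a field ``x`` the loop below
+        # emits roughly
+        #
+        #     if _config._run_validators is True:
+        #         __attr_validator_x(self, __attr_x, self.x)
+        #
+        # with the validator and the Attribute instance injected into the
+        # generated __init__'s globals under those names.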
+ names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append(f" {val_name}(self, {attr_name}, self.{a.name})") + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init__ is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: # noqa: SIM108 + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr('%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join(f"self.{a.name}" for a in attrs if a.init) + + lines.append(f"BaseException.__init__(self, {vals})") + + args = ", ".join(args) + pre_init_args = args + if kw_only_args: + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) + pre_init_kw_only_args = ", ".join( + ["%s=%s" % (kw_arg, kw_arg) for kw_arg in kw_only_args] + ) + pre_init_args += ( + ", " if pre_init_args else "" + ) # handle only kwargs and no regular args + pre_init_args += pre_init_kw_only_args + + if pre_init and pre_init_has_args: + # If pre init method has arguments, pass same arguments as `__init__` + lines[0] = "self.__attrs_pre_init__(%s)" % pre_init_args + + return ( + "def %s(self, %s):\n %s\n" + % ( + ("__attrs_init__" if attrs_init else "__init__"), + args, + "\n ".join(lines) if lines else "pass", + ), + names_for_globals, + annotations, + ) + + +def _default_init_alias_for(name: str) -> str: + """ + The default __init__ parameter name for a field. + + This performs private-name adjustment via leading-unscore stripping, + and is the default value of Attribute.alias if not provided. + """ + + return name.lstrip("_") + + +class Attribute: + """ + *Read-only* representation of an attribute. + + .. warning:: + + You should never instantiate this class yourself. + + The class has *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)`` plus the + following: + + - ``name`` (`str`): The name of the attribute. + - ``alias`` (`str`): The __init__ parameter name of the attribute, after + any explicit overrides and default private-attribute-name handling. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. See also :ref:`comparison customization + `. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. 
+ - The ``alias`` property exposes the __init__ parameter name of the field, + with any overrides and default private-attribute handling applied. + + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + .. versionadded:: 22.2.0 *alias* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + "alias", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + alias=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + bound_setattr("alias", alias) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + msg = "Type annotation and type argument cannot both be present" + raise ValueError(msg) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict, + ) + + # Don't use attrs.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attrs.evolve` but that function does not work + with `Attribute`. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. 
+ """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + types.MappingProxyType(dict(value)) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + "alias", + ) + __attrs_attrs__ = ( + *tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ), + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. 
+ + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class( + name, attrs, bases=(object,), class_body=None, **attributes_arguments +): + r""" + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + `attr.ib`\ s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes inside + *attrs*. Otherwise the order of the definition of the attributes is + used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param dict class_body: An optional dictionary of class attributes for the new class. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + .. versionchanged:: 23.2.0 *class_body* + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + msg = "attrs argument must be a dict or a list." + raise TypeError(msg) + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if class_body is not None: + body.update(class_body) + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + with contextlib.suppress(AttributeError, ValueError): + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + + # We do it here for proper warnings with meaningful stacklevel. 
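+    # Editor's sketch (illustrative, not part of attrs) -- typical usage of
+    # make_class as defined above:
+    #
+    #     C = make_class("C", ["x", "y"], frozen=True)
+    #     C(1, 2)   # behaves like an @attr.s-decorated, frozen class
+    #
+    # The *cmp*/*eq*/*order* normalization happens next, here rather than in
+    # attrs() itself, so deprecation warnings get a meaningful stacklevel: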
+ cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if + they have any. + + :param callables converters: Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + def pipe_converter(val): + for converter in converters: + val = converter(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__ = {"val": A, "return": A} + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + # Get return type from last converter. + rt = _AnnotationExtractor(converters[-1]).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + return pipe_converter diff --git a/env-llmeval/lib/python3.10/site-packages/attr/_version_info.pyi b/env-llmeval/lib/python3.10/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000000000000000000000000000000000000..45ced086337783c4b73b26cd17d2c1c260e24029 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/env-llmeval/lib/python3.10/site-packages/attr/converters.pyi b/env-llmeval/lib/python3.10/site-packages/attr/converters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5abb49f6d5a8c3447d0f223a308e2278ad027416 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/converters.pyi @@ -0,0 +1,13 @@ +from typing import Callable, TypeVar, overload + +from . import _ConverterType + +_T = TypeVar("_T") + +def pipe(*validators: _ConverterType) -> _ConverterType: ... +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: _T) -> _ConverterType: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... +def to_bool(val: str) -> bool: ... 
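Editor's note: the `pipe`, `default_if_none`, and `to_bool` converters stubbed
above compose with `attr.ib` roughly as in this minimal sketch (illustrative
only; the class name Flag is hypothetical and assumes attrs is installed):

    import attr
    from attr import converters

    @attr.s
    class Flag:
        # None -> "no" via default_if_none; " yes " -> "yes" -> True
        value = attr.ib(
            converter=converters.pipe(
                converters.default_if_none("no"),
                str.strip,
                converters.to_bool,
            )
        )

    assert Flag(" yes ").value is True
    assert Flag(None).value is False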
diff --git a/env-llmeval/lib/python3.10/site-packages/attr/exceptions.py b/env-llmeval/lib/python3.10/site-packages/attr/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7abb8154108aa1d0ae52fa9ee8e489f05b5563
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/attr/exceptions.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+
+from __future__ import annotations
+
+from typing import ClassVar
+
+
+class FrozenError(AttributeError):
+    """
+    An attempt was made to modify a frozen/immutable instance or attribute.
+
+    It mirrors the behavior of ``namedtuples`` by using the same error message
+    and subclassing `AttributeError`.
+
+    .. versionadded:: 20.1.0
+    """
+
+    msg = "can't set attribute"
+    args: ClassVar[tuple[str]] = (msg,)
+
+
+class FrozenInstanceError(FrozenError):
+    """
+    An attempt was made to modify a frozen instance.
+
+    .. versionadded:: 16.1.0
+    """
+
+
+class FrozenAttributeError(FrozenError):
+    """
+    An attempt was made to modify a frozen attribute.
+
+    .. versionadded:: 20.1.0
+    """
+
+
+class AttrsAttributeNotFoundError(ValueError):
+    """
+    An *attrs* function couldn't find an attribute that the user asked for.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class NotAnAttrsClassError(ValueError):
+    """
+    A non-*attrs* class has been passed into an *attrs* function.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class DefaultAlreadySetError(RuntimeError):
+    """
+    A default has been set when defining the field and an attempt was made to
+    reset it using the decorator.
+
+    .. versionadded:: 17.1.0
+    """
+
+
+class UnannotatedAttributeError(RuntimeError):
+    """
+    A class with ``auto_attribs=True`` has a field without a type annotation.
+
+    .. versionadded:: 17.3.0
+    """
+
+
+class PythonTooOldError(RuntimeError):
+    """
+    An *attrs* feature that requires a newer Python version was used.
+
+    .. versionadded:: 18.2.0
+    """
+
+
+class NotCallableError(TypeError):
+    """
+    A field requiring a callable has been set with a value that is not
+    callable.
+
+    .. versionadded:: 19.2.0
+    """
+
+    def __init__(self, msg, value):
+        super(TypeError, self).__init__(msg, value)
+        self.msg = msg
+        self.value = value
+
+    def __str__(self):
+        return str(self.msg)
diff --git a/env-llmeval/lib/python3.10/site-packages/attr/exceptions.pyi b/env-llmeval/lib/python3.10/site-packages/attr/exceptions.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..f2680118b404db8f5227d04d27e8439331341c4d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/attr/exceptions.pyi
@@ -0,0 +1,17 @@
+from typing import Any
+
+class FrozenError(AttributeError):
+    msg: str = ...
+
+class FrozenInstanceError(FrozenError): ...
+class FrozenAttributeError(FrozenError): ...
+class AttrsAttributeNotFoundError(ValueError): ...
+class NotAnAttrsClassError(ValueError): ...
+class DefaultAlreadySetError(RuntimeError): ...
+class UnannotatedAttributeError(RuntimeError): ...
+class PythonTooOldError(RuntimeError): ...
+
+class NotCallableError(TypeError):
+    msg: str = ...
+    value: Any = ...
+    def __init__(self, msg: str, value: Any) -> None: ...
diff --git a/env-llmeval/lib/python3.10/site-packages/attr/filters.pyi b/env-llmeval/lib/python3.10/site-packages/attr/filters.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..8a02fa0fc0631dde0b4501c8d1c168b467c0d1a9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/attr/filters.pyi
@@ -0,0 +1,6 @@
+from typing import Any, Union
+
+from . 
import Attribute, _FilterType + +def include(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... +def exclude(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/attr/py.typed b/env-llmeval/lib/python3.10/site-packages/attr/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/attr/setters.py b/env-llmeval/lib/python3.10/site-packages/attr/setters.py new file mode 100644 index 0000000000000000000000000000000000000000..12ed6750df35b96e2ccde24a9752dca22929188d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/setters.py @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + return c(new_value) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# autodata stopped working, so the docstring is inlined in the API docs. +NO_OP = object() diff --git a/env-llmeval/lib/python3.10/site-packages/attr/validators.pyi b/env-llmeval/lib/python3.10/site-packages/attr/validators.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d194a75abcacfa434f2445e66ea25975236dffcf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/attr/validators.pyi @@ -0,0 +1,88 @@ +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + List, + Mapping, + Match, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +from . import _ValidatorType +from . import _ValidatorArgType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2]] +) -> _ValidatorType[Union[_T1, _T2]]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2], Type[_T3]] +) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... 
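+# Editor's note (illustrative): the overloads above let a call like
+# instance_of((int, str)) type-check as _ValidatorType[Union[int, str]];
+# tuples with more than three member types fall through to the Any
+# overload below.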
+@overload +def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... +def provides(interface: Any) -> _ValidatorType[Any]: ... +def optional( + validator: Union[ + _ValidatorType[_T], List[_ValidatorType[_T]], Tuple[_ValidatorType[_T]] + ] +) -> _ValidatorType[Optional[_T]]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Union[Pattern[AnyStr], AnyStr], + flags: int = ..., + func: Optional[ + Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] + ] = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: Optional[_ValidatorType[_I]] = ..., +) -> _ValidatorType[_I]: ... +def deep_mapping( + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: Optional[_ValidatorType[_M]] = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: Optional[str] = None, + exc_types: Union[Type[Exception], Iterable[Type[Exception]]] = ..., +) -> _ValidatorType[_T]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb6c2d0a1556d8119d3b400a9adcff73b34e6e5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
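+#
+# Illustrative usage sketch (not part of the upstream file; `model` is a
+# hypothetical torch module whose state dict should be split into ~5GB shards):
+#
+#     from huggingface_hub.serialization import split_torch_state_dict_into_shards
+#
+#     split = split_torch_state_dict_into_shards(model.state_dict())
+#     for filename, tensor_names in split.filename_to_tensors.items():
+#         ...  # persist the tensors listed in `tensor_names` under `filename`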
+# ruff: noqa: F401 +"""Contains helpers to serialize tensors.""" + +from ._base import StateDictSplit, split_state_dict_into_shards_factory +from ._numpy import split_numpy_state_dict_into_shards +from ._tensorflow import split_tf_state_dict_into_shards +from ._torch import split_torch_state_dict_into_shards diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da9f58d3f06452df74ac78cb35f88aaf4b660dc3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b07529125d647e315d6e1d7f5627beed1859370 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fa9b1998da6655369a963d3a3b88abe4666423e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ed16b273f5c7254e0a4f3510193330f891cb768 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6a4a0a4948f46419ddf90ca216ec0bb39941b12 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..a7f7bba89263d31d5facb1ebed66c5f701dba973 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py @@ -0,0 +1,169 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains helpers to split tensors into shards.""" + +from dataclasses import dataclass, field +from typing import Any, Callable, Dict, List, Optional, TypeVar + +from .. import logging + + +TensorT = TypeVar("TensorT") +TensorSizeFn_T = Callable[[TensorT], int] +StorageIDFn_T = Callable[[TensorT], Optional[Any]] + +MAX_SHARD_SIZE = 5_000_000_000 # 5GB +FILENAME_PATTERN = "model{suffix}.safetensors" + +logger = logging.get_logger(__file__) + + +@dataclass +class StateDictSplit: + is_sharded: bool = field(init=False) + metadata: Dict[str, Any] + filename_to_tensors: Dict[str, List[str]] + tensor_to_filename: Dict[str, str] + + def __post_init__(self): + self.is_sharded = len(self.filename_to_tensors) > 1 + + +def split_state_dict_into_shards_factory( + state_dict: Dict[str, TensorT], + *, + get_tensor_size: TensorSizeFn_T, + get_storage_id: StorageIDFn_T = lambda tensor: None, + filename_pattern: str = FILENAME_PATTERN, + max_shard_size: int = MAX_SHARD_SIZE, +) -> StateDictSplit: + """ + Split a model state dictionary in shards so that each shard is smaller than a given size. + + The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization + made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we + have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not + [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a + size greater than `max_shard_size`. + + + + Args: + state_dict (`Dict[str, Tensor]`): + The state dictionary to save. + get_tensor_size (`Callable[[Tensor], int]`): + A function that returns the size of a tensor in bytes. + get_storage_id (`Callable[[Tensor], Optional[Any]]`, *optional*): + A function that returns a unique identifier to a tensor storage. Multiple different tensors can share the + same underlying storage. This identifier is guaranteed to be unique and constant for this tensor's storage + during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. + filename_pattern (`str`, *optional*): + The pattern to generate the files names in which the model will be saved. Pattern must be a string that + can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix` + Defaults to `"model{suffix}.safetensors"`. + max_shard_size (`int` or `str`, *optional*): + The maximum size of each shard, in bytes. Defaults to 5GB. + + Returns: + [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them. 
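+
+    Example (an illustrative sketch rather than upstream documentation; it
+    drives the factory with NumPy arrays and sizes them via `nbytes`):
+
+    ```py
+    >>> import numpy as np
+    >>> state_dict = {
+    ...     "a": np.zeros(1000, dtype=np.float32),  # 4000 bytes
+    ...     "b": np.zeros(1000, dtype=np.float32),  # 4000 bytes
+    ... }
+    >>> split = split_state_dict_into_shards_factory(
+    ...     state_dict,
+    ...     get_tensor_size=lambda t: t.nbytes,
+    ...     max_shard_size=5000,  # both arrays cannot fit in one 5000-byte shard
+    ... )
+    >>> split.is_sharded
+    True
+    >>> sorted(split.filename_to_tensors)
+    ['model-00001-of-00002.safetensors', 'model-00002-of-00002.safetensors']
+    ```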
+ """ + storage_id_to_tensors: Dict[Any, List[str]] = {} + + shard_list: List[Dict[str, TensorT]] = [] + current_shard: Dict[str, TensorT] = {} + current_shard_size = 0 + total_size = 0 + + for key, tensor in state_dict.items(): + # when bnb serialization is used the weights in the state dict can be strings + # check: https://github.com/huggingface/transformers/pull/24416 for more details + if isinstance(tensor, str): + logger.info("Skipping tensor %s as it is a string (bnb serialization)", key) + continue + + # If a `tensor` shares the same underlying storage as another tensor, we put `tensor` in the same `block` + storage_id = get_storage_id(tensor) + if storage_id is not None: + if storage_id in storage_id_to_tensors: + # We skip this tensor for now and will reassign to correct shard later + storage_id_to_tensors[storage_id].append(key) + continue + else: + # This is the first tensor with this storage_id, we create a new entry + # in the storage_id_to_tensors dict => we will assign the shard id later + storage_id_to_tensors[storage_id] = [key] + + # Compute tensor size + tensor_size = get_tensor_size(tensor) + + # If this tensor is bigger than the maximal size, we put it in its own shard + if tensor_size > max_shard_size: + total_size += tensor_size + shard_list.append({key: tensor}) + continue + + # If this tensor is going to tip up over the maximal size, we split. + # Current shard already has some tensors, we add it to the list of shards and create a new one. + if current_shard_size + tensor_size > max_shard_size: + shard_list.append(current_shard) + current_shard = {} + current_shard_size = 0 + + # Add the tensor to the current shard + current_shard[key] = tensor + current_shard_size += tensor_size + total_size += tensor_size + + # Add the last shard + if len(current_shard) > 0: + shard_list.append(current_shard) + nb_shards = len(shard_list) + + # Loop over the tensors that share the same storage and assign them together + for storage_id, keys in storage_id_to_tensors.items(): + # Let's try to find the shard where the first tensor of this storage is and put all tensors in the same shard + for shard in shard_list: + if keys[0] in shard: + for key in keys: + shard[key] = state_dict[key] + break + + # If we only have one shard, we return it => no need to build the index + if nb_shards == 1: + filename = filename_pattern.format(suffix="") + return StateDictSplit( + metadata={"total_size": total_size}, + filename_to_tensors={filename: list(state_dict.keys())}, + tensor_to_filename={key: filename for key in state_dict.keys()}, + ) + + # Now that each tensor is assigned to a shard, let's assign a filename to each shard + tensor_name_to_filename = {} + filename_to_tensors = {} + for idx, shard in enumerate(shard_list): + filename = filename_pattern.format(suffix=f"-{idx+1:05d}-of-{nb_shards:05d}") + for key in shard: + tensor_name_to_filename[key] = filename + filename_to_tensors[filename] = list(shard.keys()) + + # Build the index and return + return StateDictSplit( + metadata={"total_size": total_size}, + filename_to_tensors=filename_to_tensors, + tensor_to_filename=tensor_name_to_filename, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..214c77d9acde2a14069f403ed337e6c8c57047ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py @@ -0,0 
+1,68 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains numpy-specific helpers.""" + +from typing import TYPE_CHECKING, Dict + +from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory + + +if TYPE_CHECKING: + import numpy as np + + +def split_numpy_state_dict_into_shards( + state_dict: Dict[str, "np.ndarray"], + *, + filename_pattern: str = FILENAME_PATTERN, + max_shard_size: int = MAX_SHARD_SIZE, +) -> StateDictSplit: + """ + Split a model state dictionary in shards so that each shard is smaller than a given size. + + The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization + made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we + have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not + [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a + size greater than `max_shard_size`. + + + + Args: + state_dict (`Dict[str, np.ndarray]`): + The state dictionary to save. + filename_pattern (`str`, *optional*): + The pattern to generate the files names in which the model will be saved. Pattern must be a string that + can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix` + Defaults to `"model{suffix}.safetensors"`. + max_shard_size (`int` or `str`, *optional*): + The maximum size of each shard, in bytes. Defaults to 5GB. + + Returns: + [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them. + """ + return split_state_dict_into_shards_factory( + state_dict, + max_shard_size=max_shard_size, + filename_pattern=filename_pattern, + get_tensor_size=get_tensor_size, + ) + + +def get_tensor_size(tensor: "np.ndarray") -> int: + return tensor.nbytes diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py new file mode 100644 index 0000000000000000000000000000000000000000..f8d752c083063d3e9772b69982e8f979fbda53ea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py @@ -0,0 +1,94 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains tensorflow-specific helpers.""" + +import math +import re +from typing import TYPE_CHECKING, Dict + +from ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory + + +if TYPE_CHECKING: + import tensorflow as tf + + +def split_tf_state_dict_into_shards( + state_dict: Dict[str, "tf.Tensor"], + *, + filename_pattern: str = "tf_model{suffix}.h5", + max_shard_size: int = MAX_SHARD_SIZE, +) -> StateDictSplit: + """ + Split a model state dictionary in shards so that each shard is smaller than a given size. + + The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization + made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we + have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not + [6+2+2GB], [6+2GB], [6GB]. + + + + If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a + size greater than `max_shard_size`. + + + + Args: + state_dict (`Dict[str, Tensor]`): + The state dictionary to save. + filename_pattern (`str`, *optional*): + The pattern to generate the files names in which the model will be saved. Pattern must be a string that + can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix` + Defaults to `"tf_model{suffix}.h5"`. + max_shard_size (`int` or `str`, *optional*): + The maximum size of each shard, in bytes. Defaults to 5GB. + + Returns: + [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them. + """ + return split_state_dict_into_shards_factory( + state_dict, + max_shard_size=max_shard_size, + filename_pattern=filename_pattern, + get_tensor_size=get_tensor_size, + ) + + +def get_tensor_size(tensor: "tf.Tensor") -> int: + # Return `math.ceil` since dtype byte size can be a float (e.g., 0.125 for tf.bool). + # Better to overestimate than underestimate. + return math.ceil(tensor.numpy().size * _dtype_byte_size_tf(tensor.dtype)) + + +def _dtype_byte_size_tf(dtype) -> float: + """ + Returns the size (in bytes) occupied by one parameter of type `dtype`. + Taken from https://github.com/huggingface/transformers/blob/74d9d0cebb0263a3f8ab9c280569170cc74651d0/src/transformers/modeling_tf_utils.py#L608. + NOTE: why not `tensor.numpy().nbytes`? + Example: + ```py + >>> _dtype_byte_size(tf.float32) + 4 + ``` + """ + import tensorflow as tf + + if dtype == tf.bool: + return 1 / 8 + bit_search = re.search(r"[^\d](\d+)$", dtype.name) + if bit_search is None: + raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") + bit_size = int(bit_search.groups()[0]) + return bit_size // 8 diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..00ab7e2c80d7a8fde928588c284213fca2100cf3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py @@ -0,0 +1,200 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains pytorch-specific helpers."""
+
+import importlib
+from functools import lru_cache
+from typing import TYPE_CHECKING, Dict, Tuple
+
+from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory
+
+
+if TYPE_CHECKING:
+    import torch
+
+
+def split_torch_state_dict_into_shards(
+    state_dict: Dict[str, "torch.Tensor"],
+    *,
+    filename_pattern: str = FILENAME_PATTERN,
+    max_shard_size: int = MAX_SHARD_SIZE,
+) -> StateDictSplit:
+    """
+    Split a model state dictionary into shards so that each shard is smaller than a given size.
+
+    The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
+    made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
+    have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
+    [6+2+2GB], [6+2GB], [6GB].
+
+
+
+    If one of the model's tensors is bigger than `max_shard_size`, it will end up in its own shard which will have a
+    size greater than `max_shard_size`.
+
+
+
+    Args:
+        state_dict (`Dict[str, torch.Tensor]`):
+            The state dictionary to save.
+        filename_pattern (`str`, *optional*):
+            The pattern to generate the file names in which the model will be saved. Pattern must be a string that
+            can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`.
+            Defaults to `"model{suffix}.safetensors"`.
+        max_shard_size (`int` or `str`, *optional*):
+            The maximum size of each shard, in bytes. Defaults to 5GB.
+
+    Returns:
+        [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
+
+    Example:
+    ```py
+    >>> import json
+    >>> import os
+    >>> from safetensors.torch import save_file as safe_save_file
+    >>> from huggingface_hub import split_torch_state_dict_into_shards
+
+    >>> def save_state_dict(state_dict: Dict[str, torch.Tensor], save_directory: str):
+    ...     state_dict_split = split_torch_state_dict_into_shards(state_dict)
+    ...     for filename, tensors in state_dict_split.filename_to_tensors.items():
+    ...         shard = {tensor: state_dict[tensor] for tensor in tensors}
+    ...         safe_save_file(
+    ...             shard,
+    ...             os.path.join(save_directory, filename),
+    ...             metadata={"format": "pt"},
+    ...         )
+    ...     if state_dict_split.is_sharded:
+    ...         index = {
+    ...             "metadata": state_dict_split.metadata,
+    ...             "weight_map": state_dict_split.tensor_to_filename,
+    ...         }
+    ...         with open(os.path.join(save_directory, "model.safetensors.index.json"), "w") as f:
+    ...             f.write(json.dumps(index, indent=2))
+    ```
+    """
+    return split_state_dict_into_shards_factory(
+        state_dict,
+        max_shard_size=max_shard_size,
+        filename_pattern=filename_pattern,
+        get_tensor_size=get_tensor_size,
+        get_storage_id=get_storage_id,
+    )
+
+
+def get_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", int, int]:
+    """
+    Return unique identifier to a tensor storage.
+
+    Multiple different tensors can share the same underlying storage. 
For + example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is + guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with + non-overlapping lifetimes may have the same id. + + Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/pytorch_utils.py#L278. + """ + if tensor.device.type == "xla" and is_torch_tpu_available(): + # NOTE: xla tensors dont have storage + # use some other unique id to distinguish. + # this is a XLA tensor, it must be created using torch_xla's + # device. So the following import is safe: + import torch_xla + + unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) + else: + unique_id = storage_ptr(tensor) + + return tensor.device, unique_id, get_storage_size(tensor) + + +def get_tensor_size(tensor: "torch.Tensor") -> int: + return tensor.numel() * tensor.element_size() + + +@lru_cache() +def is_torch_tpu_available(check_device=True): + """ + Checks if `torch_xla` is installed and potentially if a TPU is in the environment + + Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/utils/import_utils.py#L463. + """ + if importlib.util.find_spec("torch_xla") is not None: + if check_device: + # We need to check if `xla_device` can be found, will raise a RuntimeError if not + try: + import torch_xla.core.xla_model as xm + + _ = xm.xla_device() + return True + except RuntimeError: + return False + return True + return False + + +def storage_ptr(tensor: "torch.Tensor") -> int: + """ + Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L11C1-L20C21. 
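+
+    Illustrative behavior (not from the upstream docstring): a view shares the
+    data pointer of its base tensor's storage.
+
+    ```py
+    >>> import torch
+    >>> t = torch.zeros(4)
+    >>> storage_ptr(t) == storage_ptr(t[1:])
+    True
+    ```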
+ """ + try: + return tensor.untyped_storage().data_ptr() + except Exception: + # Fallback for torch==1.10 + try: + return tensor.storage().data_ptr() + except NotImplementedError: + # Fallback for meta storage + return 0 + + +def get_storage_size(tensor: "torch.Tensor") -> int: + """ + Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L31C1-L41C59 + """ + try: + return tensor.untyped_storage().nbytes() + except AttributeError: + # Fallback for torch==1.10 + try: + return tensor.storage().size() * _get_dtype_size(tensor.dtype) + except NotImplementedError: + # Fallback for meta storage + # On torch >=2.0 this is the tensor size + return tensor.nelement() * _get_dtype_size(tensor.dtype) + + +@lru_cache() +def _get_dtype_size(dtype: "torch.dtype") -> int: + """ + Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L344 + """ + import torch + + # torch.float8 formats require 2.1; we do not support these dtypes on earlier versions + _float8_e4m3fn = getattr(torch, "float8_e4m3fn", None) + _float8_e5m2 = getattr(torch, "float8_e5m2", None) + _SIZE = { + torch.int64: 8, + torch.float32: 4, + torch.int32: 4, + torch.bfloat16: 2, + torch.float16: 2, + torch.int16: 2, + torch.uint8: 1, + torch.int8: 1, + torch.bool: 1, + torch.float64: 8, + _float8_e4m3fn: 1, + _float8_e5m2: 1, + } + return _SIZE[dtype] diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbd8bda38ea322c9cd51cb9989d3dc1548c170ae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce47a1928bfc40a356e5e82d78bc4c56e9bd520e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10567ae3c42f23ae8bfef411bcff56ad08912eaf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f448abcd4d2360ad9168d9d8f378db738a817576 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..accd51df1f43a63a3ea182b431ea49a8ef17dc22 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ac665814421bf3ecef2015a362a3895fdfe868 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e03cbbe1a679c6c1c538581cb4214511bfaf7a19 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5399ffbf28ab2ae4c0d383de7cc59cfa91a877a5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..387d7773e98f6520b608fa6d2501b7fd36f44bfb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0686100dcaa0229dbed8be42e6df99560e15f5da Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79f5c48a290128b7658ef6cc433bf2cbca6ec7e6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py new file mode 100644 index 0000000000000000000000000000000000000000..c359122f97125ed630760029f7fd0689f1caefd3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py @@ -0,0 +1,26 @@ +# This file is dual 
licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "21.2" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014-2019 %s" % __author__ diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3c50c5dcfeeda2efed282200a5c5cc8c5f7542f7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py @@ -0,0 +1,25 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from .__about__ import ( + __author__, + __copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, +) + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e364ea4ea9a311cf1bbf3963bc7ba4a4df130c38 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffa8d7eb1e5a424eede2fa2f5927e70a63425448 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad696000aca6cdd0ca6bbf29f97a760db83b2afb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py new file mode 100644 index 0000000000000000000000000000000000000000..4c379aa6f69ff56c8f19612002c6e3e939ea6012 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py @@ -0,0 +1,301 @@ +import collections +import functools +import os +import re +import struct +import sys +import 
warnings +from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple + + +# Python does not provide platform information at sufficient granularity to +# identify the architecture of the running executable in some cases, so we +# determine it dynamically by reading the information from the running +# process. This only applies on Linux, which uses the ELF format. +class _ELFFileHeader: + # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header + class _InvalidELFFileHeader(ValueError): + """ + An invalid ELF file header was found. + """ + + ELF_MAGIC_NUMBER = 0x7F454C46 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + EM_386 = 3 + EM_S390 = 22 + EM_ARM = 40 + EM_X86_64 = 62 + EF_ARM_ABIMASK = 0xFF000000 + EF_ARM_ABI_VER5 = 0x05000000 + EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + def __init__(self, file: IO[bytes]) -> None: + def unpack(fmt: str) -> int: + try: + data = file.read(struct.calcsize(fmt)) + result: Tuple[int, ...] = struct.unpack(fmt, data) + except struct.error: + raise _ELFFileHeader._InvalidELFFileHeader() + return result[0] + + self.e_ident_magic = unpack(">I") + if self.e_ident_magic != self.ELF_MAGIC_NUMBER: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_class = unpack("B") + if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_data = unpack("B") + if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_version = unpack("B") + self.e_ident_osabi = unpack("B") + self.e_ident_abiversion = unpack("B") + self.e_ident_pad = file.read(7) + format_h = "H" + format_i = "I" + format_q = "Q" + format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q + self.e_type = unpack(format_h) + self.e_machine = unpack(format_h) + self.e_version = unpack(format_i) + self.e_entry = unpack(format_p) + self.e_phoff = unpack(format_p) + self.e_shoff = unpack(format_p) + self.e_flags = unpack(format_i) + self.e_ehsize = unpack(format_h) + self.e_phentsize = unpack(format_h) + self.e_phnum = unpack(format_h) + self.e_shentsize = unpack(format_h) + self.e_shnum = unpack(format_h) + self.e_shstrndx = unpack(format_h) + + +def _get_elf_header() -> Optional[_ELFFileHeader]: + try: + with open(sys.executable, "rb") as f: + elf_header = _ELFFileHeader(f) + except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): + return None + return elf_header + + +def _is_linux_armhf() -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_ARM + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABIMASK + ) == elf_header.EF_ARM_ABI_VER5 + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD + ) == elf_header.EF_ARM_ABI_FLOAT_HARD + return result + + +def _is_linux_i686() -> bool: + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_386 + return result + + +def _have_compatible_abi(arch: str) -> bool: + if arch == "armv7l": + return _is_linux_armhf() + if 
arch == "i686": + return _is_linux_i686() + return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". + version_string = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.split() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. 
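+
+    Illustrative examples (not upstream):
+
+        >>> _parse_glibc_version("2.17")
+        (2, 17)
+        >>> _parse_glibc_version("2.20-2014.11")
+        (2, 20)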
+    """
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn(
+            "Expected glibc version with 2 components major.minor,"
+            " got: %s" % version_str,
+            RuntimeWarning,
+        )
+        return -1, -1
+    return int(m.group("major")), int(m.group("minor"))
+
+
+@functools.lru_cache()
+def _get_glibc_version() -> Tuple[int, int]:
+    version_str = _glibc_version_string()
+    if version_str is None:
+        return (-1, -1)
+    return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+    sys_glibc = _get_glibc_version()
+    if sys_glibc < version:
+        return False
+    # Check for presence of _manylinux module.
+    try:
+        import _manylinux  # noqa
+    except ImportError:
+        return True
+    if hasattr(_manylinux, "manylinux_compatible"):
+        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+        if result is not None:
+            return bool(result)
+        return True
+    if version == _GLibCVersion(2, 5):
+        if hasattr(_manylinux, "manylinux1_compatible"):
+            return bool(_manylinux.manylinux1_compatible)
+    if version == _GLibCVersion(2, 12):
+        if hasattr(_manylinux, "manylinux2010_compatible"):
+            return bool(_manylinux.manylinux2010_compatible)
+    if version == _GLibCVersion(2, 17):
+        if hasattr(_manylinux, "manylinux2014_compatible"):
+            return bool(_manylinux.manylinux2014_compatible)
+    return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+    # CentOS 7 w/ glibc 2.17 (PEP 599)
+    (2, 17): "manylinux2014",
+    # CentOS 6 w/ glibc 2.12 (PEP 571)
+    (2, 12): "manylinux2010",
+    # CentOS 5 w/ glibc 2.5 (PEP 513)
+    (2, 5): "manylinux1",
+}
+
+
+def platform_tags(linux: str, arch: str) -> Iterator[str]:
+    if not _have_compatible_abi(arch):
+        return
+    # Oldest glibc to be supported regardless of architecture is (2, 17).
+    too_old_glibc2 = _GLibCVersion(2, 16)
+    if arch in {"x86_64", "i686"}:
+        # On x86/i686 also oldest glibc to be supported is (2, 5).
+        too_old_glibc2 = _GLibCVersion(2, 4)
+    current_glibc = _GLibCVersion(*_get_glibc_version())
+    glibc_max_list = [current_glibc]
+    # We can assume compatibility across glibc major versions.
+    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+    #
+    # Build a list of maximum glibc versions so that we can
+    # output the canonical list of all glibc from current_glibc
+    # down to too_old_glibc2, including all intermediary versions.
+    for glibc_major in range(current_glibc.major - 1, 1, -1):
+        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+    for glibc_max in glibc_max_list:
+        if glibc_max.major == too_old_glibc2.major:
+            min_minor = too_old_glibc2.minor
+        else:
+            # For other glibc major versions oldest supported is (x, 0).
+            min_minor = -1
+        for glibc_minor in range(glibc_max.minor, min_minor, -1):
+            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+            tag = "manylinux_{}_{}".format(*glibc_version)
+            if _is_compatible(tag, arch, glibc_version):
+                yield linux.replace("linux", tag)
+            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
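+            # Illustrative (not upstream): for glibc 2.17 on x86_64 this loop
+            # yields both "manylinux_2_17_x86_64" (PEP 600) and, via the
+            # mapping below, the legacy alias "manylinux2014_x86_64" (PEP 599).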
+ if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(legacy_tag, arch, glibc_version): + yield linux.replace("linux", legacy_tag) diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..951549753afa255148c7c60d868303963f8c1813 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py @@ -0,0 +1,67 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py new file mode 100644 index 0000000000000000000000000000000000000000..18769b09a8a34f1e7d63cc61e62cd128ff5f9484 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py @@ -0,0 +1,304 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import operator +import os +import platform +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from pkg_resources.extern.pyparsing import ( # noqa: N817 + Forward, + Group, + Literal as L, + ParseException, + ParseResults, + QuotedString, + ZeroOrMore, + stringEnd, + stringStart, +) + +from .specifiers import InvalidSpecifier, Specifier + +__all__ = [ + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. 
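+
+    For example (illustrative): evaluating a marker such as
+    ``os_name ~= "posix"`` raises this error, since ``~=`` is only meaningful
+    for PEP 440 versions.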
+ """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Node: + def __init__(self, value: Any) -> None: + self.value = value + + def __str__(self) -> str: + return str(self.value) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +VARIABLE = ( + L("implementation_version") + | L("platform_python_implementation") + | L("implementation_name") + | L("python_full_version") + | L("platform_release") + | L("platform_version") + | L("platform_machine") + | L("platform_system") + | L("python_version") + | L("sys_platform") + | L("os_name") + | L("os.name") # PEP-345 + | L("sys.platform") # PEP-345 + | L("platform.version") # PEP-345 + | L("platform.machine") # PEP-345 + | L("platform.python_implementation") # PEP-345 + | L("python_implementation") # undocumented setuptools legacy + | L("extra") # PEP-508 +) +ALIASES = { + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker( + marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True +) -> str: + + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
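+    # Illustrative (not upstream): "(os_name == 'nt' and extra == 'dev')"
+    # parses to a one-element list wrapping the 'and'-joined marker list;
+    # this branch unwraps it so no extraneous parentheses are emitted.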
+    if (
+        isinstance(marker, list)
+        and len(marker) == 1
+        and isinstance(marker[0], (list, tuple))
+    ):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        return " ".join([m.serialize() for m in marker])
+    else:
+        return marker
+
+
+_operators: Dict[str, Operator] = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
+def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
+    try:
+        spec = Specifier("".join([op.serialize(), rhs]))
+    except InvalidSpecifier:
+        pass
+    else:
+        return spec.contains(lhs)
+
+    oper: Optional[Operator] = _operators.get(op.serialize())
+    if oper is None:
+        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
+
+    return oper(lhs, rhs)
+
+
+class Undefined:
+    pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment: Dict[str, str], name: str) -> str:
+    value: Union[str, Undefined] = environment.get(name, _undefined)
+
+    if isinstance(value, Undefined):
+        raise UndefinedEnvironmentName(
+            f"{name!r} does not exist in evaluation environment."
+        )
+
+    return value
+
+
+def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
+    groups: List[List[bool]] = [[]]
+
+    for marker in markers:
+        assert isinstance(marker, (list, tuple, str))
+
+        if isinstance(marker, list):
+            groups[-1].append(_evaluate_markers(marker, environment))
+        elif isinstance(marker, tuple):
+            lhs, op, rhs = marker
+
+            if isinstance(lhs, Variable):
+                lhs_value = _get_env(environment, lhs.value)
+                rhs_value = rhs.value
+            else:
+                lhs_value = lhs.value
+                rhs_value = _get_env(environment, rhs.value)
+
+            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+        else:
+            assert marker in ["and", "or"]
+            if marker == "or":
+                groups.append([])
+
+    return any(all(item) for item in groups)
+
+
+def format_full_version(info: "sys._version_info") -> str:
+    version = "{0.major}.{0.minor}.{0.micro}".format(info)
+    kind = info.releaselevel
+    if kind != "final":
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment() -> Dict[str, str]:
+    iver = format_full_version(sys.implementation.version)
+    implementation_name = sys.implementation.name
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": ".".join(platform.python_version_tuple()[:2]),
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker:
+    def __init__(self, marker: str) -> None:
+        try:
+            self._markers = _coerce_parse_result(MARKER.parseString(marker))
+        except ParseException as e:
+            raise InvalidMarker(
+                f"Invalid marker: {marker!r}, parse error at "
+                f"{marker[e.loc : e.loc + 8]!r}"
+            )
+
+    def __str__(self) -> str:
+        return _format_marker(self._markers)
+
+    def __repr__(self) -> str:
+        return f"<Marker('{self}')>"
+
+    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. 
environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..6af14ec4ce49e633d030611c26f0bd9beaf13e6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py @@ -0,0 +1,146 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +import string +import urllib.parse +from typing import List, Optional as TOptional, Set + +from pkg_resources.extern.pyparsing import ( # noqa + Combine, + Literal as L, + Optional, + ParseException, + Regex, + Word, + ZeroOrMore, + originalTextFor, + stringEnd, + stringStart, +) + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r"[^ ]+")("url") +URL = AT + URI + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine( + VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False +)("_raw_spec") +_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start : t._original_end]) +) +MARKER_SEPARATOR = SEMICOLON +MARKER = MARKER_SEPARATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd +# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see +# issue #104 +REQUIREMENT.parseString("x[]") + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. 
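+
+    Example (illustrative, not from the upstream docstring)::
+
+        >>> req = Requirement("requests[security]>=2.8.1; python_version > '2.7'")
+        >>> req.name, sorted(req.extras), str(req.specifier), str(req.marker)
+        ('requests', ['security'], '>=2.8.1', 'python_version > "2.7"')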
+ """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string: str) -> None: + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' + ) + + self.name: str = req.name + if req.url: + parsed_url = urllib.parse.urlparse(req.url) + if parsed_url.scheme == "file": + if urllib.parse.urlunparse(parsed_url) != req.url: + raise InvalidRequirement("Invalid URL given") + elif not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc + ): + raise InvalidRequirement(f"Invalid URL: {req.url}") + self.url: TOptional[str] = req.url + else: + self.url = None + self.extras: Set[str] = set(req.extras.asList() if req.extras else []) + self.specifier: SpecifierSet = SpecifierSet(req.specifier) + self.marker: TOptional[Marker] = req.marker if req.marker else None + + def __str__(self) -> str: + parts: List[str] = [self.name] + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + parts.append(f"[{formatted_extras}]") + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append(f"@ {self.url}") + if self.marker: + parts.append(" ") + + if self.marker: + parts.append(f"; {self.marker}") + + return "".join(parts) + + def __repr__(self) -> str: + return f"" diff --git a/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..ce66bd4addbde1e332e9a42f6eb62adc471193e5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py @@ -0,0 +1,828 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc +import functools +import itertools +import re +import warnings +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Set, + Tuple, + TypeVar, + Union, +) + +from .utils import canonicalize_version +from .version import LegacyVersion, Version, parse + +ParsedVersion = Union[Version, LegacyVersion] +UnparsedVersion = Union[Version, LegacyVersion, str] +VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) +CallableOperator = Callable[[ParsedVersion, str], bool] + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. 
+ """ + + @abc.abstractmethod + def __ne__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self) -> Optional[bool]: + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators: Dict[str, str] = {} + _regex: Pattern[str] + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) + + def __str__(self) -> str: + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + return self._spec[0], canonicalize_version(self._spec[1]) + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def __ne__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self) -> str: + return self._spec[0] + + @property + def version(self) -> str: + return self._spec[1] + + @property + def prereleases(self) -> Optional[bool]: + return self._prereleases + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: str) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Determine if prereleases are to be allowed or not. 
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version or LegacyVersion, this allows us to have
+        # a shortcut for ``"2.0" in Specifier(">=2")``.
+        normalized_item = self._coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not, if we do not support prereleases then we can short circuit
+        # logic if this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+    ) -> Iterable[VersionTypeVar]:
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = self._coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+    _regex_str = r"""
+        (?P<operator>(==|!=|<=|>=|<|>))
+        \s*
+        (?P<version>
+            [^,;\s)]* # Since this is a "legacy" specifier, and the version
+                      # string can be just about anything, we match everything
+                      # except for whitespace, a semi-colon for marker support,
+                      # a closing paren since versions can be enclosed in
+                      # them, and a comma since it's a version separator.
+        )
+    """
+
+    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+    }
+
+    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+        super().__init__(spec, prereleases)
+
+        warnings.warn(
+            "Creating a LegacyVersion has been deprecated and will be "
+            "removed in the next major release",
+            DeprecationWarning,
+        )
+
+    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
+        if not isinstance(version, LegacyVersion):
+            version = LegacyVersion(str(version))
+        return version
+
+    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+        return prospective == self._coerce_version(spec)
+
+    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+        return prospective != self._coerce_version(spec)
+
+    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+        return prospective <= self._coerce_version(spec)
+
+    def _compare_greater_than_equal(
+        self, prospective: LegacyVersion, spec: str
+    ) -> bool:
+        return prospective >= self._coerce_version(spec)
+
+    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
+        return prospective < self._coerce_version(spec)
+
+    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
+        return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+    fn: Callable[["Specifier", ParsedVersion, str], bool]
+) -> Callable[["Specifier", ParsedVersion, str], bool]:
+    @functools.wraps(fn)
+    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
+        if not isinstance(prospective, Version):
+            return False
+        return fn(self, prospective, spec)
+
+    return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+    _regex_str = r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s]*    # We just match everything, except for whitespace
+                          # since we are only testing for strict identity.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)  # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+
+                # You cannot use a wild card and a dev or local version
+                # together so group them with a | and make them optional.
+                (?:
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                    |
+                    \.\*  # Wild card syntax of .*
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)  # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?         # epoch
+                [0-9]+(?:\.[0-9]+)+  # release  (We have a + instead of a *)
+                (?:                  # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a sub set of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)  # We have special cases for these
+                               # operators so we want to make sure they
+                               # don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?         # epoch
+                [0-9]+(?:\.[0-9]+)*  # release
+                (?:                  # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+        )
+    """
+
+    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    @_require_version_compare
+    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
+
+        # Compatible releases have an equivalent combination of >= and ==. That
+        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore suffix segments.
+        prefix = ".".join(
+            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+            prospective, prefix
+        )
+
+    @_require_version_compare
+    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore local segment.
+            prospective = Version(prospective.public)
+            # Split the spec out by dots, and pretend that there is an implicit
+            # dot in between a release segment and a pre-release segment.
+            split_spec = _version_split(spec[:-2])  # Remove the trailing .*
+
+            # Split the prospective version out by dots, and pretend that there
+            # is an implicit dot in between a release segment and a pre-release
+            # segment.
+            split_prospective = _version_split(str(prospective))
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            shortened_prospective = split_prospective[: len(split_spec)]
+
+            # Pad out our two sides with zeros so that they both equal the same
+            # length.
+            padded_spec, padded_prospective = _pad_version(
+                split_spec, shortened_prospective
+            )
+
+            return padded_prospective == padded_spec
+        else:
+            # Convert our spec string into a Version
+            spec_version = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec_version.local:
+                prospective = Version(prospective.public)
+
+            return prospective == spec_version
+
+    @_require_version_compare
+    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+        return not self._compare_equal(prospective, spec)
+
+    @_require_version_compare
+    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) <= Version(spec)
+
+    @_require_version_compare
+    def _compare_greater_than_equal(
+        self, prospective: ParsedVersion, spec: str
+    ) -> bool:
+
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
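+        # e.g. a prospective "1.7+local" is compared as "1.7" here, so a
+        # specifier like ">=1.7" accepts it.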
+ return Version(prospective.public) >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self) -> bool: + + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. 
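+        # e.g. ">=1.0a1" implicitly opts in to pre-releases here, while
+        # ">=1.0" keeps them excluded by default.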
+            if parse(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> List[str]:
+    result: List[str] = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _is_not_suffix(segment: str) -> bool:
+    return not any(
+        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+    )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]) :])
+    right_split.append(right[len(right_split[0]) :])
+
+    # Insert our padding
+    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
+
+
+class SpecifierSet(BaseSpecifier):
+    def __init__(
+        self, specifiers: str = "", prereleases: Optional[bool] = None
+    ) -> None:
+
+        # Split on , to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+        # Specifier and falling back to a LegacySpecifier.
+        parsed: Set[_IndividualSpecifier] = set()
+        for specifier in split_specifiers:
+            try:
+                parsed.add(Specifier(specifier))
+            except InvalidSpecifier:
+                parsed.add(LegacySpecifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    def __repr__(self) -> str:
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<SpecifierSet({!r}{})>".format(str(self), pre)
+
+    def __str__(self) -> str:
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+ ) + + return specifier + + def __eq__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self) -> int: + return len(self._specs) + + def __iter__(self) -> Iterator[_IndividualSpecifier]: + return iter(self._specs) + + @property + def prereleases(self) -> Optional[bool]: + + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: UnparsedVersion) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. 
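+        # e.g. SpecifierSet(">=1.0,<2.0").filter(["0.9", "1.5", "2.1"]) lets
+        # only "1.5" through, since every specifier's filter() must keep it.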
+ if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. + else: + filtered: List[VersionTypeVar] = [] + found_prereleases: List[VersionTypeVar] = [] + + item: UnparsedVersion + parsed_version: Union[Version, LegacyVersion] + + for item in iterable: + # Ensure that we some kind of Version class for this item. + if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b13d59a4f896719cc73cee5343872e28f92af63 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py @@ -0,0 +1,45 @@ +import sys + +from . import DATASETS + +try: + cmd = sys.argv[1] +except IndexError: + print(f"Usage: {sys.argv[0]} --check | --dump") + sys.exit(1) + +if cmd == "--check": + import hashlib + import urllib.request + + url_md5 = {} + + for item in DATASETS.values(): + if item.md5 is not None: + assert item.data + assert item.md5 + assert len(item.data) == len(item.md5) + pairs = zip(item.data, item.md5) + for url, md5_hash in pairs: + url_md5[url] = md5_hash + + for url, md5_hash in url_md5.items(): + try: + print("Downloading ", url) + with urllib.request.urlopen(url) as f: + data = f.read() + except Exception as exc: + raise (exc) + + if hashlib.md5(data).hexdigest() != md5_hash: + print("MD5 check failed for", url) +elif cmd == "--dump": + import re + + # Dumps a table in markdown format + print(f'| {"Dataset":<30} | {"Description":<115} |') + header = "| " + "-" * 30 + " | " + "-" * 115 + " |" + print(header) + for name, item in DATASETS.items(): + desc = re.sub(r"(http[s]?:\/\/\S+)", r"[URL](\1)", str(item.description)) + print(f"| {name:<30} | {desc:<115} |") diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25b179c5cebcd178d894e4a0cef2eb094e922e66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d5435af5c58278f3ee1d0bd31f4949f989f12fc Binary files /dev/null 
and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..316ee9a76b3b48a3e0c7330083e0874d55e539d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15a481a6d298c507d36dc2cf91e1558ec299e495 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..672e20558781f70216ddfa97234aebfcb67b3555 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fd59bfd07ac00a040c7e7b0dbc61c442455cbc2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/base.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/base.py new file mode 100644 index 0000000000000000000000000000000000000000..cf3c092fae7ae206d2606680f0313dae65d7bccb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/base.py @@ -0,0 +1,195 @@ +""" +The base class for all types of datasets. +""" +import os +import re +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Optional + +from ..utils import SACREBLEU_DIR, download_file, smart_open + + +class Dataset(metaclass=ABCMeta): + def __init__( + self, + name: str, + data: Optional[List[str]] = None, + description: Optional[str] = None, + citation: Optional[str] = None, + md5: Optional[List[str]] = None, + langpairs=Dict[str, List[str]], + **kwargs, + ): + """ + Params come from the values in DATASETS. + + :param name: Name of the dataset. + :param data: URL of the raw data of the dataset. + :param description: Description of the dataset. + :param citation: Citation for the dataset. + :param md5: MD5 checksum of the dataset. + :param langpairs: List of available language pairs. + """ + self.name = name + self.data = data + self.description = description + self.citation = citation + self.md5 = md5 + self.langpairs = langpairs + self.kwargs = kwargs + + # Don't do any downloading or further processing now. + # Only do that lazily, when asked. 
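+        # (Lazy here means downloads happen on first use: get_files() calls
+        # process_to_text() when files are missing, and the concrete
+        # process_to_text() implementations call maybe_download().)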
+ + # where to store the dataset + self._outdir = os.path.join(SACREBLEU_DIR, self.name) + self._rawdir = os.path.join(self._outdir, "raw") + + def maybe_download(self): + """ + If the dataset isn't downloaded, use utils/download_file() + This can be implemented here in the base class. It should write + to ~/.sacreleu/DATASET/raw exactly as it does now. + """ + os.makedirs(self._rawdir, exist_ok=True) + + expected_checksums = self.md5 if self.md5 else [None] * len(self.data) + + for url, expected_md5 in zip(self.data, expected_checksums): + tarball = os.path.join(self._rawdir, self._get_tarball_filename(url)) + + download_file( + url, tarball, extract_to=self._rawdir, expected_md5=expected_md5 + ) + + @staticmethod + def _clean(s): + """ + Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one. + + :param s: The string. + :return: A cleaned-up string. + """ + return re.sub(r"\s+", " ", s.strip()) + + def _get_tarball_filename(self, url): + """ + Produces a local filename for tarball. + :param url: The url to download. + :return: A name produced from the dataset identifier and the URL basename. + """ + return self.name.replace("/", "_") + "." + os.path.basename(url) + + def _get_txt_file_path(self, langpair, fieldname): + """ + Given the language pair and fieldname, return the path to the text file. + The format is: ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELDNAME + + :param langpair: The language pair. + :param fieldname: The fieldname. + :return: The path to the text file. + """ + # handle the special case of subsets. e.g. "wmt21/dev" > "wmt21_dev" + name = self.name.replace("/", "_") + # Colons are used to distinguish multiple references, but are not supported in Windows filenames + fieldname = fieldname.replace(":", "-") + return os.path.join(self._outdir, f"{name}.{langpair}.{fieldname}") + + def _get_langpair_metadata(self, langpair): + """ + Given a language pair, return the metadata for that language pair. + Deal with errors if the language pair is not available. + + :param langpair: The language pair. e.g. "en-de" + :return: Dict format which is same as self.langpairs. + """ + if langpair is None: + langpairs = self.langpairs + elif langpair not in self.langpairs: + raise Exception(f"No such language pair {self.name}/{langpair}") + else: + langpairs = {langpair: self.langpairs[langpair]} + + return langpairs + + @abstractmethod + def process_to_text(self, langpair=None) -> None: + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. + """ + pass + + def fieldnames(self, langpair) -> List[str]: + """ + Return a list of all the field names. For most source, this is just + the source and the reference. For others, it might include the document + ID for each line, or the original language (origLang). + + get_files() should return the same number of items as this. + + :param langpair: The language pair (e.g., "de-en") + :return: a list of field names + """ + return ["src", "ref"] + + def __iter__(self, langpair): + """ + Iterates over all fields (source, references, and other metadata) defined + by the dataset. + """ + all_files = self.get_files(langpair) + all_fins = [smart_open(f) for f in all_files] + + for item in zip(*all_fins): + yield item + + def source(self, langpair): + """ + Return an iterable over the source lines. 
+ """ + source_file = self.get_source_file(langpair) + with smart_open(source_file) as fin: + for line in fin: + yield line.strip() + + def references(self, langpair): + """ + Return an iterable over the references. + """ + ref_files = self.get_reference_files(langpair) + ref_fins = [smart_open(f) for f in ref_files] + + for item in zip(*ref_fins): + yield item + + def get_source_file(self, langpair): + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + index = all_fields.index("src") + return all_files[index] + + def get_reference_files(self, langpair): + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + ref_files = [ + f for f, field in zip(all_files, all_fields) if field.startswith("ref") + ] + return ref_files + + def get_files(self, langpair): + """ + Returns the path of the source file and all reference files for + the provided test set / language pair. + Downloads the references first if they are not already local. + + :param langpair: The language pair (e.g., "de-en") + :return: a list of the source file and all reference files + """ + fields = self.fieldnames(langpair) + files = [self._get_txt_file_path(langpair, field) for field in fields] + + for file in files: + if not os.path.exists(file): + self.process_to_text(langpair) + return files diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..4381271d0ed69338e975f50eb0cf10977a9df96b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py @@ -0,0 +1,8 @@ +from .fake_sgml import FakeSGMLDataset + + +class IWSLTXMLDataset(FakeSGMLDataset): + """IWSLT dataset format. Can be parsed with the lxml parser.""" + + # Same as FakeSGMLDataset. Nothing to do here. + pass diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py new file mode 100644 index 0000000000000000000000000000000000000000..7f7a93db5339cdfa563fff390d4fb246da8350b4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py @@ -0,0 +1,36 @@ +import os + +from ..utils import smart_open +from .base import Dataset + + +class PlainTextDataset(Dataset): + """ + The plain text format. Data is separated into source and reference files. + Each line of the two files is aligned. + """ + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. 
+ """ + # ensure that the dataset is downloaded + self.maybe_download() + langpairs = self._get_langpair_metadata(langpair) + + for langpair in langpairs: + fieldnames = self.fieldnames(langpair) + origin_files = [ + os.path.join(self._rawdir, path) for path in langpairs[langpair] + ] + + for field, origin_file in zip(fieldnames, origin_files): + + origin_file = os.path.join(self._rawdir, origin_file) + output_file = self._get_txt_file_path(langpair, field) + + with smart_open(origin_file) as fin: + with smart_open(output_file, "wt") as fout: + for line in fin: + print(line.rstrip(), file=fout) diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/base.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/base.py new file mode 100644 index 0000000000000000000000000000000000000000..93fb10815a1a8b08c69bad19d2cbed58e251afc7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/base.py @@ -0,0 +1,438 @@ +"""The base `Score`, `Metric` and `Signature` classes to derive from. + +`Metric` is an abstract class that enforces the implementation of a set +of abstract methods. This way, a correctly implemented metric will work +seamlessly with the rest of the codebase. +""" + +import json +import logging +import statistics +from typing import List, Sequence, Any, Optional, Dict +from abc import ABCMeta, abstractmethod + +from .. import __version__ + +sacrelogger = logging.getLogger('sacrebleu') + + +class Score: + """A base score class to derive from. + + :param name: The name of the underlying metric. + :param score: A floating point number for the final metric. + """ + def __init__(self, name: str, score: float): + """`Score` initializer.""" + self.name = name + self.score = score + + # Statistical test related fields + self._mean = -1.0 + self._ci = -1.0 + + # More info can be added right after the score + self._verbose = '' + + def format(self, width: int = 2, score_only: bool = False, + signature: str = '', is_json: bool = False) -> str: + """Returns a pretty representation of the score. + :param width: Floating point decimal precision width. + :param score_only: If `True`, and the format is not `json`, + returns a single score string. + :param signature: A string representation of the given `Signature` + instance. + :param is_json: If `True`, will output the score in JSON string. + :return: A plain or JSON-formatted string representation. + """ + d = { + 'name': self.name, + 'score': float(f'{self.score:.{width}f}'), + 'signature': signature, + } + + sc = f'{self.score:.{width}f}' + + if self._mean > 0: + confidence_mean = f'{self._mean:.{width}f}' + confidence_var = f'{self._ci:.{width}f}' + confidence_str = f'μ = {confidence_mean} ± {confidence_var}' + + sc += f' ({confidence_str})' + if is_json: + d['confidence_mean'] = float(confidence_mean) + d['confidence_var'] = float(confidence_var) + d['confidence'] = confidence_str + + # Construct full score line + full_score = f"{self.name}|{signature}" if signature else self.name + full_score = f"{full_score} = {sc}" + if self._verbose: + full_score += f' {self._verbose}' + d['verbose_score'] = self._verbose + + if score_only: + return sc + + if is_json: + for param in signature.split('|'): + key, value = param.split(':') + d[key] = value + return json.dumps(d, indent=1, ensure_ascii=False) + + return full_score + + def estimate_ci(self, scores: List['Score']): + """Takes a list of scores and stores mean, stdev and 95% confidence + interval around the mean. 
+ + :param scores: A list of `Score` objects obtained from bootstrap + resampling for example. + """ + # Sort the scores + raw_scores = sorted([x.score for x in scores]) + n = len(raw_scores) + + # Get CI bounds (95%, i.e. 1/40 from left) + lower_idx = n // 40 + upper_idx = n - lower_idx - 1 + lower, upper = raw_scores[lower_idx], raw_scores[upper_idx] + self._ci = 0.5 * (upper - lower) + self._mean = statistics.mean(raw_scores) + + def __repr__(self): + """Returns a human readable score string.""" + return self.format() + + +class Signature: + """A convenience class to represent sacreBLEU reproducibility signatures. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`Signature` initializer.""" + # Global items that are shared across all metrics + self._abbr = { + 'version': 'v', + 'nrefs': '#', + 'test': 't', + 'lang': 'l', + 'subset': 'S', + 'origlang': 'o', + 'bs': 'bs', # Bootstrap resampling trials + 'ar': 'ar', # Approximate randomization trials + 'seed': 'rs', # RNG's seed + } + + if 'num_refs' not in args: + raise ValueError( + 'Number of references unknown, please evaluate the metric first.') + + num_refs = args['num_refs'] + if num_refs == -1: + # Detect variable number of refs + num_refs = 'var' + + # Global items that are shared across all metrics + # None's will be ignored + self.info = { + 'version': __version__, + 'nrefs': num_refs, + 'bs': args.get('n_bootstrap', None), + 'ar': None, + 'seed': args.get('seed', None), + 'test': args.get('test_set', None), + 'lang': args.get('langpair', None), + 'origlang': args.get('origlang', None), + 'subset': args.get('subset', None), + } + + def format(self, short: bool = False) -> str: + """Returns a string representation of the signature. + + :param short: If True, shortened signature is produced. + :return: A string representation of the signature. + """ + pairs = [] + keys = list(self.info.keys()) + # keep version always at end + keys.remove('version') + for name in keys + ['version']: + value = self.info[name] + if value is not None: + if isinstance(value, bool): + # Replace True/False with yes/no + value = 'yes' if value else 'no' + final_name = self._abbr[name] if short else name + pairs.append(f'{final_name}:{value}') + + return '|'.join(pairs) + + def update(self, key: str, value: Any): + """Add a new item or update an existing one. + + :param key: The key to use in the dictionary. + :param value: The associated value for the `key`. + """ + self.info[key] = value + + def __str__(self): + """Returns a human-readable signature string.""" + return self.format() + + def __repr__(self): + """Returns a human-readable signature string.""" + return self.format() + + +class Metric(metaclass=ABCMeta): + """A base class for all metrics that ensures the implementation of some + methods. Much of the common functionality is moved to this base class + from other metrics.""" + + # Each metric should define its Signature class' name here + _SIGNATURE_TYPE = Signature + + def __init__(self): + """`Metric` initializer.""" + # The pre-computed reference cache + self._ref_cache = None + + # only useful for BLEU tokenized warnings. Set to True so that + # warnings are not issued for other metrics. + self._force = True + + # Will be used by the signature when bootstrap resampling + self.n_bootstrap = None + self.seed = None + + def _check_sentence_score_args(self, hyp: str, refs: Sequence[str]): + """Performs sanity checks on `sentence_score` method's arguments. 
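+        Raises `TypeError` when, for example, `refs` is passed as a plain
+        string instead of a sequence of reference strings.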
+ + :param hyp: A single hypothesis string. + :param refs: A sequence of reference strings. + """ + prefix = self.__class__.__name__ + err_msg = None + + if not isinstance(hyp, str): + err_msg = 'The argument `hyp` should be a string.' + elif isinstance(refs, str) or not isinstance(refs, Sequence): + err_msg = 'The argument `refs` should be a sequence of strings.' + elif not isinstance(refs[0], str) and refs[0] is not None: + err_msg = 'Each element of `refs` should be a string.' + + if err_msg: + raise TypeError(f'{prefix}: {err_msg}') + + def _check_corpus_score_args(self, hyps: Sequence[str], + refs: Optional[Sequence[Sequence[str]]]): + """Performs sanity checks on `corpus_score` method's arguments. + + :param hypses: A sequence of hypothesis strings. + :param refs: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + """ + + prefix = self.__class__.__name__ + err_msg = None + + if not isinstance(hyps, Sequence): + err_msg = "`hyps` should be a sequence of strings." + elif not isinstance(hyps[0], str): + err_msg = 'Each element of `hyps` should be a string.' + elif any(line is None for line in hyps): + err_msg = "Undefined line in hypotheses stream!" + + if refs is not None: + if not isinstance(refs, Sequence): + err_msg = "`refs` should be a sequence of sequence of strings." + elif not isinstance(refs[0], Sequence): + err_msg = "Each element of `refs` should be a sequence of strings." + elif not isinstance(refs[0][0], str) and refs[0][0] is not None: + err_msg = "`refs` should be a sequence of sequence of strings." + + if err_msg: + raise TypeError(f'{prefix}: {err_msg}') + + @abstractmethod + def _aggregate_and_compute(self, stats: List[List[Any]]) -> Any: + """Computes the final score given the pre-computed match statistics. + + :param stats: A list of segment-level statistics. + :return: A `Score` instance. + """ + pass + + @abstractmethod + def _compute_score_from_stats(self, stats: List[Any]) -> Any: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `Score` object. + """ + pass + + @abstractmethod + def _preprocess_segment(self, sent: str) -> str: + """A wrapper around the metric's tokenization and pre-processing logic. + This should be implemented for reference caching to work correctly. + + :param sent: The input sentence. + :return: The pre-processed output sentence. + """ + pass + + @abstractmethod + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, extract the required + information (such as n-grams for BLEU and chrF). This should be implemented + for the generic `_cache_references()` to work across all metrics. + + :param refs: A sequence of strings. + """ + pass + + @abstractmethod + def _compute_segment_statistics(self, hypothesis: str, ref_kwargs: Dict) -> List[Any]: + """Given a (pre-processed) hypothesis sentence and already computed + reference info, returns the best match statistics across the + references. The return type is usually a List of ints or floats. + + :param hypothesis: A pre-processed hypothesis sentence. + :param ref_kwargs: A dictionary with reference-related information + within. This is formulated as a dictionary as different metrics may + require different information regarding a reference segment. 
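+        (In the chrF implementation in this package, for instance, this
+        carries the per-reference n-gram counters under a `ref_ngrams` key.)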
+ """ + pass + + def _cache_references(self, references: Sequence[Sequence[str]]) -> List[Any]: + """Given the full set of document references, extract segment n-grams + (or other necessary information) for caching purposes. + + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. A particular reference + segment can be '' or `None` to allow the use of variable number + of references per segment. + :return: A list where each element is a tuple of segment n-grams and + reference lengths, as returned by `_extract_reference_info()`. + """ + ref_cache = [] + + # Decide on final number of refs here as well + num_refs = set() + + for refs in zip(*references): + # Remove undefined references + lines = [x for x in refs if x is not None] + + # Keep track of reference counts to allow variable reference + # info in the signature + num_refs.add(len(lines)) + + lines = [self._preprocess_segment(x) for x in lines] + + # Get n-grams + ref_cache.append(self._extract_reference_info(lines)) + + if len(num_refs) == 1: + self.num_refs = list(num_refs)[0] + else: + # A variable number of refs exist + self.num_refs = -1 + + return ref_cache + + def _extract_corpus_statistics(self, hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]]) -> Any: + """Reads the corpus and returns sentence-level match statistics for + faster re-computations esp. during statistical tests. + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + :return: A list where each sublist corresponds to segment statistics. + """ + # Pre-compute references + # Don't store the cache as the user is explicitly passing refs + if references: + ref_cache = self._cache_references(references) + elif self._ref_cache: + ref_cache = self._ref_cache + else: + raise RuntimeError('No references provided and the cache is empty.') + + stats = [] + tok_count = 0 + + for hyp, ref_kwargs in zip(hypotheses, ref_cache): + # Check for already-tokenized input problem (only for BLEU) + if not self._force and hyp.endswith(' .'): + tok_count += 1 + + hyp = self._preprocess_segment(hyp) + + # Collect stats + stats.append(self._compute_segment_statistics(hyp, ref_kwargs)) + + if tok_count >= 100: + sacrelogger.warning("That's 100 lines that end in a tokenized period ('.')") + sacrelogger.warning("It looks like you forgot to detokenize your test data, which may hurt your score.") + sacrelogger.warning("If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.") + + return stats + + def sentence_score(self, hypothesis: str, references: Sequence[str]) -> Any: + """Compute the metric for a single sentence against a single (or multiple) reference(s). + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :return: A `Score` object. + """ + self._check_sentence_score_args(hypothesis, references) + + stats = self._extract_corpus_statistics( + [hypothesis], [[refs] for refs in references]) + return self._aggregate_and_compute(stats) + + def corpus_score(self, hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]], + n_bootstrap: int = 1) -> Any: + """Compute the metric for a corpus against a single (or multiple) reference(s). + + :param hypotheses: A sequence of hypothesis strings. 
+ :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + :param n_bootstrap: If > 1, provides 95% confidence interval around true mean + using bootstrap resampling with `n_bootstrap` samples. + :return: A `Score` object. + """ + self._check_corpus_score_args(hypotheses, references) + + # Collect corpus stats + stats = self._extract_corpus_statistics(hypotheses, references) + + # Compute the actual system score + actual_score = self._aggregate_and_compute(stats) + + if n_bootstrap > 1: + # Compute bootstrap estimate as well + # Delayed import is to escape from numpy import if bootstrap + # is not requested. + from ..significance import _bootstrap_resample + + self.n_bootstrap = n_bootstrap + self.seed, bs_scores = _bootstrap_resample(stats, self, n_bootstrap) + actual_score.estimate_ci(bs_scores) + + return actual_score + + def get_signature(self) -> Signature: + """Creates and returns the signature for the metric. The creation + of signatures is delayed as the number of references is resolved + only at the point of reference caching.""" + return self._SIGNATURE_TYPE(self.__dict__) diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d4f6858d0c6005f97ad8011a0b17bd97c2bcea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py @@ -0,0 +1,284 @@ +"""The implementation of chrF (Popović 2015) and chrF++ (Popović 2017) metrics.""" + +from typing import List, Sequence, Optional, Dict +from collections import Counter + +from ..utils import sum_of_lists +from .base import Score, Signature, Metric +from .helpers import extract_all_char_ngrams, extract_word_ngrams + + +class CHRFSignature(Signature): + """A convenience class to represent the reproducibility signature for chrF. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`CHRFSignature` initializer.""" + super().__init__(args) + self._abbr.update({ + 'case': 'c', + 'eff': 'e', + 'nc': 'nc', + 'nw': 'nw', + 'space': 's', + }) + + self.info.update({ + 'case': 'lc' if args['lowercase'] else 'mixed', + 'eff': 'yes' if not args['eps_smoothing'] else 'no', + 'nc': args['char_order'], + 'nw': args['word_order'], + 'space': 'yes' if args['whitespace'] else 'no', + }) + + +class CHRFScore(Score): + """A convenience class to represent chrF scores. + + :param score: The chrF (chrF++) score. + :param char_order: The character n-gram order. + :param word_order: The word n-gram order. If equals to 2, the metric is referred to as chrF++. + :param beta: Determine the importance of recall w.r.t precision. + """ + def __init__(self, score: float, char_order: int, word_order: int, beta: int): + """`CHRFScore` initializer.""" + self.beta = beta + self.char_order = char_order + self.word_order = word_order + + # Add + signs to denote chrF+ variant + name = f'chrF{self.beta}' + '+' * self.word_order + + super().__init__(name, score) + + +class CHRF(Metric): + """Computes the chrF(++) metric given hypotheses and references. + + :param char_order: Character n-gram order. + :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++. + :param beta: Determine the importance of recall w.r.t precision. + :param lowercase: Enable case-insensitivity. 
+ :param whitespace: If `True`, include whitespaces when extracting character n-grams. + :param eps_smoothing: If `True`, applies epsilon smoothing similar + to reference chrF++.py, NLTK and Moses implementations. Otherwise, + it takes into account effective match order similar to sacreBLEU < 2.0.0. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference n-grams + will be pre-computed and cached for faster re-computation across many systems. + """ + + # Maximum character n-gram order to take into account + CHAR_ORDER = 6 + + # chrF+ additionally takes into account some of the word n-grams + WORD_ORDER = 0 + + # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341) + BETA = 2 + + # Cache string.punctuation for chrF+' punctuation stripper + _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~') + + _SIGNATURE_TYPE = CHRFSignature + + def __init__(self, char_order: int = CHAR_ORDER, + word_order: int = WORD_ORDER, + beta: int = BETA, + lowercase: bool = False, + whitespace: bool = False, + eps_smoothing: bool = False, + references: Optional[Sequence[Sequence[str]]] = None): + """`CHRF` initializer.""" + super().__init__() + + self.beta = beta + self.char_order = char_order + self.word_order = word_order + self.order = self.char_order + self.word_order + self.lowercase = lowercase + self.whitespace = whitespace + self.eps_smoothing = eps_smoothing + + if references is not None: + # Pre-compute reference ngrams + self._ref_cache = self._cache_references(references) + + @staticmethod + def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]: + """Computes the match statistics between hypothesis and reference n-grams. + + :param hyp_ngrams: A `Counter` holding hypothesis n-grams. + :param ref_ngrams: A `Counter` holding reference n-grams. + :return: A list of three numbers denoting hypothesis n-gram count, + reference n-gram count and the intersection count. + """ + # Counter's internal intersection is not that fast, count manually + match_count, hyp_count = 0, 0 + for ng, count in hyp_ngrams.items(): + hyp_count += count + if ng in ref_ngrams: + match_count += min(count, ref_ngrams[ng]) + + return [ + # Don't count hits if no reference exists for that n-gram + hyp_count if ref_ngrams else 0, + sum(ref_ngrams.values()), + match_count, + ] + + def _remove_punctuation(self, sent: str) -> List[str]: + """Separates out punctuations from beginning and end of words for chrF. + Adapted from https://github.com/m-popovic/chrF + + :param sent: A string. + :return: A list of words. + """ + tokenized = [] + for w in sent.split(): + if len(w) == 1: + tokenized.append(w) + else: + # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124) + if w[-1] in self._PUNCTS: + tokenized += [w[:-1], w[-1]] + elif w[0] in self._PUNCTS: + tokenized += [w[0], w[1:]] + else: + tokenized.append(w) + return tokenized + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, apply optional lowercasing. + + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + return sent.lower() if self.lowercase else sent + + def _compute_f_score(self, statistics: List[int]) -> float: + """Compute the chrF score given the n-gram match statistics. + + :param statistics: A flattened list of 3 * (`char_order` + `word_order`) + elements giving the [hyp, ref, match] counts for each order. + :return: The final f_beta score between [0, 100]. 
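+
+        For each n-gram order this follows
+        F_beta = (1 + beta^2) * prec * rec / (beta^2 * prec + rec),
+        and the final score averages over the (effective) orders.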
+ """ + eps = 1e-16 + score = 0.0 + effective_order = 0 + factor = self.beta ** 2 + avg_prec, avg_rec = 0.0, 0.0 + + for i in range(self.order): + n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3] + + # chrF++.py style EPS smoothing (also used by Moses and NLTK) + prec = n_match / n_hyp if n_hyp > 0 else eps + rec = n_match / n_ref if n_ref > 0 else eps + + denom = factor * prec + rec + score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps + + # sacreBLEU <2.0.0 style effective order smoothing + if n_hyp > 0 and n_ref > 0: + avg_prec += prec + avg_rec += rec + effective_order += 1 + + if self.eps_smoothing: + return 100 * score / self.order + + if effective_order == 0: + avg_prec = avg_rec = 0.0 + else: + avg_prec /= effective_order + avg_rec /= effective_order + + if avg_prec + avg_rec: + score = (1 + factor) * avg_prec * avg_rec + score /= ((factor * avg_prec) + avg_rec) + return 100 * score + else: + return 0.0 + + def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `CHRFScore` object. + """ + return CHRFScore( + self._compute_f_score(stats), self.char_order, + self.word_order, self.beta) + + def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore: + """Computes the final score given the pre-computed corpus statistics. + + :param stats: A list of segment-level statistics + :return: A `CHRFScore` object. + """ + return self._compute_score_from_stats(sum_of_lists(stats)) + + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]: + """Given a list of reference segments, extract the character and word n-grams. + + :param refs: A sequence of reference segments. + :return: A list where each element contains n-grams per reference segment. + """ + ngrams = [] + + for ref in refs: + # extract character n-grams + stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace) + + # Check chrF+ mode + if self.word_order > 0: + ref_words = self._remove_punctuation(ref) + + for n in range(self.word_order): + stats.append(extract_word_ngrams(ref_words, n + 1)) + + ngrams.append(stats) + + return {'ref_ngrams': ngrams} + + def _compute_segment_statistics( + self, hypothesis: str, ref_kwargs: Dict) -> List[int]: + """Given a (pre-processed) hypothesis sentence and already computed + reference n-grams, returns the best match statistics across the + references. + + :param hypothesis: Hypothesis sentence. + :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list + where each sublist contains n-gram counters for a particular reference sentence. + :return: A list of integers where each triplet denotes [hyp, ref, match] + statistics. 
+ """ + best_stats = [] + best_f_score = -1.0 + + # extract character n-grams + all_hyp_ngrams = extract_all_char_ngrams( + hypothesis, self.char_order, self.whitespace) + + # Check chrF+ mode to see if we'll add word n-grams as well + if self.word_order > 0: + # Primitive tokenization: separate out punctuations + hwords = self._remove_punctuation(hypothesis) + _range = range(1, self.word_order + 1) + all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range]) + + # Iterate over multiple references, pick the one with best F score + for _ref_ngrams in ref_kwargs['ref_ngrams']: + stats = [] + # Traverse all orders + for h, r in zip(all_hyp_ngrams, _ref_ngrams): + stats.extend(self._get_match_statistics(h, r)) + f_score = self._compute_f_score(stats) + + if f_score > best_f_score: + best_f_score = f_score + best_stats = stats + + return best_stats diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..72ec14461658249fcd63a139623f3ead9a4aa057 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py @@ -0,0 +1,69 @@ +"""Various utility functions for word and character n-gram extraction.""" + +from collections import Counter +from typing import List, Tuple + + +def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]: + """Extracts all ngrams (min_order <= n <= max_order) from a sentence. + + :param line: A string sentence. + :param min_order: Minimum n-gram order. + :param max_order: Maximum n-gram order. + :return: a Counter object with n-grams counts and the sequence length. + """ + + ngrams = [] + tokens = line.split() + + for n in range(min_order, max_order + 1): + for i in range(0, len(tokens) - n + 1): + ngrams.append(tuple(tokens[i: i + n])) + + return Counter(ngrams), len(tokens) + + +def extract_word_ngrams(tokens: List[str], n: int) -> Counter: + """Extracts n-grams with order `n` from a list of tokens. + + :param tokens: A list of tokens. + :param n: The order of n-grams. + :return: a Counter object with n-grams counts. + """ + return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]) + + +def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter: + """Yields counts of character n-grams from a sentence. + + :param line: A segment containing a sequence of words. + :param n: The order of the n-grams. + :param include_whitespace: If given, will not strip whitespaces from the line. + :return: a dictionary containing ngrams and counts + """ + if not include_whitespace: + line = ''.join(line.split()) + + return Counter([line[i:i + n] for i in range(len(line) - n + 1)]) + + +def extract_all_char_ngrams( + line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]: + """Extracts all character n-grams at once for convenience. + + :param line: A segment containing a sequence of words. + :param max_order: The maximum order of the n-grams. + :param include_whitespace: If given, will not strip whitespaces from the line. + :return: a list of Counter objects containing ngrams and counts. 
+ """ + + counters = [] + + if not include_whitespace: + line = ''.join(line.split()) + + for n in range(1, max_order + 1): + ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)]) + counters.append(ngrams) + + return counters diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py new file mode 100644 index 0000000000000000000000000000000000000000..2d2de4944c955ebf0c8b37fce7f04eb16f79c026 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py @@ -0,0 +1,478 @@ +"""This module implements various utility functions for the TER metric.""" + +# Copyright 2020 Memsource +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import math +from typing import List, Tuple, Dict + + +_COST_INS = 1 +_COST_DEL = 1 +_COST_SUB = 1 + +# Tercom-inspired limits +_MAX_SHIFT_SIZE = 10 +_MAX_SHIFT_DIST = 50 +_BEAM_WIDTH = 25 + +# Our own limits +_MAX_CACHE_SIZE = 10000 +_MAX_SHIFT_CANDIDATES = 1000 +_INT_INFINITY = int(1e16) + +_OP_INS = 'i' +_OP_DEL = 'd' +_OP_NOP = ' ' +_OP_SUB = 's' +_OP_UNDEF = 'x' + +_FLIP_OPS = str.maketrans(_OP_INS + _OP_DEL, _OP_DEL + _OP_INS) + + +def translation_edit_rate(words_hyp: List[str], words_ref: List[str]) -> Tuple[int, int]: + """Calculate the translation edit rate. + + :param words_hyp: Tokenized translation hypothesis. + :param words_ref: Tokenized reference translation. + :return: tuple (number of edits, length) + """ + n_words_ref = len(words_ref) + n_words_hyp = len(words_hyp) + if n_words_ref == 0: + # FIXME: This trace here is not used? + trace = _OP_DEL * n_words_hyp + # special treatment of empty refs + return n_words_hyp, 0 + + cached_ed = BeamEditDistance(words_ref) + shifts = 0 + + input_words = words_hyp + checked_candidates = 0 + while True: + # do shifts until they stop reducing the edit distance + delta, new_input_words, checked_candidates = _shift( + input_words, words_ref, cached_ed, checked_candidates) + + if checked_candidates >= _MAX_SHIFT_CANDIDATES: + break + + if delta <= 0: + break + shifts += 1 + input_words = new_input_words + + edit_distance, trace = cached_ed(input_words) + total_edits = shifts + edit_distance + + return total_edits, n_words_ref + + +def _shift(words_h: List[str], words_r: List[str], cached_ed, + checked_candidates: int) -> Tuple[int, List[str], int]: + """Attempt to shift words in hypothesis to match reference. + + Returns the shift that reduces the edit distance the most. + + Note that the filtering of possible shifts and shift selection are heavily + based on somewhat arbitrary heuristics. The code here follows as closely + as possible the logic in Tercom, not always justifying the particular design + choices. + + :param words_h: Hypothesis. + :param words_r: Reference. + :param cached_ed: Cached edit distance. + :param checked_candidates: Number of shift candidates that were already + evaluated. + :return: (score, shifted_words, checked_candidates). 
+    """
+    pre_score, inv_trace = cached_ed(words_h)
+
+    # to get the alignment, we pretend we are rewriting the reference into the
+    # hypothesis, so we need to flip the trace of edit operations
+    trace = _flip_trace(inv_trace)
+    align, ref_err, hyp_err = trace_to_alignment(trace)
+
+    best = None
+
+    for start_h, start_r, length in _find_shifted_pairs(words_h, words_r):
+        # don't do the shift unless the hypothesis span contains errors and
+        # the corresponding reference span is also in error
+        if sum(hyp_err[start_h: start_h + length]) == 0:
+            continue
+
+        if sum(ref_err[start_r: start_r + length]) == 0:
+            continue
+
+        # don't try to shift within the subsequence
+        if start_h <= align[start_r] < start_h + length:
+            continue
+
+        prev_idx = -1
+        for offset in range(-1, length):
+            if start_r + offset == -1:
+                idx = 0  # insert before the beginning
+            elif start_r + offset in align:
+                # Unlike Tercom which inserts *after* the index, we insert
+                # *before* the index.
+                idx = align[start_r + offset] + 1
+            else:
+                break  # offset is out of bounds => aims past reference
+
+            if idx == prev_idx:
+                continue  # skip idx if already tried
+
+            prev_idx = idx
+
+            shifted_words = _perform_shift(words_h, start_h, length, idx)
+            assert len(shifted_words) == len(words_h)
+
+            # Elements of the tuple are designed to replicate Tercom's ranking
+            # of shifts:
+            candidate = (
+                pre_score - cached_ed(shifted_words)[0],  # highest score first
+                length,  # then, longest match first
+                -start_h,  # then, earliest match first
+                -idx,  # then, earliest target position first
+                shifted_words,
+            )
+
+            checked_candidates += 1
+
+            if not best or candidate > best:
+                best = candidate
+
+        if checked_candidates >= _MAX_SHIFT_CANDIDATES:
+            break
+
+    if not best:
+        return 0, words_h, checked_candidates
+    else:
+        best_score, _, _, _, shifted_words = best
+        return best_score, shifted_words, checked_candidates
+
+
+def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]:
+    """Perform a shift in `words` from `start` to `target`.
+
+    :param words: Words to shift.
+    :param start: Where from.
+    :param length: How many words.
+    :param target: Where to.
+    :return: Shifted words.
+    """
+    if target < start:
+        # shift before the previous position
+        return words[:target] + words[start: start + length] \
+            + words[target: start] + words[start + length:]
+    elif target > start + length:
+        # shift after the previous position
+        return words[:start] + words[start + length: target] \
+            + words[start: start + length] + words[target:]
+    else:
+        # shift within the shifted string
+        return words[:start] + words[start + length: length + target] \
+            + words[start: start + length] + words[length + target:]
+
+
+def _find_shifted_pairs(words_h: List[str], words_r: List[str]):
+    """Find matching word sub-sequences in two lists of words.
+
+    Ignores sub-sequences starting at the same position.
+
+    :param words_h: First word list.
+    :param words_r: Second word list.
+    :return: Yields tuples of (h_start, r_start, length) such that:
+
+        words_h[h_start:h_start+length] = words_r[r_start:r_start+length]
+    """
+    n_words_h = len(words_h)
+    n_words_r = len(words_r)
+    for start_h in range(n_words_h):
+        for start_r in range(n_words_r):
+            # this is slightly different from what tercom does but this should
+            # really only kick in in degenerate cases
+            if abs(start_r - start_h) > _MAX_SHIFT_DIST:
+                continue
+
+            length = 0
+            while words_h[start_h + length] == words_r[start_r + length] and length < _MAX_SHIFT_SIZE:
+                length += 1
+
+                yield start_h, start_r, length
+
+                # If one sequence is consumed, stop processing
+                if n_words_h == start_h + length or n_words_r == start_r + length:
+                    break
+
+
+def _flip_trace(trace):
+    """Flip the trace of edit operations.
+
+    Instead of rewriting a->b, get a recipe for rewriting b->a.
+
+    Simply flips insertions and deletions.
+    """
+    return trace.translate(_FLIP_OPS)
+
+
+def trace_to_alignment(trace: str) -> Tuple[Dict, List, List]:
+    """Transform a trace of edit operations into an alignment of the sequences.
+
+    :param trace: Trace of edit operations (' '=no change or 's'/'i'/'d').
+    :return: Alignment, error positions in the reference, error positions in the hypothesis.
+    """
+    pos_hyp = -1
+    pos_ref = -1
+    hyp_err = []
+    ref_err = []
+    align = {}
+
+    # we are rewriting a into b
+    for op in trace:
+        if op == _OP_NOP:
+            pos_hyp += 1
+            pos_ref += 1
+            align[pos_ref] = pos_hyp
+            hyp_err.append(0)
+            ref_err.append(0)
+        elif op == _OP_SUB:
+            pos_hyp += 1
+            pos_ref += 1
+            align[pos_ref] = pos_hyp
+            hyp_err.append(1)
+            ref_err.append(1)
+        elif op == _OP_INS:
+            pos_hyp += 1
+            hyp_err.append(1)
+        elif op == _OP_DEL:
+            pos_ref += 1
+            align[pos_ref] = pos_hyp
+            ref_err.append(1)
+        else:
+            raise Exception(f"unknown operation {op!r}")
+
+    return align, ref_err, hyp_err
+
+
+class BeamEditDistance:
+    """Edit distance with several features required for TER calculation.
+
+    * internal cache
+    * "beam" search
+    * tracking of edit operations
+
+    The internal self._cache works like this:
+
+    Keys are words of the hypothesis. Values are tuples (next_node, row) where:
+
+    * next_node is the cache for the next word in the sequence
+    * row is the stored row of the edit distance matrix
+
+    Effectively, caching allows skipping several rows of the edit distance
+    matrix; the computation is instead initialized with the last matching
+    matrix row.
+
+    Beam search, as implemented here, only explores a fixed-size sub-row of
+    candidates around the matrix diagonal (more precisely, it's a
+    "pseudo"-diagonal since we take the ratio of sequence lengths into account).
+
+    Tracking makes it possible to reconstruct the optimal sequence of edit
+    operations.
+
+    :param words_ref: A list of reference tokens.
+    """
+    def __init__(self, words_ref: List[str]):
+        """`BeamEditDistance` initializer."""
+        self._words_ref = words_ref
+        self._n_words_ref = len(self._words_ref)
+
+        # The first row corresponds to insertion operations of the reference,
+        # so we do one edit operation per reference word.
+        self._initial_row = [(i * _COST_INS, _OP_INS)
+                             for i in range(self._n_words_ref + 1)]
+
+        self._cache = {}  # type: Dict[str, Tuple]
+        self._cache_size = 0
+
+        # Precomputed empty matrix row. Contains infinities so that beam search
+        # avoids using the uninitialized cells.
+        self._empty_row = [(_INT_INFINITY, _OP_UNDEF)] * (self._n_words_ref + 1)
+
+    def __call__(self, words_hyp: List[str]) -> Tuple[int, str]:
+        """Calculate the edit distance between self._words_ref and the hypothesis.
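+
+        For example, with ``words_ref = ['a', 'b']``, calling the instance
+        with ``['a', 'c']`` returns ``(1, ' s')``: a no-op on the matching
+        word followed by one substitution.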
+
+        Uses the cache to skip some of the computation.
+
+        :param words_hyp: Words in the translation hypothesis.
+        :return: Tuple (edit distance, trace of edit operations).
+        """
+
+        # skip initial words in the hypothesis for which we already know the
+        # edit distance
+        start_position, dist = self._find_cache(words_hyp)
+
+        # calculate the rest of the edit distance matrix
+        edit_distance, newly_created_matrix, trace = self._edit_distance(
+            words_hyp, start_position, dist)
+
+        # update our cache with the newly calculated rows
+        self._add_cache(words_hyp, newly_created_matrix)
+
+        return edit_distance, trace
+
+    def _edit_distance(self, words_h: List[str], start_h: int,
+                       cache: List[List[Tuple[int, str]]]) -> Tuple[int, List, str]:
+        """The actual edit distance calculation.
+
+        Can be initialized with the last cached row and a start position in
+        the hypothesis that it corresponds to.
+
+        :param words_h: Words in the translation hypothesis.
+        :param start_h: Position from which to start the calculation.
+        (This is zero if no cache match was found.)
+        :param cache: Precomputed rows corresponding to the edit distance matrix
+        before `start_h`.
+        :return: Edit distance value, newly computed rows to update the
+        cache, trace.
+        """
+
+        n_words_h = len(words_h)
+
+        # initialize the rest of the matrix with infinite edit distances
+        rest_empty = [list(self._empty_row)
+                      for _ in range(n_words_h - start_h)]
+
+        dist = cache + rest_empty
+
+        assert len(dist) == n_words_h + 1
+
+        length_ratio = self._n_words_ref / n_words_h if words_h else 1
+
+        # in some degenerate sentences, the difference in length is so large
+        # that we may end up with zero overlap with the previous row
+        if _BEAM_WIDTH < length_ratio / 2:
+            beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH)
+        else:
+            beam_width = _BEAM_WIDTH
+
+        # calculate the Levenshtein distance
+        for i in range(start_h + 1, n_words_h + 1):
+            pseudo_diag = math.floor(i * length_ratio)
+            min_j = max(0, pseudo_diag - beam_width)
+            max_j = min(self._n_words_ref + 1, pseudo_diag + beam_width)
+
+            if i == n_words_h:
+                max_j = self._n_words_ref + 1
+
+            for j in range(min_j, max_j):
+                if j == 0:
+                    dist[i][j] = (dist[i - 1][j][0] + _COST_DEL, _OP_DEL)
+                else:
+                    if words_h[i - 1] == self._words_ref[j - 1]:
+                        cost_sub = 0
+                        op_sub = _OP_NOP
+                    else:
+                        cost_sub = _COST_SUB
+                        op_sub = _OP_SUB
+
+                    # Tercom prefers no-op/sub, then insertion, then deletion.
+                    # But since we flip the trace and compute the alignment from
+                    # the inverse, we need to swap the order of insertion and
+                    # deletion in the preference.
+                    ops = (
+                        (dist[i - 1][j - 1][0] + cost_sub, op_sub),
+                        (dist[i - 1][j][0] + _COST_DEL, _OP_DEL),
+                        (dist[i][j - 1][0] + _COST_INS, _OP_INS),
+                    )
+
+                    for op_cost, op_name in ops:
+                        if dist[i][j][0] > op_cost:
+                            dist[i][j] = op_cost, op_name
+
+        # get the trace
+        trace = ""
+        i = n_words_h
+        j = self._n_words_ref
+
+        while i > 0 or j > 0:
+            op = dist[i][j][1]
+            trace = op + trace
+            if op in (_OP_SUB, _OP_NOP):
+                i -= 1
+                j -= 1
+            elif op == _OP_INS:
+                j -= 1
+            elif op == _OP_DEL:
+                i -= 1
+            else:
+                raise Exception(f"unknown operation {op!r}")
+
+        return dist[-1][-1][0], dist[len(cache):], trace
+
+    def _add_cache(self, words_hyp: List[str], mat: List[List[Tuple]]):
+        """Add newly computed rows to the cache.
+
+        Since the edit distance is only calculated on the hypothesis suffix
+        that was not in the cache, the number of rows in `mat` may be smaller
+        than the hypothesis length. In that case, we skip over these initial
+        words.
+
+        :param words_hyp: Hypothesis words.
+        :param mat: Edit distance matrix rows for each position.
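+
+        As a (hypothetical) illustration: after caching the hypothesis
+        ``['a', 'b']``, the cache has the shape
+        ``{'a': ({'b': ({}, row_b)}, row_a)}``, where ``row_a`` and
+        ``row_b`` are the stored matrix rows.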
+ """ + if self._cache_size >= _MAX_CACHE_SIZE: + return + + node = self._cache + + n_mat = len(mat) + + # how many initial words to skip + skip_num = len(words_hyp) - n_mat + + # jump through the cache to the current position + for i in range(skip_num): + node = node[words_hyp[i]][0] + + assert len(words_hyp[skip_num:]) == n_mat + + # update cache with newly computed rows + for word, row in zip(words_hyp[skip_num:], mat): + if word not in node: + node[word] = ({}, tuple(row)) + self._cache_size += 1 + value = node[word] + node = value[0] + + def _find_cache(self, words_hyp: List[str]) -> Tuple[int, List[List]]: + """Find the already computed rows of the edit distance matrix in cache. + + Returns a partially computed edit distance matrix. + + :param words_hyp: Translation hypothesis. + :return: Tuple (start position, dist). + """ + node = self._cache + start_position = 0 + dist = [self._initial_row] + for word in words_hyp: + if word in node: + start_position += 1 + node, row = node[word] + dist.append(row) + else: + break + + return start_position, dist diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9a02e536b55801f062b26e0afd93fac92c17a62 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0c5c92ce80abb73f4f0d2b7035be22cbbfec14c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33a8785ab55d85785eda836add1951153b5b5ae2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f796ee7727117da50b04c1c4e4ed5b41ee1d849 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a5eab6273404a938f7d433030bffa438c079447 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c727fcb9b6c7b01fd6b4d9a03aa2594e74b508c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71600534593f549502d4bf29107760e32a3910ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b4019209b129d91bc5b1541d1cea41ee1cc78ba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..001f5576361686fe3e0b9bbdfb4a6a5c9ac5f957 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e0db321a4dbafb88528138e977e5c501f18c554 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05a7a9040a4d10e63f1c27d891493fdf51dd572f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8d0a5e2418c41107c2104db051de0679a8ad482 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py new file mode 100644 index 0000000000000000000000000000000000000000..6441a7621882007faf99d261f7506550ee8164bd --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py
@@ -0,0 +1,34 @@
+from functools import lru_cache
+from .tokenizer_base import BaseTokenizer
+from .tokenizer_re import TokenizerRegexp
+
+
+class Tokenizer13a(BaseTokenizer):
+
+    def signature(self):
+        return '13a'
+
+    def __init__(self):
+        self._post_tokenizer = TokenizerRegexp()
+
+    @lru_cache(maxsize=2**16)
+    def __call__(self, line):
+        """Tokenizes an input line using a relatively minimal tokenization
+        that is nonetheless equivalent to mteval-v13a, used by WMT.
+
+        :param line: a segment to tokenize
+        :return: the tokenized line
+        """
+
+        # language-independent part:
+        line = line.replace('<skipped>', '')
+        line = line.replace('-\n', '')
+        line = line.replace('\n', ' ')
+
+        # Unescape the HTML entities produced by the mteval pipeline
+        if '&' in line:
+            line = line.replace('&quot;', '"')
+            line = line.replace('&amp;', '&')
+            line = line.replace('&lt;', '<')
+            line = line.replace('&gt;', '>')
+
+        return self._post_tokenizer(f' {line} ')
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..faf3de4ca6310bd879f1aa2e4c35df59210e280d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py
@@ -0,0 +1,19 @@
+class BaseTokenizer:
+    """A base dummy tokenizer to derive from."""
+
+    def signature(self):
+        """
+        Returns a signature for the tokenizer.
+
+        :return: signature string
+        """
+        raise NotImplementedError()
+
+    def __call__(self, line):
+        """
+        Tokenizes an input line with the tokenizer.
+
+        :param line: a segment to tokenize
+        :return: the tokenized line
+        """
+        raise NotImplementedError()
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eb67eb5126c4882d330387f662c0c865ce56f0c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py
@@ -0,0 +1,38 @@
+from functools import lru_cache
+import re
+
+from .tokenizer_base import BaseTokenizer
+
+
+class TokenizerRegexp(BaseTokenizer):
+
+    def signature(self):
+        return 're'
+
+    def __init__(self):
+        self._re = [
+            # language-dependent part (assuming Western languages)
+            (re.compile(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])'), r' \1 '),
+            # tokenize period and comma unless preceded by a digit
+            (re.compile(r'([^0-9])([\.,])'), r'\1 \2 '),
+            # tokenize period and comma unless followed by a digit
+            (re.compile(r'([\.,])([^0-9])'), r' \1 \2'),
+            # tokenize dash when preceded by a digit
+            (re.compile(r'([0-9])(-)'), r'\1 \2 '),
+            # one space only between words
+            # NOTE: Doing this in Python (below) is faster
+            # (re.compile(r'\s+'), r' '),
+        ]
+
+    @lru_cache(maxsize=2**16)
+    def __call__(self, line):
+        """Common post-processing tokenizer for `13a` and `zh` tokenizers.
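+
+        For example, ``'Hello, world!'`` becomes ``'Hello , world !'``.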
+
+        :param line: a segment to tokenize
+        :return: the tokenized line
+        """
+        for (_re, repl) in self._re:
+            line = _re.sub(repl, line)
+
+        # no leading or trailing spaces, single space within words
+        return ' '.join(line.split())
diff --git a/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py
new file mode 100644
index 0000000000000000000000000000000000000000..92729b5be351622ccfc23d25b7f85e6221b56dd3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+import os
+import logging
+
+from functools import lru_cache
+from ..utils import SACREBLEU_DIR, download_file
+from .tokenizer_base import BaseTokenizer
+
+sacrelogger = logging.getLogger('sacrebleu')
+
+
+SPM_MODELS = {
+    "spm": {
+        "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
+        "signature": "flores101",
+    },
+    # same as the default of "spm"
+    "flores101": {
+        "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
+        "signature": "flores101",
+    },
+    "flores200": {
+        "url": "https://tinyurl.com/flores200sacrebleuspm",
+        "signature": "flores200",
+    },
+}
+
+
+class TokenizerSPM(BaseTokenizer):
+    def signature(self):
+        return self.name
+
+    def __init__(self, key="spm"):
+        self.name = SPM_MODELS[key]["signature"]
+
+        if key == "spm":
+            sacrelogger.warning("Tokenizer 'spm' has been changed to 'flores101', and may be removed in the future.")
+
+        try:
+            import sentencepiece as spm
+        except ImportError:
+            raise ImportError(
+                '\n\nPlease install the sentencepiece library for SPM tokenization:'
+                '\n\n  pip install sentencepiece ')
+        self.sp = spm.SentencePieceProcessor()
+
+        model_path = os.path.join(SACREBLEU_DIR, "models", os.path.basename(SPM_MODELS[key]["url"]))
+        if not os.path.exists(model_path):
+            url = SPM_MODELS[key]["url"]
+            download_file(url, model_path)
+        self.sp.Load(model_path)
+
+    @lru_cache(maxsize=2**16)
+    def __call__(self, line):
+        """Tokenizes the input line with the SentencePiece model.
+
+        :param line: a segment to tokenize
+        :return: the tokenized line
+        """
+        return " ".join(self.sp.EncodeAsPieces(line))
+
+
+class Flores200Tokenizer(TokenizerSPM):
+    def __init__(self):
+        super().__init__("flores200")
+
+
+class Flores101Tokenizer(TokenizerSPM):
+    def __init__(self):
+        super().__init__("flores101")
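+
+
+# A minimal usage sketch (an illustrative addition, not part of the original
+# module; it assumes sentencepiece is installed and the model file can be
+# downloaded on first use):
+#
+#   tokenizer = Flores101Tokenizer()
+#   tokenizer('Hello world.')  # returns space-joined SentencePiece pieces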