diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.py b/llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d288e6ede67df2bb8e5660e30372e190eb23e65
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.py
@@ -0,0 +1,36 @@
+from frozenlist import FrozenList
+
+__version__ = "1.3.1"
+
+__all__ = ("Signal",)
+
+
+class Signal(FrozenList):
+    """Coroutine-based signal implementation.
+
+    To connect a callback to a signal, use any list method.
+
+    Signals are fired using the send() coroutine, which takes named
+    arguments.
+    """
+
+    __slots__ = ("_owner",)
+
+    def __init__(self, owner):
+        super().__init__()
+        self._owner = owner
+
+    def __repr__(self):
+        return "<Signal owner={}, frozen={}, {!r}>".format(
+            self._owner, self.frozen, list(self)
+        )
+
+    async def send(self, *args, **kwargs):
+        """
+        Sends data to all registered receivers.
+        """
+        if not self.frozen:
+            raise RuntimeError("Cannot send non-frozen signal.")
+
+        for receiver in self:
+            await receiver(*args, **kwargs)  # type: ignore
diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..d4e3416d72246058259061578a82697e2bc0706e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/aiosignal/__init__.pyi
@@ -0,0 +1,12 @@
+from typing import Any, Generic, TypeVar
+
+from frozenlist import FrozenList
+
+__all__ = ("Signal",)
+
+_T = TypeVar("_T")
+
+class Signal(FrozenList[_T], Generic[_T]):
+    def __init__(self, owner: Any) -> None: ...
+    def __repr__(self) -> str: ...
+    async def send(self, *args: Any, **kwargs: Any) -> None: ...
diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4611fb4b9c750c7e0c8cb4a34c859784ad50e001
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/aiosignal/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal/py.typed b/llmeval-env/lib/python3.10/site-packages/aiosignal/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/__init__.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..55991fc38062b9c800805437ee49b0cf42b98103
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/__init__.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+Charset-Normalizer
+~~~~~~~~~~~~~~
+The Real First Universal Charset Detector.
+A library that helps you read text from an unknown charset encoding.
+Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
+All IANA character set names for which the Python core library provides codecs are supported.
+
+Basic usage:
+   >>> from charset_normalizer import from_bytes
+   >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
+   >>> best_guess = results.best()
+   >>> str(best_guess)
+   'Bсеки човек има право на образование. Oбразованието!'
+
+Others methods and usages are available - see the full documentation
+at <https://charset-normalizer.readthedocs.io/en/latest/>.
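+
+A minimal extra sketch (the file name is hypothetical; from_path is
+re-exported below alongside from_bytes):
+
+   >>> from charset_normalizer import from_path
+   >>> best_guess = from_path('./my_unknown_file.txt').best()
+   >>> best_guess.encoding if best_guess is not None else None
+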
+:copyright: (c) 2021 by Ahmed TAHRI +:license: MIT, see LICENSE for more details. +""" +import logging + +from .api import from_bytes, from_fp, from_path, is_binary +from .legacy import detect +from .models import CharsetMatch, CharsetMatches +from .utils import set_logging_handler +from .version import VERSION, __version__ + +__all__ = ( + "from_fp", + "from_path", + "from_bytes", + "is_binary", + "detect", + "CharsetMatch", + "CharsetMatches", + "__version__", + "VERSION", + "set_logging_handler", +) + +# Attach a NullHandler to the top level logger by default +# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library + +logging.getLogger("charset_normalizer").addHandler(logging.NullHandler()) diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/__main__.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..beae2ef77490c9f9c9255dd68facbb6de132841f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/__main__.py @@ -0,0 +1,4 @@ +from .cli import cli_detect + +if __name__ == "__main__": + cli_detect() diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/api.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/api.py new file mode 100644 index 0000000000000000000000000000000000000000..0ba08e3a50ba6d61e75f3f31772eb4dfdd3f8f05 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/api.py @@ -0,0 +1,626 @@ +import logging +from os import PathLike +from typing import BinaryIO, List, Optional, Set, Union + +from .cd import ( + coherence_ratio, + encoding_languages, + mb_encoding_languages, + merge_coherence_ratios, +) +from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE +from .md import mess_ratio +from .models import CharsetMatch, CharsetMatches +from .utils import ( + any_specified_encoding, + cut_sequence_chunks, + iana_name, + identify_sig_or_bom, + is_cp_similar, + is_multi_byte_encoding, + should_strip_sig_or_bom, +) + +# Will most likely be controversial +# logging.addLevelName(TRACE, "TRACE") +logger = logging.getLogger("charset_normalizer") +explain_handler = logging.StreamHandler() +explain_handler.setFormatter( + logging.Formatter("%(asctime)s | %(levelname)s | %(message)s") +) + + +def from_bytes( + sequences: Union[bytes, bytearray], + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.2, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Given a raw bytes sequence, return the best possibles charset usable to render str objects. + If there is no results, it is a strong indicator that the source is binary/not text. + By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence. + And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will. + + The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page + but never take it for granted. Can improve the performance. + + You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that + purpose. 
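+
+    A call sketch (payload and parameter values below are illustrative only):
+
+        from charset_normalizer import from_bytes
+        guesses = from_bytes(
+            "Comment ça va ?".encode("cp1252"),
+            cp_isolation=["cp1252", "latin_1"],  # only probe these code pages
+        )
+        best_candidate = guesses.best()  # CharsetMatch or None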
+ + This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32. + By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain' + toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging. + Custom logging format and handler can be set manually. + """ + + if not isinstance(sequences, (bytearray, bytes)): + raise TypeError( + "Expected object of type bytes or bytearray, got: {0}".format( + type(sequences) + ) + ) + + if explain: + previous_logger_level: int = logger.level + logger.addHandler(explain_handler) + logger.setLevel(TRACE) + + length: int = len(sequences) + + if length == 0: + logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.") + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level or logging.WARNING) + return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")]) + + if cp_isolation is not None: + logger.log( + TRACE, + "cp_isolation is set. use this flag for debugging purpose. " + "limited list of encoding allowed : %s.", + ", ".join(cp_isolation), + ) + cp_isolation = [iana_name(cp, False) for cp in cp_isolation] + else: + cp_isolation = [] + + if cp_exclusion is not None: + logger.log( + TRACE, + "cp_exclusion is set. use this flag for debugging purpose. " + "limited list of encoding excluded : %s.", + ", ".join(cp_exclusion), + ) + cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion] + else: + cp_exclusion = [] + + if length <= (chunk_size * steps): + logger.log( + TRACE, + "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.", + steps, + chunk_size, + length, + ) + steps = 1 + chunk_size = length + + if steps > 1 and length / steps < chunk_size: + chunk_size = int(length / steps) + + is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE + is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE + + if is_too_small_sequence: + logger.log( + TRACE, + "Trying to detect encoding from a tiny portion of ({}) byte(s).".format( + length + ), + ) + elif is_too_large_sequence: + logger.log( + TRACE, + "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format( + length + ), + ) + + prioritized_encodings: List[str] = [] + + specified_encoding: Optional[str] = ( + any_specified_encoding(sequences) if preemptive_behaviour else None + ) + + if specified_encoding is not None: + prioritized_encodings.append(specified_encoding) + logger.log( + TRACE, + "Detected declarative mark in sequence. Priority +1 given for %s.", + specified_encoding, + ) + + tested: Set[str] = set() + tested_but_hard_failure: List[str] = [] + tested_but_soft_failure: List[str] = [] + + fallback_ascii: Optional[CharsetMatch] = None + fallback_u8: Optional[CharsetMatch] = None + fallback_specified: Optional[CharsetMatch] = None + + results: CharsetMatches = CharsetMatches() + + sig_encoding, sig_payload = identify_sig_or_bom(sequences) + + if sig_encoding is not None: + prioritized_encodings.append(sig_encoding) + logger.log( + TRACE, + "Detected a SIG or BOM mark on first %i byte(s). 
Priority +1 given for %s.", + len(sig_payload), + sig_encoding, + ) + + prioritized_encodings.append("ascii") + + if "utf_8" not in prioritized_encodings: + prioritized_encodings.append("utf_8") + + for encoding_iana in prioritized_encodings + IANA_SUPPORTED: + if cp_isolation and encoding_iana not in cp_isolation: + continue + + if cp_exclusion and encoding_iana in cp_exclusion: + continue + + if encoding_iana in tested: + continue + + tested.add(encoding_iana) + + decoded_payload: Optional[str] = None + bom_or_sig_available: bool = sig_encoding == encoding_iana + strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom( + encoding_iana + ) + + if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.", + encoding_iana, + ) + continue + if encoding_iana in {"utf_7"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.", + encoding_iana, + ) + continue + + try: + is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana) + except (ModuleNotFoundError, ImportError): + logger.log( + TRACE, + "Encoding %s does not provide an IncrementalDecoder", + encoding_iana, + ) + continue + + try: + if is_too_large_sequence and is_multi_byte_decoder is False: + str( + sequences[: int(50e4)] + if strip_sig_or_bom is False + else sequences[len(sig_payload) : int(50e4)], + encoding=encoding_iana, + ) + else: + decoded_payload = str( + sequences + if strip_sig_or_bom is False + else sequences[len(sig_payload) :], + encoding=encoding_iana, + ) + except (UnicodeDecodeError, LookupError) as e: + if not isinstance(e, LookupError): + logger.log( + TRACE, + "Code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + similar_soft_failure_test: bool = False + + for encoding_soft_failed in tested_but_soft_failure: + if is_cp_similar(encoding_iana, encoding_soft_failed): + similar_soft_failure_test = True + break + + if similar_soft_failure_test: + logger.log( + TRACE, + "%s is deemed too similar to code page %s and was consider unsuited already. 
Continuing!", + encoding_iana, + encoding_soft_failed, + ) + continue + + r_ = range( + 0 if not bom_or_sig_available else len(sig_payload), + length, + int(length / steps), + ) + + multi_byte_bonus: bool = ( + is_multi_byte_decoder + and decoded_payload is not None + and len(decoded_payload) < length + ) + + if multi_byte_bonus: + logger.log( + TRACE, + "Code page %s is a multi byte encoding table and it appear that at least one character " + "was encoded using n-bytes.", + encoding_iana, + ) + + max_chunk_gave_up: int = int(len(r_) / 4) + + max_chunk_gave_up = max(max_chunk_gave_up, 2) + early_stop_count: int = 0 + lazy_str_hard_failure = False + + md_chunks: List[str] = [] + md_ratios = [] + + try: + for chunk in cut_sequence_chunks( + sequences, + encoding_iana, + r_, + chunk_size, + bom_or_sig_available, + strip_sig_or_bom, + sig_payload, + is_multi_byte_decoder, + decoded_payload, + ): + md_chunks.append(chunk) + + md_ratios.append( + mess_ratio( + chunk, + threshold, + explain is True and 1 <= len(cp_isolation) <= 2, + ) + ) + + if md_ratios[-1] >= threshold: + early_stop_count += 1 + + if (early_stop_count >= max_chunk_gave_up) or ( + bom_or_sig_available and strip_sig_or_bom is False + ): + break + except ( + UnicodeDecodeError + ) as e: # Lazy str loading may have missed something there + logger.log( + TRACE, + "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + early_stop_count = max_chunk_gave_up + lazy_str_hard_failure = True + + # We might want to check the sequence again with the whole content + # Only if initial MD tests passes + if ( + not lazy_str_hard_failure + and is_too_large_sequence + and not is_multi_byte_decoder + ): + try: + sequences[int(50e3) :].decode(encoding_iana, errors="strict") + except UnicodeDecodeError as e: + logger.log( + TRACE, + "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0 + if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up: + tested_but_soft_failure.append(encoding_iana) + logger.log( + TRACE, + "%s was excluded because of initial chaos probing. Gave up %i time(s). " + "Computed mean chaos is %f %%.", + encoding_iana, + early_stop_count, + round(mean_mess_ratio * 100, ndigits=3), + ) + # Preparing those fallbacks in case we got nothing. + if ( + enable_fallback + and encoding_iana in ["ascii", "utf_8", specified_encoding] + and not lazy_str_hard_failure + ): + fallback_entry = CharsetMatch( + sequences, encoding_iana, threshold, False, [], decoded_payload + ) + if encoding_iana == specified_encoding: + fallback_specified = fallback_entry + elif encoding_iana == "ascii": + fallback_ascii = fallback_entry + else: + fallback_u8 = fallback_entry + continue + + logger.log( + TRACE, + "%s passed initial chaos probing. 
Mean measured chaos is %f %%", + encoding_iana, + round(mean_mess_ratio * 100, ndigits=3), + ) + + if not is_multi_byte_decoder: + target_languages: List[str] = encoding_languages(encoding_iana) + else: + target_languages = mb_encoding_languages(encoding_iana) + + if target_languages: + logger.log( + TRACE, + "{} should target any language(s) of {}".format( + encoding_iana, str(target_languages) + ), + ) + + cd_ratios = [] + + # We shall skip the CD when its about ASCII + # Most of the time its not relevant to run "language-detection" on it. + if encoding_iana != "ascii": + for chunk in md_chunks: + chunk_languages = coherence_ratio( + chunk, + language_threshold, + ",".join(target_languages) if target_languages else None, + ) + + cd_ratios.append(chunk_languages) + + cd_ratios_merged = merge_coherence_ratios(cd_ratios) + + if cd_ratios_merged: + logger.log( + TRACE, + "We detected language {} using {}".format( + cd_ratios_merged, encoding_iana + ), + ) + + results.append( + CharsetMatch( + sequences, + encoding_iana, + mean_mess_ratio, + bom_or_sig_available, + cd_ratios_merged, + decoded_payload, + ) + ) + + if ( + encoding_iana in [specified_encoding, "ascii", "utf_8"] + and mean_mess_ratio < 0.1 + ): + logger.debug( + "Encoding detection: %s is most likely the one.", encoding_iana + ) + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([results[encoding_iana]]) + + if encoding_iana == sig_encoding: + logger.debug( + "Encoding detection: %s is most likely the one as we detected a BOM or SIG within " + "the beginning of the sequence.", + encoding_iana, + ) + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([results[encoding_iana]]) + + if len(results) == 0: + if fallback_u8 or fallback_ascii or fallback_specified: + logger.log( + TRACE, + "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.", + ) + + if fallback_specified: + logger.debug( + "Encoding detection: %s will be used as a fallback match", + fallback_specified.encoding, + ) + results.append(fallback_specified) + elif ( + (fallback_u8 and fallback_ascii is None) + or ( + fallback_u8 + and fallback_ascii + and fallback_u8.fingerprint != fallback_ascii.fingerprint + ) + or (fallback_u8 is not None) + ): + logger.debug("Encoding detection: utf_8 will be used as a fallback match") + results.append(fallback_u8) + elif fallback_ascii: + logger.debug("Encoding detection: ascii will be used as a fallback match") + results.append(fallback_ascii) + + if results: + logger.debug( + "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.", + results.best().encoding, # type: ignore + len(results) - 1, + ) + else: + logger.debug("Encoding detection: Unable to determine any suitable charset.") + + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + + return results + + +def from_fp( + fp: BinaryIO, + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but using a file pointer that is already ready. + Will not close the file pointer. 
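+
+    A minimal call sketch (hypothetical file name):
+
+        with open("unknown_encoding.txt", "rb") as fp:
+            best_guess = from_fp(fp).best()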
+ """ + return from_bytes( + fp.read(), + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def from_path( + path: Union[str, bytes, PathLike], # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode. + Can raise IOError. + """ + with open(path, "rb") as fp: + return from_fp( + fp, + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def is_binary( + fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: Optional[List[str]] = None, + cp_exclusion: Optional[List[str]] = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = False, +) -> bool: + """ + Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string. + Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match + are disabled to be stricter around ASCII-compatible but unlikely to be a string. + """ + if isinstance(fp_or_path_or_payload, (str, PathLike)): + guesses = from_path( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + elif isinstance( + fp_or_path_or_payload, + ( + bytes, + bytearray, + ), + ): + guesses = from_bytes( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + else: + guesses = from_fp( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + + return not guesses diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/cd.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/cd.py new file mode 100644 index 0000000000000000000000000000000000000000..4ea6760c45bce5773bfe4b46d7b3c07c2c139d49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/cd.py @@ -0,0 +1,395 @@ +import importlib +from codecs import IncrementalDecoder +from collections import Counter +from functools import lru_cache +from typing import Counter as TypeCounter, Dict, List, Optional, Tuple + +from .constant import ( + FREQUENCIES, + KO_NAMES, + LANGUAGE_SUPPORTED_COUNT, + TOO_SMALL_SEQUENCE, + ZH_NAMES, +) +from .md import is_suspiciously_successive_range +from .models import CoherenceMatches +from .utils import ( + is_accentuated, + 
is_latin, + is_multi_byte_encoding, + is_unicode_range_secondary, + unicode_range, +) + + +def encoding_unicode_range(iana_name: str) -> List[str]: + """ + Return associated unicode ranges in a single byte code page. + """ + if is_multi_byte_encoding(iana_name): + raise IOError("Function not supported on multi-byte code page") + + decoder = importlib.import_module( + "encodings.{}".format(iana_name) + ).IncrementalDecoder + + p: IncrementalDecoder = decoder(errors="ignore") + seen_ranges: Dict[str, int] = {} + character_count: int = 0 + + for i in range(0x40, 0xFF): + chunk: str = p.decode(bytes([i])) + + if chunk: + character_range: Optional[str] = unicode_range(chunk) + + if character_range is None: + continue + + if is_unicode_range_secondary(character_range) is False: + if character_range not in seen_ranges: + seen_ranges[character_range] = 0 + seen_ranges[character_range] += 1 + character_count += 1 + + return sorted( + [ + character_range + for character_range in seen_ranges + if seen_ranges[character_range] / character_count >= 0.15 + ] + ) + + +def unicode_range_languages(primary_range: str) -> List[str]: + """ + Return inferred languages used with a unicode range. + """ + languages: List[str] = [] + + for language, characters in FREQUENCIES.items(): + for character in characters: + if unicode_range(character) == primary_range: + languages.append(language) + break + + return languages + + +@lru_cache() +def encoding_languages(iana_name: str) -> List[str]: + """ + Single-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. + """ + unicode_ranges: List[str] = encoding_unicode_range(iana_name) + primary_range: Optional[str] = None + + for specified_range in unicode_ranges: + if "Latin" not in specified_range: + primary_range = specified_range + break + + if primary_range is None: + return ["Latin Based"] + + return unicode_range_languages(primary_range) + + +@lru_cache() +def mb_encoding_languages(iana_name: str) -> List[str]: + """ + Multi-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. + """ + if ( + iana_name.startswith("shift_") + or iana_name.startswith("iso2022_jp") + or iana_name.startswith("euc_j") + or iana_name == "cp932" + ): + return ["Japanese"] + if iana_name.startswith("gb") or iana_name in ZH_NAMES: + return ["Chinese"] + if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: + return ["Korean"] + + return [] + + +@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) +def get_target_features(language: str) -> Tuple[bool, bool]: + """ + Determine main aspects from a supported language if it contains accents and if is pure Latin. + """ + target_have_accents: bool = False + target_pure_latin: bool = True + + for character in FREQUENCIES[language]: + if not target_have_accents and is_accentuated(character): + target_have_accents = True + if target_pure_latin and is_latin(character) is False: + target_pure_latin = False + + return target_have_accents, target_pure_latin + + +def alphabet_languages( + characters: List[str], ignore_non_latin: bool = False +) -> List[str]: + """ + Return associated languages associated to given characters. 
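+
+    Call sketch (input and result are illustrative; the exact list depends on
+    the FREQUENCIES table):
+
+        alphabet_languages(["e", "a", "t", "i", "o", "n", "s", "r"])
+        # e.g. ['English', 'Dutch', ...], sorted by character-match ratio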
+ """ + languages: List[Tuple[str, float]] = [] + + source_have_accents = any(is_accentuated(character) for character in characters) + + for language, language_characters in FREQUENCIES.items(): + target_have_accents, target_pure_latin = get_target_features(language) + + if ignore_non_latin and target_pure_latin is False: + continue + + if target_have_accents is False and source_have_accents: + continue + + character_count: int = len(language_characters) + + character_match_count: int = len( + [c for c in language_characters if c in characters] + ) + + ratio: float = character_match_count / character_count + + if ratio >= 0.2: + languages.append((language, ratio)) + + languages = sorted(languages, key=lambda x: x[1], reverse=True) + + return [compatible_language[0] for compatible_language in languages] + + +def characters_popularity_compare( + language: str, ordered_characters: List[str] +) -> float: + """ + Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. + The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). + Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) + """ + if language not in FREQUENCIES: + raise ValueError("{} not available".format(language)) + + character_approved_count: int = 0 + FREQUENCIES_language_set = set(FREQUENCIES[language]) + + ordered_characters_count: int = len(ordered_characters) + target_language_characters_count: int = len(FREQUENCIES[language]) + + large_alphabet: bool = target_language_characters_count > 26 + + for character, character_rank in zip( + ordered_characters, range(0, ordered_characters_count) + ): + if character not in FREQUENCIES_language_set: + continue + + character_rank_in_language: int = FREQUENCIES[language].index(character) + expected_projection_ratio: float = ( + target_language_characters_count / ordered_characters_count + ) + character_rank_projection: int = int(character_rank * expected_projection_ratio) + + if ( + large_alphabet is False + and abs(character_rank_projection - character_rank_in_language) > 4 + ): + continue + + if ( + large_alphabet is True + and abs(character_rank_projection - character_rank_in_language) + < target_language_characters_count / 3 + ): + character_approved_count += 1 + continue + + characters_before_source: List[str] = FREQUENCIES[language][ + 0:character_rank_in_language + ] + characters_after_source: List[str] = FREQUENCIES[language][ + character_rank_in_language: + ] + characters_before: List[str] = ordered_characters[0:character_rank] + characters_after: List[str] = ordered_characters[character_rank:] + + before_match_count: int = len( + set(characters_before) & set(characters_before_source) + ) + + after_match_count: int = len( + set(characters_after) & set(characters_after_source) + ) + + if len(characters_before_source) == 0 and before_match_count <= 4: + character_approved_count += 1 + continue + + if len(characters_after_source) == 0 and after_match_count <= 4: + character_approved_count += 1 + continue + + if ( + before_match_count / len(characters_before_source) >= 0.4 + or after_match_count / len(characters_after_source) >= 0.4 + ): + character_approved_count += 1 + continue + + return character_approved_count / len(ordered_characters) + + +def alpha_unicode_split(decoded_sequence: str) -> List[str]: + """ + Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. + Ex. 
a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; + One containing the latin letters and the other hebrew. + """ + layers: Dict[str, str] = {} + + for character in decoded_sequence: + if character.isalpha() is False: + continue + + character_range: Optional[str] = unicode_range(character) + + if character_range is None: + continue + + layer_target_range: Optional[str] = None + + for discovered_range in layers: + if ( + is_suspiciously_successive_range(discovered_range, character_range) + is False + ): + layer_target_range = discovered_range + break + + if layer_target_range is None: + layer_target_range = character_range + + if layer_target_range not in layers: + layers[layer_target_range] = character.lower() + continue + + layers[layer_target_range] += character.lower() + + return list(layers.values()) + + +def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: + """ + This function merge results previously given by the function coherence_ratio. + The return type is the same as coherence_ratio. + """ + per_language_ratios: Dict[str, List[float]] = {} + for result in results: + for sub_result in result: + language, ratio = sub_result + if language not in per_language_ratios: + per_language_ratios[language] = [ratio] + continue + per_language_ratios[language].append(ratio) + + merge = [ + ( + language, + round( + sum(per_language_ratios[language]) / len(per_language_ratios[language]), + 4, + ), + ) + for language in per_language_ratios + ] + + return sorted(merge, key=lambda x: x[1], reverse=True) + + +def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: + """ + We shall NOT return "English—" in CoherenceMatches because it is an alternative + of "English". This function only keeps the best match and remove the em-dash in it. + """ + index_results: Dict[str, List[float]] = dict() + + for result in results: + language, ratio = result + no_em_name: str = language.replace("—", "") + + if no_em_name not in index_results: + index_results[no_em_name] = [] + + index_results[no_em_name].append(ratio) + + if any(len(index_results[e]) > 1 for e in index_results): + filtered_results: CoherenceMatches = [] + + for language in index_results: + filtered_results.append((language, max(index_results[language]))) + + return filtered_results + + return results + + +@lru_cache(maxsize=2048) +def coherence_ratio( + decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None +) -> CoherenceMatches: + """ + Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. + A layer = Character extraction by alphabets/ranges. 
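+
+    Call sketch (result values are illustrative; layers shorter than
+    TOO_SMALL_SEQUENCE characters are skipped entirely):
+
+        coherence_ratio(decoded_text, threshold=0.1, lg_inclusion=None)
+        # e.g. [('English', 0.4923), ('French', 0.3312)]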
+ """ + + results: List[Tuple[str, float]] = [] + ignore_non_latin: bool = False + + sufficient_match_count: int = 0 + + lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] + if "Latin Based" in lg_inclusion_list: + ignore_non_latin = True + lg_inclusion_list.remove("Latin Based") + + for layer in alpha_unicode_split(decoded_sequence): + sequence_frequencies: TypeCounter[str] = Counter(layer) + most_common = sequence_frequencies.most_common() + + character_count: int = sum(o for c, o in most_common) + + if character_count <= TOO_SMALL_SEQUENCE: + continue + + popular_character_ordered: List[str] = [c for c, o in most_common] + + for language in lg_inclusion_list or alphabet_languages( + popular_character_ordered, ignore_non_latin + ): + ratio: float = characters_popularity_compare( + language, popular_character_ordered + ) + + if ratio < threshold: + continue + elif ratio >= 0.8: + sufficient_match_count += 1 + + results.append((language, round(ratio, 4))) + + if sufficient_match_count >= 3: + break + + return sorted( + filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True + ) diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/constant.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/constant.py new file mode 100644 index 0000000000000000000000000000000000000000..863490461eacf57ca5f62658b713685476987149 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/constant.py @@ -0,0 +1,1995 @@ +# -*- coding: utf-8 -*- +from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE +from encodings.aliases import aliases +from re import IGNORECASE, compile as re_compile +from typing import Dict, List, Set, Union + +# Contain for each eligible encoding a list of/item bytes SIG/BOM +ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = { + "utf_8": BOM_UTF8, + "utf_7": [ + b"\x2b\x2f\x76\x38", + b"\x2b\x2f\x76\x39", + b"\x2b\x2f\x76\x2b", + b"\x2b\x2f\x76\x2f", + b"\x2b\x2f\x76\x38\x2d", + ], + "gb18030": b"\x84\x31\x95\x33", + "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE], + "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE], +} + +TOO_SMALL_SEQUENCE: int = 32 +TOO_BIG_SEQUENCE: int = int(10e6) + +UTF8_MAXIMAL_ALLOCATION: int = 1_112_064 + +# Up-to-date Unicode ucd/15.0.0 +UNICODE_RANGES_COMBINED: Dict[str, range] = { + "Control character": range(32), + "Basic Latin": range(32, 128), + "Latin-1 Supplement": range(128, 256), + "Latin Extended-A": range(256, 384), + "Latin Extended-B": range(384, 592), + "IPA Extensions": range(592, 688), + "Spacing Modifier Letters": range(688, 768), + "Combining Diacritical Marks": range(768, 880), + "Greek and Coptic": range(880, 1024), + "Cyrillic": range(1024, 1280), + "Cyrillic Supplement": range(1280, 1328), + "Armenian": range(1328, 1424), + "Hebrew": range(1424, 1536), + "Arabic": range(1536, 1792), + "Syriac": range(1792, 1872), + "Arabic Supplement": range(1872, 1920), + "Thaana": range(1920, 1984), + "NKo": range(1984, 2048), + "Samaritan": range(2048, 2112), + "Mandaic": range(2112, 2144), + "Syriac Supplement": range(2144, 2160), + "Arabic Extended-B": range(2160, 2208), + "Arabic Extended-A": range(2208, 2304), + "Devanagari": range(2304, 2432), + "Bengali": range(2432, 2560), + "Gurmukhi": range(2560, 2688), + "Gujarati": range(2688, 2816), + "Oriya": range(2816, 2944), + "Tamil": range(2944, 3072), + "Telugu": range(3072, 3200), + "Kannada": range(3200, 3328), + "Malayalam": range(3328, 3456), + "Sinhala": range(3456, 3584), + "Thai": 
range(3584, 3712), + "Lao": range(3712, 3840), + "Tibetan": range(3840, 4096), + "Myanmar": range(4096, 4256), + "Georgian": range(4256, 4352), + "Hangul Jamo": range(4352, 4608), + "Ethiopic": range(4608, 4992), + "Ethiopic Supplement": range(4992, 5024), + "Cherokee": range(5024, 5120), + "Unified Canadian Aboriginal Syllabics": range(5120, 5760), + "Ogham": range(5760, 5792), + "Runic": range(5792, 5888), + "Tagalog": range(5888, 5920), + "Hanunoo": range(5920, 5952), + "Buhid": range(5952, 5984), + "Tagbanwa": range(5984, 6016), + "Khmer": range(6016, 6144), + "Mongolian": range(6144, 6320), + "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400), + "Limbu": range(6400, 6480), + "Tai Le": range(6480, 6528), + "New Tai Lue": range(6528, 6624), + "Khmer Symbols": range(6624, 6656), + "Buginese": range(6656, 6688), + "Tai Tham": range(6688, 6832), + "Combining Diacritical Marks Extended": range(6832, 6912), + "Balinese": range(6912, 7040), + "Sundanese": range(7040, 7104), + "Batak": range(7104, 7168), + "Lepcha": range(7168, 7248), + "Ol Chiki": range(7248, 7296), + "Cyrillic Extended-C": range(7296, 7312), + "Georgian Extended": range(7312, 7360), + "Sundanese Supplement": range(7360, 7376), + "Vedic Extensions": range(7376, 7424), + "Phonetic Extensions": range(7424, 7552), + "Phonetic Extensions Supplement": range(7552, 7616), + "Combining Diacritical Marks Supplement": range(7616, 7680), + "Latin Extended Additional": range(7680, 7936), + "Greek Extended": range(7936, 8192), + "General Punctuation": range(8192, 8304), + "Superscripts and Subscripts": range(8304, 8352), + "Currency Symbols": range(8352, 8400), + "Combining Diacritical Marks for Symbols": range(8400, 8448), + "Letterlike Symbols": range(8448, 8528), + "Number Forms": range(8528, 8592), + "Arrows": range(8592, 8704), + "Mathematical Operators": range(8704, 8960), + "Miscellaneous Technical": range(8960, 9216), + "Control Pictures": range(9216, 9280), + "Optical Character Recognition": range(9280, 9312), + "Enclosed Alphanumerics": range(9312, 9472), + "Box Drawing": range(9472, 9600), + "Block Elements": range(9600, 9632), + "Geometric Shapes": range(9632, 9728), + "Miscellaneous Symbols": range(9728, 9984), + "Dingbats": range(9984, 10176), + "Miscellaneous Mathematical Symbols-A": range(10176, 10224), + "Supplemental Arrows-A": range(10224, 10240), + "Braille Patterns": range(10240, 10496), + "Supplemental Arrows-B": range(10496, 10624), + "Miscellaneous Mathematical Symbols-B": range(10624, 10752), + "Supplemental Mathematical Operators": range(10752, 11008), + "Miscellaneous Symbols and Arrows": range(11008, 11264), + "Glagolitic": range(11264, 11360), + "Latin Extended-C": range(11360, 11392), + "Coptic": range(11392, 11520), + "Georgian Supplement": range(11520, 11568), + "Tifinagh": range(11568, 11648), + "Ethiopic Extended": range(11648, 11744), + "Cyrillic Extended-A": range(11744, 11776), + "Supplemental Punctuation": range(11776, 11904), + "CJK Radicals Supplement": range(11904, 12032), + "Kangxi Radicals": range(12032, 12256), + "Ideographic Description Characters": range(12272, 12288), + "CJK Symbols and Punctuation": range(12288, 12352), + "Hiragana": range(12352, 12448), + "Katakana": range(12448, 12544), + "Bopomofo": range(12544, 12592), + "Hangul Compatibility Jamo": range(12592, 12688), + "Kanbun": range(12688, 12704), + "Bopomofo Extended": range(12704, 12736), + "CJK Strokes": range(12736, 12784), + "Katakana Phonetic Extensions": range(12784, 12800), + "Enclosed CJK Letters and 
Months": range(12800, 13056), + "CJK Compatibility": range(13056, 13312), + "CJK Unified Ideographs Extension A": range(13312, 19904), + "Yijing Hexagram Symbols": range(19904, 19968), + "CJK Unified Ideographs": range(19968, 40960), + "Yi Syllables": range(40960, 42128), + "Yi Radicals": range(42128, 42192), + "Lisu": range(42192, 42240), + "Vai": range(42240, 42560), + "Cyrillic Extended-B": range(42560, 42656), + "Bamum": range(42656, 42752), + "Modifier Tone Letters": range(42752, 42784), + "Latin Extended-D": range(42784, 43008), + "Syloti Nagri": range(43008, 43056), + "Common Indic Number Forms": range(43056, 43072), + "Phags-pa": range(43072, 43136), + "Saurashtra": range(43136, 43232), + "Devanagari Extended": range(43232, 43264), + "Kayah Li": range(43264, 43312), + "Rejang": range(43312, 43360), + "Hangul Jamo Extended-A": range(43360, 43392), + "Javanese": range(43392, 43488), + "Myanmar Extended-B": range(43488, 43520), + "Cham": range(43520, 43616), + "Myanmar Extended-A": range(43616, 43648), + "Tai Viet": range(43648, 43744), + "Meetei Mayek Extensions": range(43744, 43776), + "Ethiopic Extended-A": range(43776, 43824), + "Latin Extended-E": range(43824, 43888), + "Cherokee Supplement": range(43888, 43968), + "Meetei Mayek": range(43968, 44032), + "Hangul Syllables": range(44032, 55216), + "Hangul Jamo Extended-B": range(55216, 55296), + "High Surrogates": range(55296, 56192), + "High Private Use Surrogates": range(56192, 56320), + "Low Surrogates": range(56320, 57344), + "Private Use Area": range(57344, 63744), + "CJK Compatibility Ideographs": range(63744, 64256), + "Alphabetic Presentation Forms": range(64256, 64336), + "Arabic Presentation Forms-A": range(64336, 65024), + "Variation Selectors": range(65024, 65040), + "Vertical Forms": range(65040, 65056), + "Combining Half Marks": range(65056, 65072), + "CJK Compatibility Forms": range(65072, 65104), + "Small Form Variants": range(65104, 65136), + "Arabic Presentation Forms-B": range(65136, 65280), + "Halfwidth and Fullwidth Forms": range(65280, 65520), + "Specials": range(65520, 65536), + "Linear B Syllabary": range(65536, 65664), + "Linear B Ideograms": range(65664, 65792), + "Aegean Numbers": range(65792, 65856), + "Ancient Greek Numbers": range(65856, 65936), + "Ancient Symbols": range(65936, 66000), + "Phaistos Disc": range(66000, 66048), + "Lycian": range(66176, 66208), + "Carian": range(66208, 66272), + "Coptic Epact Numbers": range(66272, 66304), + "Old Italic": range(66304, 66352), + "Gothic": range(66352, 66384), + "Old Permic": range(66384, 66432), + "Ugaritic": range(66432, 66464), + "Old Persian": range(66464, 66528), + "Deseret": range(66560, 66640), + "Shavian": range(66640, 66688), + "Osmanya": range(66688, 66736), + "Osage": range(66736, 66816), + "Elbasan": range(66816, 66864), + "Caucasian Albanian": range(66864, 66928), + "Vithkuqi": range(66928, 67008), + "Linear A": range(67072, 67456), + "Latin Extended-F": range(67456, 67520), + "Cypriot Syllabary": range(67584, 67648), + "Imperial Aramaic": range(67648, 67680), + "Palmyrene": range(67680, 67712), + "Nabataean": range(67712, 67760), + "Hatran": range(67808, 67840), + "Phoenician": range(67840, 67872), + "Lydian": range(67872, 67904), + "Meroitic Hieroglyphs": range(67968, 68000), + "Meroitic Cursive": range(68000, 68096), + "Kharoshthi": range(68096, 68192), + "Old South Arabian": range(68192, 68224), + "Old North Arabian": range(68224, 68256), + "Manichaean": range(68288, 68352), + "Avestan": range(68352, 68416), + "Inscriptional Parthian": 
range(68416, 68448), + "Inscriptional Pahlavi": range(68448, 68480), + "Psalter Pahlavi": range(68480, 68528), + "Old Turkic": range(68608, 68688), + "Old Hungarian": range(68736, 68864), + "Hanifi Rohingya": range(68864, 68928), + "Rumi Numeral Symbols": range(69216, 69248), + "Yezidi": range(69248, 69312), + "Arabic Extended-C": range(69312, 69376), + "Old Sogdian": range(69376, 69424), + "Sogdian": range(69424, 69488), + "Old Uyghur": range(69488, 69552), + "Chorasmian": range(69552, 69600), + "Elymaic": range(69600, 69632), + "Brahmi": range(69632, 69760), + "Kaithi": range(69760, 69840), + "Sora Sompeng": range(69840, 69888), + "Chakma": range(69888, 69968), + "Mahajani": range(69968, 70016), + "Sharada": range(70016, 70112), + "Sinhala Archaic Numbers": range(70112, 70144), + "Khojki": range(70144, 70224), + "Multani": range(70272, 70320), + "Khudawadi": range(70320, 70400), + "Grantha": range(70400, 70528), + "Newa": range(70656, 70784), + "Tirhuta": range(70784, 70880), + "Siddham": range(71040, 71168), + "Modi": range(71168, 71264), + "Mongolian Supplement": range(71264, 71296), + "Takri": range(71296, 71376), + "Ahom": range(71424, 71504), + "Dogra": range(71680, 71760), + "Warang Citi": range(71840, 71936), + "Dives Akuru": range(71936, 72032), + "Nandinagari": range(72096, 72192), + "Zanabazar Square": range(72192, 72272), + "Soyombo": range(72272, 72368), + "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384), + "Pau Cin Hau": range(72384, 72448), + "Devanagari Extended-A": range(72448, 72544), + "Bhaiksuki": range(72704, 72816), + "Marchen": range(72816, 72896), + "Masaram Gondi": range(72960, 73056), + "Gunjala Gondi": range(73056, 73136), + "Makasar": range(73440, 73472), + "Kawi": range(73472, 73568), + "Lisu Supplement": range(73648, 73664), + "Tamil Supplement": range(73664, 73728), + "Cuneiform": range(73728, 74752), + "Cuneiform Numbers and Punctuation": range(74752, 74880), + "Early Dynastic Cuneiform": range(74880, 75088), + "Cypro-Minoan": range(77712, 77824), + "Egyptian Hieroglyphs": range(77824, 78896), + "Egyptian Hieroglyph Format Controls": range(78896, 78944), + "Anatolian Hieroglyphs": range(82944, 83584), + "Bamum Supplement": range(92160, 92736), + "Mro": range(92736, 92784), + "Tangsa": range(92784, 92880), + "Bassa Vah": range(92880, 92928), + "Pahawh Hmong": range(92928, 93072), + "Medefaidrin": range(93760, 93856), + "Miao": range(93952, 94112), + "Ideographic Symbols and Punctuation": range(94176, 94208), + "Tangut": range(94208, 100352), + "Tangut Components": range(100352, 101120), + "Khitan Small Script": range(101120, 101632), + "Tangut Supplement": range(101632, 101760), + "Kana Extended-B": range(110576, 110592), + "Kana Supplement": range(110592, 110848), + "Kana Extended-A": range(110848, 110896), + "Small Kana Extension": range(110896, 110960), + "Nushu": range(110960, 111360), + "Duployan": range(113664, 113824), + "Shorthand Format Controls": range(113824, 113840), + "Znamenny Musical Notation": range(118528, 118736), + "Byzantine Musical Symbols": range(118784, 119040), + "Musical Symbols": range(119040, 119296), + "Ancient Greek Musical Notation": range(119296, 119376), + "Kaktovik Numerals": range(119488, 119520), + "Mayan Numerals": range(119520, 119552), + "Tai Xuan Jing Symbols": range(119552, 119648), + "Counting Rod Numerals": range(119648, 119680), + "Mathematical Alphanumeric Symbols": range(119808, 120832), + "Sutton SignWriting": range(120832, 121520), + "Latin Extended-G": range(122624, 122880), + "Glagolitic 
Supplement": range(122880, 122928), + "Cyrillic Extended-D": range(122928, 123024), + "Nyiakeng Puachue Hmong": range(123136, 123216), + "Toto": range(123536, 123584), + "Wancho": range(123584, 123648), + "Nag Mundari": range(124112, 124160), + "Ethiopic Extended-B": range(124896, 124928), + "Mende Kikakui": range(124928, 125152), + "Adlam": range(125184, 125280), + "Indic Siyaq Numbers": range(126064, 126144), + "Ottoman Siyaq Numbers": range(126208, 126288), + "Arabic Mathematical Alphabetic Symbols": range(126464, 126720), + "Mahjong Tiles": range(126976, 127024), + "Domino Tiles": range(127024, 127136), + "Playing Cards": range(127136, 127232), + "Enclosed Alphanumeric Supplement": range(127232, 127488), + "Enclosed Ideographic Supplement": range(127488, 127744), + "Miscellaneous Symbols and Pictographs": range(127744, 128512), + "Emoticons range(Emoji)": range(128512, 128592), + "Ornamental Dingbats": range(128592, 128640), + "Transport and Map Symbols": range(128640, 128768), + "Alchemical Symbols": range(128768, 128896), + "Geometric Shapes Extended": range(128896, 129024), + "Supplemental Arrows-C": range(129024, 129280), + "Supplemental Symbols and Pictographs": range(129280, 129536), + "Chess Symbols": range(129536, 129648), + "Symbols and Pictographs Extended-A": range(129648, 129792), + "Symbols for Legacy Computing": range(129792, 130048), + "CJK Unified Ideographs Extension B": range(131072, 173792), + "CJK Unified Ideographs Extension C": range(173824, 177984), + "CJK Unified Ideographs Extension D": range(177984, 178208), + "CJK Unified Ideographs Extension E": range(178208, 183984), + "CJK Unified Ideographs Extension F": range(183984, 191472), + "CJK Compatibility Ideographs Supplement": range(194560, 195104), + "CJK Unified Ideographs Extension G": range(196608, 201552), + "CJK Unified Ideographs Extension H": range(201552, 205744), + "Tags": range(917504, 917632), + "Variation Selectors Supplement": range(917760, 918000), + "Supplementary Private Use Area-A": range(983040, 1048576), + "Supplementary Private Use Area-B": range(1048576, 1114112), +} + + +UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [ + "Supplement", + "Extended", + "Extensions", + "Modifier", + "Marks", + "Punctuation", + "Symbols", + "Forms", + "Operators", + "Miscellaneous", + "Drawing", + "Block", + "Shapes", + "Supplemental", + "Tags", +] + +RE_POSSIBLE_ENCODING_INDICATION = re_compile( + r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)", + IGNORECASE, +) + +IANA_NO_ALIASES = [ + "cp720", + "cp737", + "cp856", + "cp874", + "cp875", + "cp1006", + "koi8_r", + "koi8_t", + "koi8_u", +] + +IANA_SUPPORTED: List[str] = sorted( + filter( + lambda x: x.endswith("_codec") is False + and x not in {"rot_13", "tactis", "mbcs"}, + list(set(aliases.values())) + IANA_NO_ALIASES, + ) +) + +IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED) + +# pre-computed code page that are similar using the function cp_similarity. 
+IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = { + "cp037": ["cp1026", "cp1140", "cp273", "cp500"], + "cp1026": ["cp037", "cp1140", "cp273", "cp500"], + "cp1125": ["cp866"], + "cp1140": ["cp037", "cp1026", "cp273", "cp500"], + "cp1250": ["iso8859_2"], + "cp1251": ["kz1048", "ptcp154"], + "cp1252": ["iso8859_15", "iso8859_9", "latin_1"], + "cp1253": ["iso8859_7"], + "cp1254": ["iso8859_15", "iso8859_9", "latin_1"], + "cp1257": ["iso8859_13"], + "cp273": ["cp037", "cp1026", "cp1140", "cp500"], + "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"], + "cp500": ["cp037", "cp1026", "cp1140", "cp273"], + "cp850": ["cp437", "cp857", "cp858", "cp865"], + "cp857": ["cp850", "cp858", "cp865"], + "cp858": ["cp437", "cp850", "cp857", "cp865"], + "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"], + "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"], + "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"], + "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"], + "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"], + "cp866": ["cp1125"], + "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"], + "iso8859_11": ["tis_620"], + "iso8859_13": ["cp1257"], + "iso8859_14": [ + "iso8859_10", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_15": [ + "cp1252", + "cp1254", + "iso8859_10", + "iso8859_14", + "iso8859_16", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_16": [ + "iso8859_14", + "iso8859_15", + "iso8859_2", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"], + "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"], + "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"], + "iso8859_7": ["cp1253"], + "iso8859_9": [ + "cp1252", + "cp1254", + "cp1258", + "iso8859_10", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_4", + "latin_1", + ], + "kz1048": ["cp1251", "ptcp154"], + "latin_1": [ + "cp1252", + "cp1254", + "cp1258", + "iso8859_10", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_4", + "iso8859_9", + ], + "mac_iceland": ["mac_roman", "mac_turkish"], + "mac_roman": ["mac_iceland", "mac_turkish"], + "mac_turkish": ["mac_iceland", "mac_roman"], + "ptcp154": ["cp1251", "kz1048"], + "tis_620": ["iso8859_11"], +} + + +CHARDET_CORRESPONDENCE: Dict[str, str] = { + "iso2022_kr": "ISO-2022-KR", + "iso2022_jp": "ISO-2022-JP", + "euc_kr": "EUC-KR", + "tis_620": "TIS-620", + "utf_32": "UTF-32", + "euc_jp": "EUC-JP", + "koi8_r": "KOI8-R", + "iso8859_1": "ISO-8859-1", + "iso8859_2": "ISO-8859-2", + "iso8859_5": "ISO-8859-5", + "iso8859_6": "ISO-8859-6", + "iso8859_7": "ISO-8859-7", + "iso8859_8": "ISO-8859-8", + "utf_16": "UTF-16", + "cp855": "IBM855", + "mac_cyrillic": "MacCyrillic", + "gb2312": "GB2312", + "gb18030": "GB18030", + "cp932": "CP932", + "cp866": "IBM866", + "utf_8": "utf-8", + "utf_8_sig": "UTF-8-SIG", + "shift_jis": "SHIFT_JIS", + "big5": "Big5", + "cp1250": "windows-1250", + "cp1251": "windows-1251", + "cp1252": "Windows-1252", + "cp1253": "windows-1253", + "cp1255": "windows-1255", + "cp1256": "windows-1256", + "cp1254": "Windows-1254", + "cp949": "CP949", +} + + +COMMON_SAFE_ASCII_CHARACTERS: Set[str] = { + "<", + ">", + "=", + ":", + "/", + "&", + ";", + "{", + "}", + "[", + "]", + ",", + "|", + '"', + "-", +} + + +KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"} +ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"} 
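+
+# Illustrative consumption of the tables above (a sketch; the actual consumers
+# are cd.py and legacy.py elsewhere in this diff):
+#
+#     from charset_normalizer.constant import CHARDET_CORRESPONDENCE, ZH_NAMES
+#     CHARDET_CORRESPONDENCE.get("cp1251")  # -> "windows-1251"
+#     "big5" in ZH_NAMES                    # -> True; cd.py maps it to ["Chinese"]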
+ +# Logging LEVEL below DEBUG +TRACE: int = 5 + + +# Language label that contain the em dash "—" +# character are to be considered alternative seq to origin +FREQUENCIES: Dict[str, List[str]] = { + "English": [ + "e", + "a", + "t", + "i", + "o", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "u", + "m", + "f", + "p", + "g", + "w", + "y", + "b", + "v", + "k", + "x", + "j", + "z", + "q", + ], + "English—": [ + "e", + "a", + "t", + "i", + "o", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "m", + "u", + "f", + "p", + "g", + "w", + "b", + "y", + "v", + "k", + "j", + "x", + "z", + "q", + ], + "German": [ + "e", + "n", + "i", + "r", + "s", + "t", + "a", + "d", + "h", + "u", + "l", + "g", + "o", + "c", + "m", + "b", + "f", + "k", + "w", + "z", + "p", + "v", + "ü", + "ä", + "ö", + "j", + ], + "French": [ + "e", + "a", + "s", + "n", + "i", + "t", + "r", + "l", + "u", + "o", + "d", + "c", + "p", + "m", + "é", + "v", + "g", + "f", + "b", + "h", + "q", + "à", + "x", + "è", + "y", + "j", + ], + "Dutch": [ + "e", + "n", + "a", + "i", + "r", + "t", + "o", + "d", + "s", + "l", + "g", + "h", + "v", + "m", + "u", + "k", + "c", + "p", + "b", + "w", + "j", + "z", + "f", + "y", + "x", + "ë", + ], + "Italian": [ + "e", + "i", + "a", + "o", + "n", + "l", + "t", + "r", + "s", + "c", + "d", + "u", + "p", + "m", + "g", + "v", + "f", + "b", + "z", + "h", + "q", + "è", + "à", + "k", + "y", + "ò", + ], + "Polish": [ + "a", + "i", + "o", + "e", + "n", + "r", + "z", + "w", + "s", + "c", + "t", + "k", + "y", + "d", + "p", + "m", + "u", + "l", + "j", + "ł", + "g", + "b", + "h", + "ą", + "ę", + "ó", + ], + "Spanish": [ + "e", + "a", + "o", + "n", + "s", + "r", + "i", + "l", + "d", + "t", + "c", + "u", + "m", + "p", + "b", + "g", + "v", + "f", + "y", + "ó", + "h", + "q", + "í", + "j", + "z", + "á", + ], + "Russian": [ + "о", + "а", + "е", + "и", + "н", + "с", + "т", + "р", + "в", + "л", + "к", + "м", + "д", + "п", + "у", + "г", + "я", + "ы", + "з", + "б", + "й", + "ь", + "ч", + "х", + "ж", + "ц", + ], + # Jap-Kanji + "Japanese": [ + "人", + "一", + "大", + "亅", + "丁", + "丨", + "竹", + "笑", + "口", + "日", + "今", + "二", + "彳", + "行", + "十", + "土", + "丶", + "寸", + "寺", + "時", + "乙", + "丿", + "乂", + "气", + "気", + "冂", + "巾", + "亠", + "市", + "目", + "儿", + "見", + "八", + "小", + "凵", + "県", + "月", + "彐", + "門", + "間", + "木", + "東", + "山", + "出", + "本", + "中", + "刀", + "分", + "耳", + "又", + "取", + "最", + "言", + "田", + "心", + "思", + "刂", + "前", + "京", + "尹", + "事", + "生", + "厶", + "云", + "会", + "未", + "来", + "白", + "冫", + "楽", + "灬", + "馬", + "尸", + "尺", + "駅", + "明", + "耂", + "者", + "了", + "阝", + "都", + "高", + "卜", + "占", + "厂", + "广", + "店", + "子", + "申", + "奄", + "亻", + "俺", + "上", + "方", + "冖", + "学", + "衣", + "艮", + "食", + "自", + ], + # Jap-Katakana + "Japanese—": [ + "ー", + "ン", + "ス", + "・", + "ル", + "ト", + "リ", + "イ", + "ア", + "ラ", + "ッ", + "ク", + "ド", + "シ", + "レ", + "ジ", + "タ", + "フ", + "ロ", + "カ", + "テ", + "マ", + "ィ", + "グ", + "バ", + "ム", + "プ", + "オ", + "コ", + "デ", + "ニ", + "ウ", + "メ", + "サ", + "ビ", + "ナ", + "ブ", + "ャ", + "エ", + "ュ", + "チ", + "キ", + "ズ", + "ダ", + "パ", + "ミ", + "ェ", + "ョ", + "ハ", + "セ", + "ベ", + "ガ", + "モ", + "ツ", + "ネ", + "ボ", + "ソ", + "ノ", + "ァ", + "ヴ", + "ワ", + "ポ", + "ペ", + "ピ", + "ケ", + "ゴ", + "ギ", + "ザ", + "ホ", + "ゲ", + "ォ", + "ヤ", + "ヒ", + "ユ", + "ヨ", + "ヘ", + "ゼ", + "ヌ", + "ゥ", + "ゾ", + "ヶ", + "ヂ", + "ヲ", + "ヅ", + "ヵ", + "ヱ", + "ヰ", + "ヮ", + "ヽ", + "゠", + "ヾ", + "ヷ", + "ヿ", + "ヸ", + "ヹ", + "ヺ", + ], + # Jap-Hiragana + "Japanese——": [ + "の", + "に", + "る", + "た", + "と", + "は", + "し", + "い", + "を", 
+ "で", + "て", + "が", + "な", + "れ", + "か", + "ら", + "さ", + "っ", + "り", + "す", + "あ", + "も", + "こ", + "ま", + "う", + "く", + "よ", + "き", + "ん", + "め", + "お", + "け", + "そ", + "つ", + "だ", + "や", + "え", + "ど", + "わ", + "ち", + "み", + "せ", + "じ", + "ば", + "へ", + "び", + "ず", + "ろ", + "ほ", + "げ", + "む", + "べ", + "ひ", + "ょ", + "ゆ", + "ぶ", + "ご", + "ゃ", + "ね", + "ふ", + "ぐ", + "ぎ", + "ぼ", + "ゅ", + "づ", + "ざ", + "ぞ", + "ぬ", + "ぜ", + "ぱ", + "ぽ", + "ぷ", + "ぴ", + "ぃ", + "ぁ", + "ぇ", + "ぺ", + "ゞ", + "ぢ", + "ぉ", + "ぅ", + "ゐ", + "ゝ", + "ゑ", + "゛", + "゜", + "ゎ", + "ゔ", + "゚", + "ゟ", + "゙", + "ゕ", + "ゖ", + ], + "Portuguese": [ + "a", + "e", + "o", + "s", + "i", + "r", + "d", + "n", + "t", + "m", + "u", + "c", + "l", + "p", + "g", + "v", + "b", + "f", + "h", + "ã", + "q", + "é", + "ç", + "á", + "z", + "í", + ], + "Swedish": [ + "e", + "a", + "n", + "r", + "t", + "s", + "i", + "l", + "d", + "o", + "m", + "k", + "g", + "v", + "h", + "f", + "u", + "p", + "ä", + "c", + "b", + "ö", + "å", + "y", + "j", + "x", + ], + "Chinese": [ + "的", + "一", + "是", + "不", + "了", + "在", + "人", + "有", + "我", + "他", + "这", + "个", + "们", + "中", + "来", + "上", + "大", + "为", + "和", + "国", + "地", + "到", + "以", + "说", + "时", + "要", + "就", + "出", + "会", + "可", + "也", + "你", + "对", + "生", + "能", + "而", + "子", + "那", + "得", + "于", + "着", + "下", + "自", + "之", + "年", + "过", + "发", + "后", + "作", + "里", + "用", + "道", + "行", + "所", + "然", + "家", + "种", + "事", + "成", + "方", + "多", + "经", + "么", + "去", + "法", + "学", + "如", + "都", + "同", + "现", + "当", + "没", + "动", + "面", + "起", + "看", + "定", + "天", + "分", + "还", + "进", + "好", + "小", + "部", + "其", + "些", + "主", + "样", + "理", + "心", + "她", + "本", + "前", + "开", + "但", + "因", + "只", + "从", + "想", + "实", + ], + "Ukrainian": [ + "о", + "а", + "н", + "і", + "и", + "р", + "в", + "т", + "е", + "с", + "к", + "л", + "у", + "д", + "м", + "п", + "з", + "я", + "ь", + "б", + "г", + "й", + "ч", + "х", + "ц", + "ї", + ], + "Norwegian": [ + "e", + "r", + "n", + "t", + "a", + "s", + "i", + "o", + "l", + "d", + "g", + "k", + "m", + "v", + "f", + "p", + "u", + "b", + "h", + "å", + "y", + "j", + "ø", + "c", + "æ", + "w", + ], + "Finnish": [ + "a", + "i", + "n", + "t", + "e", + "s", + "l", + "o", + "u", + "k", + "ä", + "m", + "r", + "v", + "j", + "h", + "p", + "y", + "d", + "ö", + "g", + "c", + "b", + "f", + "w", + "z", + ], + "Vietnamese": [ + "n", + "h", + "t", + "i", + "c", + "g", + "a", + "o", + "u", + "m", + "l", + "r", + "à", + "đ", + "s", + "e", + "v", + "p", + "b", + "y", + "ư", + "d", + "á", + "k", + "ộ", + "ế", + ], + "Czech": [ + "o", + "e", + "a", + "n", + "t", + "s", + "i", + "l", + "v", + "r", + "k", + "d", + "u", + "m", + "p", + "í", + "c", + "h", + "z", + "á", + "y", + "j", + "b", + "ě", + "é", + "ř", + ], + "Hungarian": [ + "e", + "a", + "t", + "l", + "s", + "n", + "k", + "r", + "i", + "o", + "z", + "á", + "é", + "g", + "m", + "b", + "y", + "v", + "d", + "h", + "u", + "p", + "j", + "ö", + "f", + "c", + ], + "Korean": [ + "이", + "다", + "에", + "의", + "는", + "로", + "하", + "을", + "가", + "고", + "지", + "서", + "한", + "은", + "기", + "으", + "년", + "대", + "사", + "시", + "를", + "리", + "도", + "인", + "스", + "일", + ], + "Indonesian": [ + "a", + "n", + "e", + "i", + "r", + "t", + "u", + "s", + "d", + "k", + "m", + "l", + "g", + "p", + "b", + "o", + "h", + "y", + "j", + "c", + "w", + "f", + "v", + "z", + "x", + "q", + ], + "Turkish": [ + "a", + "e", + "i", + "n", + "r", + "l", + "ı", + "k", + "d", + "t", + "s", + "m", + "y", + "u", + "o", + "b", + "ü", + "ş", + "v", + "g", + "z", + "h", + "c", + "p", + "ç", + "ğ", + ], + 
"Romanian": [ + "e", + "i", + "a", + "r", + "n", + "t", + "u", + "l", + "o", + "c", + "s", + "d", + "p", + "m", + "ă", + "f", + "v", + "î", + "g", + "b", + "ș", + "ț", + "z", + "h", + "â", + "j", + ], + "Farsi": [ + "ا", + "ی", + "ر", + "د", + "ن", + "ه", + "و", + "م", + "ت", + "ب", + "س", + "ل", + "ک", + "ش", + "ز", + "ف", + "گ", + "ع", + "خ", + "ق", + "ج", + "آ", + "پ", + "ح", + "ط", + "ص", + ], + "Arabic": [ + "ا", + "ل", + "ي", + "م", + "و", + "ن", + "ر", + "ت", + "ب", + "ة", + "ع", + "د", + "س", + "ف", + "ه", + "ك", + "ق", + "أ", + "ح", + "ج", + "ش", + "ط", + "ص", + "ى", + "خ", + "إ", + ], + "Danish": [ + "e", + "r", + "n", + "t", + "a", + "i", + "s", + "d", + "l", + "o", + "g", + "m", + "k", + "f", + "v", + "u", + "b", + "h", + "p", + "å", + "y", + "ø", + "æ", + "c", + "j", + "w", + ], + "Serbian": [ + "а", + "и", + "о", + "е", + "н", + "р", + "с", + "у", + "т", + "к", + "ј", + "в", + "д", + "м", + "п", + "л", + "г", + "з", + "б", + "a", + "i", + "e", + "o", + "n", + "ц", + "ш", + ], + "Lithuanian": [ + "i", + "a", + "s", + "o", + "r", + "e", + "t", + "n", + "u", + "k", + "m", + "l", + "p", + "v", + "d", + "j", + "g", + "ė", + "b", + "y", + "ų", + "š", + "ž", + "c", + "ą", + "į", + ], + "Slovene": [ + "e", + "a", + "i", + "o", + "n", + "r", + "s", + "l", + "t", + "j", + "v", + "k", + "d", + "p", + "m", + "u", + "z", + "b", + "g", + "h", + "č", + "c", + "š", + "ž", + "f", + "y", + ], + "Slovak": [ + "o", + "a", + "e", + "n", + "i", + "r", + "v", + "t", + "s", + "l", + "k", + "d", + "m", + "p", + "u", + "c", + "h", + "j", + "b", + "z", + "á", + "y", + "ý", + "í", + "č", + "é", + ], + "Hebrew": [ + "י", + "ו", + "ה", + "ל", + "ר", + "ב", + "ת", + "מ", + "א", + "ש", + "נ", + "ע", + "ם", + "ד", + "ק", + "ח", + "פ", + "ס", + "כ", + "ג", + "ט", + "צ", + "ן", + "ז", + "ך", + ], + "Bulgarian": [ + "а", + "и", + "о", + "е", + "н", + "т", + "р", + "с", + "в", + "л", + "к", + "д", + "п", + "м", + "з", + "г", + "я", + "ъ", + "у", + "б", + "ч", + "ц", + "й", + "ж", + "щ", + "х", + ], + "Croatian": [ + "a", + "i", + "o", + "e", + "n", + "r", + "j", + "s", + "t", + "u", + "k", + "l", + "v", + "d", + "m", + "p", + "g", + "z", + "b", + "c", + "č", + "h", + "š", + "ž", + "ć", + "f", + ], + "Hindi": [ + "क", + "र", + "स", + "न", + "त", + "म", + "ह", + "प", + "य", + "ल", + "व", + "ज", + "द", + "ग", + "ब", + "श", + "ट", + "अ", + "ए", + "थ", + "भ", + "ड", + "च", + "ध", + "ष", + "इ", + ], + "Estonian": [ + "a", + "i", + "e", + "s", + "t", + "l", + "u", + "n", + "o", + "k", + "r", + "d", + "m", + "v", + "g", + "p", + "j", + "h", + "ä", + "b", + "õ", + "ü", + "f", + "c", + "ö", + "y", + ], + "Thai": [ + "า", + "น", + "ร", + "อ", + "ก", + "เ", + "ง", + "ม", + "ย", + "ล", + "ว", + "ด", + "ท", + "ส", + "ต", + "ะ", + "ป", + "บ", + "ค", + "ห", + "แ", + "จ", + "พ", + "ช", + "ข", + "ใ", + ], + "Greek": [ + "α", + "τ", + "ο", + "ι", + "ε", + "ν", + "ρ", + "σ", + "κ", + "η", + "π", + "ς", + "υ", + "μ", + "λ", + "ί", + "ό", + "ά", + "γ", + "έ", + "δ", + "ή", + "ω", + "χ", + "θ", + "ύ", + ], + "Tamil": [ + "க", + "த", + "ப", + "ட", + "ர", + "ம", + "ல", + "ன", + "வ", + "ற", + "ய", + "ள", + "ச", + "ந", + "இ", + "ண", + "அ", + "ஆ", + "ழ", + "ங", + "எ", + "உ", + "ஒ", + "ஸ", + ], + "Kazakh": [ + "а", + "ы", + "е", + "н", + "т", + "р", + "л", + "і", + "д", + "с", + "м", + "қ", + "к", + "о", + "б", + "и", + "у", + "ғ", + "ж", + "ң", + "з", + "ш", + "й", + "п", + "г", + "ө", + ], +} + +LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES) diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/legacy.py 
b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/legacy.py
new file mode 100644
index 0000000000000000000000000000000000000000..43aad21a9dd1c08c8d31e38908485d46b14efbd2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/legacy.py
@@ -0,0 +1,54 @@
+from typing import Any, Dict, Optional, Union
+from warnings import warn
+
+from .api import from_bytes
+from .constant import CHARDET_CORRESPONDENCE
+
+
+def detect(
+    byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
+) -> Dict[str, Optional[Union[str, float]]]:
+    """
+    chardet legacy method.
+    Detect the encoding of the given byte string. It should be mostly backward-compatible.
+    Encoding names will match chardet's own naming whenever possible (but not for encodings chardet does not support).
+    This function is deprecated; use it to migrate your project easily. Consult the documentation for
+    further information. Not planned for removal.
+
+    :param byte_str: The byte sequence to examine.
+    :param should_rename_legacy: Should we rename legacy encodings
+                                 to their more modern equivalents?
+    """
+    if len(kwargs):
+        warn(
+            f"charset-normalizer disregards arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
+        )
+
+    if not isinstance(byte_str, (bytearray, bytes)):
+        raise TypeError(  # pragma: nocover
+            "Expected object of type bytes or bytearray, got: "
+            "{0}".format(type(byte_str))
+        )
+
+    if isinstance(byte_str, bytearray):
+        byte_str = bytes(byte_str)
+
+    r = from_bytes(byte_str).best()
+
+    encoding = r.encoding if r is not None else None
+    language = r.language if r is not None and r.language != "Unknown" else ""
+    confidence = 1.0 - r.chaos if r is not None else None
+
+    # Note: CharsetNormalizer does not return 'UTF-8-SIG' since the sig gets stripped in the detection/normalization process,
+    # but chardet does return 'utf-8-sig', and it is a valid codec name.
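+    # Illustrative sketch, not part of the upstream source: a BOM-prefixed
+    # payload is expected to surface the "_sig" variant through the branch
+    # below, e.g.
+    #
+    #     >>> import codecs
+    #     >>> detect(codecs.BOM_UTF8 + "passe-partout".encode("utf_8"))["encoding"]
+    #     'UTF-8-SIG'
+    #
+    # (the exact spelling depends on CHARDET_CORRESPONDENCE; treat as indicative)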
+    if r is not None and encoding == "utf_8" and r.bom:
+        encoding += "_sig"
+
+    if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
+        encoding = CHARDET_CORRESPONDENCE[encoding]
+
+    return {
+        "encoding": encoding,
+        "language": language,
+        "confidence": confidence,
+    }
diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3824a428ffd621958e1f1f22dfd105c58417ffd0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.py
new file mode 100644
index 0000000000000000000000000000000000000000..77897aae4f44d084d6a59d7f7f1665035ff0047d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md.py
@@ -0,0 +1,615 @@
+from functools import lru_cache
+from logging import getLogger
+from typing import List, Optional
+
+from .constant import (
+    COMMON_SAFE_ASCII_CHARACTERS,
+    TRACE,
+    UNICODE_SECONDARY_RANGE_KEYWORD,
+)
+from .utils import (
+    is_accentuated,
+    is_arabic,
+    is_arabic_isolated_form,
+    is_case_variable,
+    is_cjk,
+    is_emoticon,
+    is_hangul,
+    is_hiragana,
+    is_katakana,
+    is_latin,
+    is_punctuation,
+    is_separator,
+    is_symbol,
+    is_thai,
+    is_unprintable,
+    remove_accent,
+    unicode_range,
+)
+
+
+class MessDetectorPlugin:
+    """
+    Base abstract class used for mess detection plugins.
+    All detectors MUST extend and implement the given methods.
+    """
+
+    def eligible(self, character: str) -> bool:
+        """
+        Determine if the given character should be fed in.
+        """
+        raise NotImplementedError  # pragma: nocover
+
+    def feed(self, character: str) -> None:
+        """
+        The main routine, executed per character.
+        Insert the logic by which the text would be considered chaotic.
+        """
+        raise NotImplementedError  # pragma: nocover
+
+    def reset(self) -> None:  # pragma: no cover
+        """
+        Reset the plugin to its initial state.
+        """
+        raise NotImplementedError
+
+    @property
+    def ratio(self) -> float:
+        """
+        Compute the chaos ratio based on what your feed() has seen.
+        Must NOT be lower than 0.0; there is no upper bound.
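+        For example, a plugin may return 0.0 while the text looks sane and any
+        positive float once it turns suspicious; mess_ratio() later sums the
+        ratio of every registered plugin (illustrative note, not upstream wording).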
+ """ + raise NotImplementedError # pragma: nocover + + +class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._punctuation_count: int = 0 + self._symbol_count: int = 0 + self._character_count: int = 0 + + self._last_printable_char: Optional[str] = None + self._frenzy_symbol_in_word: bool = False + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character != self._last_printable_char + and character not in COMMON_SAFE_ASCII_CHARACTERS + ): + if is_punctuation(character): + self._punctuation_count += 1 + elif ( + character.isdigit() is False + and is_symbol(character) + and is_emoticon(character) is False + ): + self._symbol_count += 2 + + self._last_printable_char = character + + def reset(self) -> None: # pragma: no cover + self._punctuation_count = 0 + self._character_count = 0 + self._symbol_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + ratio_of_punctuation: float = ( + self._punctuation_count + self._symbol_count + ) / self._character_count + + return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 + + +class TooManyAccentuatedPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._accentuated_count: int = 0 + + def eligible(self, character: str) -> bool: + return character.isalpha() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_accentuated(character): + self._accentuated_count += 1 + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._accentuated_count = 0 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + ratio_of_accentuation: float = self._accentuated_count / self._character_count + return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 + + +class UnprintablePlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._unprintable_count: int = 0 + self._character_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if is_unprintable(character): + self._unprintable_count += 1 + self._character_count += 1 + + def reset(self) -> None: # pragma: no cover + self._unprintable_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._unprintable_count * 8) / self._character_count + + +class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._successive_count: int = 0 + self._character_count: int = 0 + + self._last_latin_character: Optional[str] = None + + def eligible(self, character: str) -> bool: + return character.isalpha() and is_latin(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + if ( + self._last_latin_character is not None + and is_accentuated(character) + and is_accentuated(self._last_latin_character) + ): + if character.isupper() and self._last_latin_character.isupper(): + self._successive_count += 1 + # Worse if its the same char duplicated with different accent. 
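+            # Illustrative example (not upstream wording): "é" directly followed
+            # by "è" lands here, since remove_accent() maps both to "e", and the
+            # pair is therefore counted a second time.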
+ if remove_accent(character) == remove_accent(self._last_latin_character): + self._successive_count += 1 + self._last_latin_character = character + + def reset(self) -> None: # pragma: no cover + self._successive_count = 0 + self._character_count = 0 + self._last_latin_character = None + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._successive_count * 2) / self._character_count + + +class SuspiciousRange(MessDetectorPlugin): + def __init__(self) -> None: + self._suspicious_successive_range_count: int = 0 + self._character_count: int = 0 + self._last_printable_seen: Optional[str] = None + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character.isspace() + or is_punctuation(character) + or character in COMMON_SAFE_ASCII_CHARACTERS + ): + self._last_printable_seen = None + return + + if self._last_printable_seen is None: + self._last_printable_seen = character + return + + unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen) + unicode_range_b: Optional[str] = unicode_range(character) + + if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): + self._suspicious_successive_range_count += 1 + + self._last_printable_seen = character + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._suspicious_successive_range_count = 0 + self._last_printable_seen = None + + @property + def ratio(self) -> float: + if self._character_count <= 24: + return 0.0 + + ratio_of_suspicious_range_usage: float = ( + self._suspicious_successive_range_count * 2 + ) / self._character_count + + return ratio_of_suspicious_range_usage + + +class SuperWeirdWordPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._word_count: int = 0 + self._bad_word_count: int = 0 + self._foreign_long_count: int = 0 + + self._is_current_word_bad: bool = False + self._foreign_long_watch: bool = False + + self._character_count: int = 0 + self._bad_character_count: int = 0 + + self._buffer: str = "" + self._buffer_accent_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if character.isalpha(): + self._buffer += character + if is_accentuated(character): + self._buffer_accent_count += 1 + if ( + self._foreign_long_watch is False + and (is_latin(character) is False or is_accentuated(character)) + and is_cjk(character) is False + and is_hangul(character) is False + and is_katakana(character) is False + and is_hiragana(character) is False + and is_thai(character) is False + ): + self._foreign_long_watch = True + return + if not self._buffer: + return + if ( + character.isspace() or is_punctuation(character) or is_separator(character) + ) and self._buffer: + self._word_count += 1 + buffer_length: int = len(self._buffer) + + self._character_count += buffer_length + + if buffer_length >= 4: + if self._buffer_accent_count / buffer_length > 0.34: + self._is_current_word_bad = True + # Word/Buffer ending with an upper case accentuated letter are so rare, + # that we will consider them all as suspicious. Same weight as foreign_long suspicious. 
+ if ( + is_accentuated(self._buffer[-1]) + and self._buffer[-1].isupper() + and all(_.isupper() for _ in self._buffer) is False + ): + self._foreign_long_count += 1 + self._is_current_word_bad = True + if buffer_length >= 24 and self._foreign_long_watch: + camel_case_dst = [ + i + for c, i in zip(self._buffer, range(0, buffer_length)) + if c.isupper() + ] + probable_camel_cased: bool = False + + if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3): + probable_camel_cased = True + + if not probable_camel_cased: + self._foreign_long_count += 1 + self._is_current_word_bad = True + + if self._is_current_word_bad: + self._bad_word_count += 1 + self._bad_character_count += len(self._buffer) + self._is_current_word_bad = False + + self._foreign_long_watch = False + self._buffer = "" + self._buffer_accent_count = 0 + elif ( + character not in {"<", ">", "-", "=", "~", "|", "_"} + and character.isdigit() is False + and is_symbol(character) + ): + self._is_current_word_bad = True + self._buffer += character + + def reset(self) -> None: # pragma: no cover + self._buffer = "" + self._is_current_word_bad = False + self._foreign_long_watch = False + self._bad_word_count = 0 + self._word_count = 0 + self._character_count = 0 + self._bad_character_count = 0 + self._foreign_long_count = 0 + + @property + def ratio(self) -> float: + if self._word_count <= 10 and self._foreign_long_count == 0: + return 0.0 + + return self._bad_character_count / self._character_count + + +class CjkInvalidStopPlugin(MessDetectorPlugin): + """ + GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and + can be easily detected. Searching for the overuse of '丅' and '丄'. + """ + + def __init__(self) -> None: + self._wrong_stop_count: int = 0 + self._cjk_character_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if character in {"丅", "丄"}: + self._wrong_stop_count += 1 + return + if is_cjk(character): + self._cjk_character_count += 1 + + def reset(self) -> None: # pragma: no cover + self._wrong_stop_count = 0 + self._cjk_character_count = 0 + + @property + def ratio(self) -> float: + if self._cjk_character_count < 16: + return 0.0 + return self._wrong_stop_count / self._cjk_character_count + + +class ArchaicUpperLowerPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._buf: bool = False + + self._character_count_since_last_sep: int = 0 + + self._successive_upper_lower_count: int = 0 + self._successive_upper_lower_count_final: int = 0 + + self._character_count: int = 0 + + self._last_alpha_seen: Optional[str] = None + self._current_ascii_only: bool = True + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + is_concerned = character.isalpha() and is_case_variable(character) + chunk_sep = is_concerned is False + + if chunk_sep and self._character_count_since_last_sep > 0: + if ( + self._character_count_since_last_sep <= 64 + and character.isdigit() is False + and self._current_ascii_only is False + ): + self._successive_upper_lower_count_final += ( + self._successive_upper_lower_count + ) + + self._successive_upper_lower_count = 0 + self._character_count_since_last_sep = 0 + self._last_alpha_seen = None + self._buf = False + self._character_count += 1 + self._current_ascii_only = True + + return + + if self._current_ascii_only is True and character.isascii() is False: + self._current_ascii_only = False + + if self._last_alpha_seen is not None: 
+ if (character.isupper() and self._last_alpha_seen.islower()) or ( + character.islower() and self._last_alpha_seen.isupper() + ): + if self._buf is True: + self._successive_upper_lower_count += 2 + self._buf = False + else: + self._buf = True + else: + self._buf = False + + self._character_count += 1 + self._character_count_since_last_sep += 1 + self._last_alpha_seen = character + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._character_count_since_last_sep = 0 + self._successive_upper_lower_count = 0 + self._successive_upper_lower_count_final = 0 + self._last_alpha_seen = None + self._buf = False + self._current_ascii_only = True + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return self._successive_upper_lower_count_final / self._character_count + + +class ArabicIsolatedFormPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._isolated_form_count: int = 0 + + def reset(self) -> None: # pragma: no cover + self._character_count = 0 + self._isolated_form_count = 0 + + def eligible(self, character: str) -> bool: + return is_arabic(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_arabic_isolated_form(character): + self._isolated_form_count += 1 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + isolated_form_usage: float = self._isolated_form_count / self._character_count + + return isolated_form_usage + + +@lru_cache(maxsize=1024) +def is_suspiciously_successive_range( + unicode_range_a: Optional[str], unicode_range_b: Optional[str] +) -> bool: + """ + Determine if two Unicode range seen next to each other can be considered as suspicious. + """ + if unicode_range_a is None or unicode_range_b is None: + return True + + if unicode_range_a == unicode_range_b: + return False + + if "Latin" in unicode_range_a and "Latin" in unicode_range_b: + return False + + if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: + return False + + # Latin characters can be accompanied with a combining diacritical mark + # eg. Vietnamese. + if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( + "Combining" in unicode_range_a or "Combining" in unicode_range_b + ): + return False + + keywords_range_a, keywords_range_b = unicode_range_a.split( + " " + ), unicode_range_b.split(" ") + + for el in keywords_range_a: + if el in UNICODE_SECONDARY_RANGE_KEYWORD: + continue + if el in keywords_range_b: + return False + + # Japanese Exception + range_a_jp_chars, range_b_jp_chars = ( + unicode_range_a + in ( + "Hiragana", + "Katakana", + ), + unicode_range_b in ("Hiragana", "Katakana"), + ) + if (range_a_jp_chars or range_b_jp_chars) and ( + "CJK" in unicode_range_a or "CJK" in unicode_range_b + ): + return False + if range_a_jp_chars and range_b_jp_chars: + return False + + if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: + if "CJK" in unicode_range_a or "CJK" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + # Chinese/Japanese use dedicated range for punctuation and/or separators. 
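+    # Illustrative example (not upstream wording): "日" (CJK Unified Ideographs)
+    # followed by "。" (CJK Symbols and Punctuation) is a legitimate neighbour
+    # pair, so the checks below return False for it.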
+ if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( + unicode_range_a in ["Katakana", "Hiragana"] + and unicode_range_b in ["Katakana", "Hiragana"] + ): + if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: + return False + if "Forms" in unicode_range_a or "Forms" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + return True + + +@lru_cache(maxsize=2048) +def mess_ratio( + decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False +) -> float: + """ + Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier. + """ + + detectors: List[MessDetectorPlugin] = [ + md_class() for md_class in MessDetectorPlugin.__subclasses__() + ] + + length: int = len(decoded_sequence) + 1 + + mean_mess_ratio: float = 0.0 + + if length < 512: + intermediary_mean_mess_ratio_calc: int = 32 + elif length <= 1024: + intermediary_mean_mess_ratio_calc = 64 + else: + intermediary_mean_mess_ratio_calc = 128 + + for character, index in zip(decoded_sequence + "\n", range(length)): + for detector in detectors: + if detector.eligible(character): + detector.feed(character) + + if ( + index > 0 and index % intermediary_mean_mess_ratio_calc == 0 + ) or index == length - 1: + mean_mess_ratio = sum(dt.ratio for dt in detectors) + + if mean_mess_ratio >= maximum_threshold: + break + + if debug: + logger = getLogger("charset_normalizer") + + logger.log( + TRACE, + "Mess-detector extended-analysis start. " + f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} " + f"maximum_threshold={maximum_threshold}", + ) + + if len(decoded_sequence) > 16: + logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}") + logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}") + + for dt in detectors: # pragma: nocover + logger.log(TRACE, f"{dt.__class__}: {dt.ratio}") + + return round(mean_mess_ratio, 3) diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7ebead5dcde902718372fb8aa618afd1d973956b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/models.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/models.py new file mode 100644 index 0000000000000000000000000000000000000000..a760b9c558d953f6907d29fa31844d07d06f9ce1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/models.py @@ -0,0 +1,340 @@ +from encodings.aliases import aliases +from hashlib import sha256 +from json import dumps +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union + +from .constant import TOO_BIG_SEQUENCE +from .utils import iana_name, is_multi_byte_encoding, unicode_range + + +class CharsetMatch: + def __init__( + self, + payload: bytes, + guessed_encoding: str, + mean_mess_ratio: float, + has_sig_or_bom: bool, + languages: "CoherenceMatches", + decoded_payload: Optional[str] = None, + ): + self._payload: bytes = payload + + self._encoding: str = guessed_encoding + self._mean_mess_ratio: float = mean_mess_ratio + self._languages: CoherenceMatches = languages + self._has_sig_or_bom: bool 
= has_sig_or_bom + self._unicode_ranges: Optional[List[str]] = None + + self._leaves: List[CharsetMatch] = [] + self._mean_coherence_ratio: float = 0.0 + + self._output_payload: Optional[bytes] = None + self._output_encoding: Optional[str] = None + + self._string: Optional[str] = decoded_payload + + def __eq__(self, other: object) -> bool: + if not isinstance(other, CharsetMatch): + raise TypeError( + "__eq__ cannot be invoked on {} and {}.".format( + str(other.__class__), str(self.__class__) + ) + ) + return self.encoding == other.encoding and self.fingerprint == other.fingerprint + + def __lt__(self, other: object) -> bool: + """ + Implemented to make sorted available upon CharsetMatches items. + """ + if not isinstance(other, CharsetMatch): + raise ValueError + + chaos_difference: float = abs(self.chaos - other.chaos) + coherence_difference: float = abs(self.coherence - other.coherence) + + # Below 1% difference --> Use Coherence + if chaos_difference < 0.01 and coherence_difference > 0.02: + return self.coherence > other.coherence + elif chaos_difference < 0.01 and coherence_difference <= 0.02: + # When having a difficult decision, use the result that decoded as many multi-byte as possible. + # preserve RAM usage! + if len(self._payload) >= TOO_BIG_SEQUENCE: + return self.chaos < other.chaos + return self.multi_byte_usage > other.multi_byte_usage + + return self.chaos < other.chaos + + @property + def multi_byte_usage(self) -> float: + return 1.0 - (len(str(self)) / len(self.raw)) + + def __str__(self) -> str: + # Lazy Str Loading + if self._string is None: + self._string = str(self._payload, self._encoding, "strict") + return self._string + + def __repr__(self) -> str: + return "".format(self.encoding, self.fingerprint) + + def add_submatch(self, other: "CharsetMatch") -> None: + if not isinstance(other, CharsetMatch) or other == self: + raise ValueError( + "Unable to add instance <{}> as a submatch of a CharsetMatch".format( + other.__class__ + ) + ) + + other._string = None # Unload RAM usage; dirty trick. + self._leaves.append(other) + + @property + def encoding(self) -> str: + return self._encoding + + @property + def encoding_aliases(self) -> List[str]: + """ + Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855. + """ + also_known_as: List[str] = [] + for u, p in aliases.items(): + if self.encoding == u: + also_known_as.append(p) + elif self.encoding == p: + also_known_as.append(u) + return also_known_as + + @property + def bom(self) -> bool: + return self._has_sig_or_bom + + @property + def byte_order_mark(self) -> bool: + return self._has_sig_or_bom + + @property + def languages(self) -> List[str]: + """ + Return the complete list of possible languages found in decoded sequence. + Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'. + """ + return [e[0] for e in self._languages] + + @property + def language(self) -> str: + """ + Most probable language found in decoded sequence. If none were detected or inferred, the property will return + "Unknown". + """ + if not self._languages: + # Trying to infer the language based on the given encoding + # Its either English or we should not pronounce ourselves in certain cases. 
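+            # Illustrative note (not upstream wording): a pure-ASCII payload
+            # (where could_be_from_charset includes "ascii") is reported as
+            # "English"; any other match without coherence results falls through
+            # to the encoding_languages()/mb_encoding_languages() lookup below.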
+ if "ascii" in self.could_be_from_charset: + return "English" + + # doing it there to avoid circular import + from charset_normalizer.cd import encoding_languages, mb_encoding_languages + + languages = ( + mb_encoding_languages(self.encoding) + if is_multi_byte_encoding(self.encoding) + else encoding_languages(self.encoding) + ) + + if len(languages) == 0 or "Latin Based" in languages: + return "Unknown" + + return languages[0] + + return self._languages[0][0] + + @property + def chaos(self) -> float: + return self._mean_mess_ratio + + @property + def coherence(self) -> float: + if not self._languages: + return 0.0 + return self._languages[0][1] + + @property + def percent_chaos(self) -> float: + return round(self.chaos * 100, ndigits=3) + + @property + def percent_coherence(self) -> float: + return round(self.coherence * 100, ndigits=3) + + @property + def raw(self) -> bytes: + """ + Original untouched bytes. + """ + return self._payload + + @property + def submatch(self) -> List["CharsetMatch"]: + return self._leaves + + @property + def has_submatch(self) -> bool: + return len(self._leaves) > 0 + + @property + def alphabets(self) -> List[str]: + if self._unicode_ranges is not None: + return self._unicode_ranges + # list detected ranges + detected_ranges: List[Optional[str]] = [ + unicode_range(char) for char in str(self) + ] + # filter and sort + self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) + return self._unicode_ranges + + @property + def could_be_from_charset(self) -> List[str]: + """ + The complete list of encoding that output the exact SAME str result and therefore could be the originating + encoding. + This list does include the encoding available in property 'encoding'. + """ + return [self._encoding] + [m.encoding for m in self._leaves] + + def output(self, encoding: str = "utf_8") -> bytes: + """ + Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. + Any errors will be simply ignored by the encoder NOT replaced. + """ + if self._output_encoding is None or self._output_encoding != encoding: + self._output_encoding = encoding + self._output_payload = str(self).encode(encoding, "replace") + + return self._output_payload # type: ignore + + @property + def fingerprint(self) -> str: + """ + Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. + """ + return sha256(self.output()).hexdigest() + + +class CharsetMatches: + """ + Container with every CharsetMatch items ordered by default from most probable to the less one. + Act like a list(iterable) but does not implements all related methods. + """ + + def __init__(self, results: Optional[List[CharsetMatch]] = None): + self._results: List[CharsetMatch] = sorted(results) if results else [] + + def __iter__(self) -> Iterator[CharsetMatch]: + yield from self._results + + def __getitem__(self, item: Union[int, str]) -> CharsetMatch: + """ + Retrieve a single item either by its position or encoding name (alias may be used here). + Raise KeyError upon invalid index or encoding not present in results. + """ + if isinstance(item, int): + return self._results[item] + if isinstance(item, str): + item = iana_name(item, False) + for result in self._results: + if item in result.could_be_from_charset: + return result + raise KeyError + + def __len__(self) -> int: + return len(self._results) + + def __bool__(self) -> bool: + return len(self._results) > 0 + + def append(self, item: CharsetMatch) -> None: + """ + Insert a single match. 
Will be inserted accordingly to preserve sort. + Can be inserted as a submatch. + """ + if not isinstance(item, CharsetMatch): + raise ValueError( + "Cannot append instance '{}' to CharsetMatches".format( + str(item.__class__) + ) + ) + # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) + if len(item.raw) <= TOO_BIG_SEQUENCE: + for match in self._results: + if match.fingerprint == item.fingerprint and match.chaos == item.chaos: + match.add_submatch(item) + return + self._results.append(item) + self._results = sorted(self._results) + + def best(self) -> Optional["CharsetMatch"]: + """ + Simply return the first match. Strict equivalent to matches[0]. + """ + if not self._results: + return None + return self._results[0] + + def first(self) -> Optional["CharsetMatch"]: + """ + Redundant method, call the method best(). Kept for BC reasons. + """ + return self.best() + + +CoherenceMatch = Tuple[str, float] +CoherenceMatches = List[CoherenceMatch] + + +class CliDetectionResult: + def __init__( + self, + path: str, + encoding: Optional[str], + encoding_aliases: List[str], + alternative_encodings: List[str], + language: str, + alphabets: List[str], + has_sig_or_bom: bool, + chaos: float, + coherence: float, + unicode_path: Optional[str], + is_preferred: bool, + ): + self.path: str = path + self.unicode_path: Optional[str] = unicode_path + self.encoding: Optional[str] = encoding + self.encoding_aliases: List[str] = encoding_aliases + self.alternative_encodings: List[str] = alternative_encodings + self.language: str = language + self.alphabets: List[str] = alphabets + self.has_sig_or_bom: bool = has_sig_or_bom + self.chaos: float = chaos + self.coherence: float = coherence + self.is_preferred: bool = is_preferred + + @property + def __dict__(self) -> Dict[str, Any]: # type: ignore + return { + "path": self.path, + "encoding": self.encoding, + "encoding_aliases": self.encoding_aliases, + "alternative_encodings": self.alternative_encodings, + "language": self.language, + "alphabets": self.alphabets, + "has_sig_or_bom": self.has_sig_or_bom, + "chaos": self.chaos, + "coherence": self.coherence, + "unicode_path": self.unicode_path, + "is_preferred": self.is_preferred, + } + + def to_json(self) -> str: + return dumps(self.__dict__, ensure_ascii=True, indent=4) diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/py.typed b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/utils.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e5cbbf4c0ddfa5c1b5898d8a4405e27292100d41 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/utils.py @@ -0,0 +1,421 @@ +import importlib +import logging +import unicodedata +from codecs import IncrementalDecoder +from encodings.aliases import aliases +from functools import lru_cache +from re import findall +from typing import Generator, List, Optional, Set, Tuple, Union + +from _multibytecodec import MultibyteIncrementalDecoder + +from .constant import ( + ENCODING_MARKS, + IANA_SUPPORTED_SIMILAR, + RE_POSSIBLE_ENCODING_INDICATION, + UNICODE_RANGES_COMBINED, + UNICODE_SECONDARY_RANGE_KEYWORD, + UTF8_MAXIMAL_ALLOCATION, +) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_accentuated(character: 
str) -> bool: + try: + description: str = unicodedata.name(character) + except ValueError: + return False + return ( + "WITH GRAVE" in description + or "WITH ACUTE" in description + or "WITH CEDILLA" in description + or "WITH DIAERESIS" in description + or "WITH CIRCUMFLEX" in description + or "WITH TILDE" in description + or "WITH MACRON" in description + or "WITH RING ABOVE" in description + ) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def remove_accent(character: str) -> str: + decomposed: str = unicodedata.decomposition(character) + if not decomposed: + return character + + codes: List[str] = decomposed.split(" ") + + return chr(int(codes[0], 16)) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def unicode_range(character: str) -> Optional[str]: + """ + Retrieve the Unicode range official name from a single character. + """ + character_ord: int = ord(character) + + for range_name, ord_range in UNICODE_RANGES_COMBINED.items(): + if character_ord in ord_range: + return range_name + + return None + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_latin(character: str) -> bool: + try: + description: str = unicodedata.name(character) + except ValueError: + return False + return "LATIN" in description + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_punctuation(character: str) -> bool: + character_category: str = unicodedata.category(character) + + if "P" in character_category: + return True + + character_range: Optional[str] = unicode_range(character) + + if character_range is None: + return False + + return "Punctuation" in character_range + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_symbol(character: str) -> bool: + character_category: str = unicodedata.category(character) + + if "S" in character_category or "N" in character_category: + return True + + character_range: Optional[str] = unicode_range(character) + + if character_range is None: + return False + + return "Forms" in character_range and character_category != "Lo" + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_emoticon(character: str) -> bool: + character_range: Optional[str] = unicode_range(character) + + if character_range is None: + return False + + return "Emoticons" in character_range or "Pictographs" in character_range + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_separator(character: str) -> bool: + if character.isspace() or character in {"|", "+", "<", ">"}: + return True + + character_category: str = unicodedata.category(character) + + return "Z" in character_category or character_category in {"Po", "Pd", "Pc"} + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_case_variable(character: str) -> bool: + return character.islower() != character.isupper() + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_cjk(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return False + + return "CJK" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_hiragana(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return False + + return "HIRAGANA" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_katakana(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return False + + return "KATAKANA" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_hangul(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return 
False + + return "HANGUL" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_thai(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return False + + return "THAI" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_arabic(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return False + + return "ARABIC" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_arabic_isolated_form(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: + return False + + return "ARABIC" in character_name and "ISOLATED FORM" in character_name + + +@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED)) +def is_unicode_range_secondary(range_name: str) -> bool: + return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_unprintable(character: str) -> bool: + return ( + character.isspace() is False # includes \n \t \r \v + and character.isprintable() is False + and character != "\x1A" # Why? Its the ASCII substitute character. + and character != "\ufeff" # bug discovered in Python, + # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space. + ) + + +def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]: + """ + Extract using ASCII-only decoder any specified encoding in the first n-bytes. + """ + if not isinstance(sequence, bytes): + raise TypeError + + seq_len: int = len(sequence) + + results: List[str] = findall( + RE_POSSIBLE_ENCODING_INDICATION, + sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"), + ) + + if len(results) == 0: + return None + + for specified_encoding in results: + specified_encoding = specified_encoding.lower().replace("-", "_") + + encoding_alias: str + encoding_iana: str + + for encoding_alias, encoding_iana in aliases.items(): + if encoding_alias == specified_encoding: + return encoding_iana + if encoding_iana == specified_encoding: + return encoding_iana + + return None + + +@lru_cache(maxsize=128) +def is_multi_byte_encoding(name: str) -> bool: + """ + Verify is a specific encoding is a multi byte one based on it IANA name + """ + return name in { + "utf_8", + "utf_8_sig", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_32", + "utf_32_le", + "utf_32_be", + "utf_7", + } or issubclass( + importlib.import_module("encodings.{}".format(name)).IncrementalDecoder, + MultibyteIncrementalDecoder, + ) + + +def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]: + """ + Identify and extract SIG/BOM in given sequence. 
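+    Illustrative example, assuming ENCODING_MARKS maps "utf_8" to the UTF-8 BOM:
+    identify_sig_or_bom(b"\xef\xbb\xbfabc") is expected to return
+    ("utf_8", b"\xef\xbb\xbf"), while a mark-free sequence yields (None, b"").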
+ """ + + for iana_encoding in ENCODING_MARKS: + marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding] + + if isinstance(marks, bytes): + marks = [marks] + + for mark in marks: + if sequence.startswith(mark): + return iana_encoding, mark + + return None, b"" + + +def should_strip_sig_or_bom(iana_encoding: str) -> bool: + return iana_encoding not in {"utf_16", "utf_32"} + + +def iana_name(cp_name: str, strict: bool = True) -> str: + cp_name = cp_name.lower().replace("-", "_") + + encoding_alias: str + encoding_iana: str + + for encoding_alias, encoding_iana in aliases.items(): + if cp_name in [encoding_alias, encoding_iana]: + return encoding_iana + + if strict: + raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name)) + + return cp_name + + +def range_scan(decoded_sequence: str) -> List[str]: + ranges: Set[str] = set() + + for character in decoded_sequence: + character_range: Optional[str] = unicode_range(character) + + if character_range is None: + continue + + ranges.add(character_range) + + return list(ranges) + + +def cp_similarity(iana_name_a: str, iana_name_b: str) -> float: + if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b): + return 0.0 + + decoder_a = importlib.import_module( + "encodings.{}".format(iana_name_a) + ).IncrementalDecoder + decoder_b = importlib.import_module( + "encodings.{}".format(iana_name_b) + ).IncrementalDecoder + + id_a: IncrementalDecoder = decoder_a(errors="ignore") + id_b: IncrementalDecoder = decoder_b(errors="ignore") + + character_match_count: int = 0 + + for i in range(255): + to_be_decoded: bytes = bytes([i]) + if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded): + character_match_count += 1 + + return character_match_count / 254 + + +def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool: + """ + Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using + the function cp_similarity. + """ + return ( + iana_name_a in IANA_SUPPORTED_SIMILAR + and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a] + ) + + +def set_logging_handler( + name: str = "charset_normalizer", + level: int = logging.INFO, + format_string: str = "%(asctime)s | %(levelname)s | %(message)s", +) -> None: + logger = logging.getLogger(name) + logger.setLevel(level) + + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter(format_string)) + logger.addHandler(handler) + + +def cut_sequence_chunks( + sequences: bytes, + encoding_iana: str, + offsets: range, + chunk_size: int, + bom_or_sig_available: bool, + strip_sig_or_bom: bool, + sig_payload: bytes, + is_multi_byte_decoder: bool, + decoded_payload: Optional[str] = None, +) -> Generator[str, None, None]: + if decoded_payload and is_multi_byte_decoder is False: + for i in offsets: + chunk = decoded_payload[i : i + chunk_size] + if not chunk: + break + yield chunk + else: + for i in offsets: + chunk_end = i + chunk_size + if chunk_end > len(sequences) + 8: + continue + + cut_sequence = sequences[i : i + chunk_size] + + if bom_or_sig_available and strip_sig_or_bom is False: + cut_sequence = sig_payload + cut_sequence + + chunk = cut_sequence.decode( + encoding_iana, + errors="ignore" if is_multi_byte_decoder else "strict", + ) + + # multi-byte bad cutting detector and adjustment + # not the cleanest way to perform that fix but clever enough for now. 
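+            # Illustrative walk-through (not upstream wording): cutting a UTF-8
+            # stream in the middle of a multi-byte sequence makes the chunk start
+            # with characters absent from decoded_payload; stepping the start
+            # offset back by a few bytes below realigns the cut on a codepoint
+            # boundary.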
+ if is_multi_byte_decoder and i > 0: + chunk_partial_size_chk: int = min(chunk_size, 16) + + if ( + decoded_payload + and chunk[:chunk_partial_size_chk] not in decoded_payload + ): + for j in range(i, i - 4, -1): + cut_sequence = sequences[j:chunk_end] + + if bom_or_sig_available and strip_sig_or_bom is False: + cut_sequence = sig_payload + cut_sequence + + chunk = cut_sequence.decode(encoding_iana, errors="ignore") + + if chunk[:chunk_partial_size_chk] in decoded_payload: + break + + yield chunk diff --git a/llmeval-env/lib/python3.10/site-packages/charset_normalizer/version.py b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/version.py new file mode 100644 index 0000000000000000000000000000000000000000..5a4da4ff49bc80ef49e8aa7e01cc8555518bd1b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/charset_normalizer/version.py @@ -0,0 +1,6 @@ +""" +Expose version +""" + +__version__ = "3.3.2" +VERSION = __version__.split(".") diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..492d05e3cdc489ee62adfd32d2fa454da6d1de1c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58a8d086ff616b2ef75ab0d788d990e749f96e8d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py @@ -0,0 +1,18 @@ +from . 
import cloudpickle +from .cloudpickle import * # noqa + +__doc__ = cloudpickle.__doc__ + +__version__ = "3.0.0" + +__all__ = [ # noqa + "__version__", + "Pickler", + "CloudPickler", + "dumps", + "loads", + "dump", + "load", + "register_pickle_by_value", + "unregister_pickle_by_value", +] diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b80094b8f9a8a5e28146fd22160d9dee8895ad Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbaf9b07db2e3aadb74616efe7c7e82d64f72e48 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91a884a4a1bc0e70358dc933ff9fb3fc5dbdc34c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py new file mode 100644 index 0000000000000000000000000000000000000000..eb43a9676bbb11bdecf187e7f6cde51f793ff3fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py @@ -0,0 +1,1487 @@ +"""Pickler class to extend the standard pickle.Pickler functionality + +The main objective is to make it natural to perform distributed computing on +clusters (such as PySpark, Dask, Ray...) with interactively defined code +(functions, classes, ...) written in notebooks or console. + +In particular this pickler adds the following features: +- serialize interactively-defined or locally-defined functions, classes, + enums, typevars, lambdas and nested functions to compiled byte code; +- deal with some other non-serializable objects in an ad-hoc manner where + applicable. + +This pickler is therefore meant to be used for the communication between short +lived Python processes running the same version of Python and libraries. In +particular, it is not meant to be used for long term storage of Python objects. + +It does not include an unpickler, as standard Python unpickling suffices. + +This module was extracted from the `cloud` package, developed by `PiCloud, Inc. +`_. + +Copyright (c) 2012-now, CloudPickle developers and contributors. +Copyright (c) 2012, Regents of the University of California. +Copyright (c) 2009 `PiCloud, Inc. `_. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the University of California, Berkeley nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import _collections_abc +from collections import ChainMap, OrderedDict +import abc +import builtins +import copyreg +import dataclasses +import dis +from enum import Enum +import io +import itertools +import logging +import opcode +import pickle +from pickle import _getattribute +import platform +import struct +import sys +import threading +import types +import typing +import uuid +import warnings +import weakref + +# The following import is required to be imported in the cloudpickle +# namespace to be able to load pickle files generated with older versions of +# cloudpickle. See: tests/test_backward_compat.py +from types import CellType # noqa: F401 + + +# cloudpickle is meant for inter process communication: we expect all +# communicating processes to run the same Python version hence we favor +# communication speed over compatibility: +DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL + +# Names of modules whose resources should be treated as dynamic. +_PICKLE_BY_VALUE_MODULES = set() + +# Track the provenance of reconstructed dynamic classes to make it possible to +# reconstruct instances from the matching singleton class definition when +# appropriate and preserve the usual "isinstance" semantics of Python objects. 
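+# Illustrative sketch (not upstream wording): pickling the same
+# interactively-defined class twice reuses a single tracker id, so both
+# payloads unpickle to one reconstructed class and the usual isinstance()
+# semantics survive the round-trip.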
+_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary() +_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary() +_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock() + +PYPY = platform.python_implementation() == "PyPy" + +builtin_code_type = None +if PYPY: + # builtin-code objects only exist in pypy + builtin_code_type = type(float.__new__.__code__) + +_extract_code_globals_cache = weakref.WeakKeyDictionary() + + +def _get_or_create_tracker_id(class_def): + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def) + if class_tracker_id is None: + class_tracker_id = uuid.uuid4().hex + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def + return class_tracker_id + + +def _lookup_class_or_track(class_tracker_id, class_def): + if class_tracker_id is not None: + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault( + class_tracker_id, class_def + ) + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + return class_def + + +def register_pickle_by_value(module): + """Register a module to make it functions and classes picklable by value. + + By default, functions and classes that are attributes of an importable + module are to be pickled by reference, that is relying on re-importing + the attribute from the module at load time. + + If `register_pickle_by_value(module)` is called, all its functions and + classes are subsequently to be pickled by value, meaning that they can + be loaded in Python processes where the module is not importable. + + This is especially useful when developing a module in a distributed + execution environment: restarting the client Python process with the new + source code is enough: there is no need to re-install the new version + of the module on all the worker nodes nor to restart the workers. + + Note: this feature is considered experimental. See the cloudpickle + README.md file for more details and limitations. + """ + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + # In the future, cloudpickle may need a way to access any module registered + # for pickling by value in order to introspect relative imports inside + # functions pickled by value. (see + # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633). + # This access can be ensured by checking that module is present in + # sys.modules at registering time and assuming that it will still be in + # there when accessed during pickling. Another alternative would be to + # store a weakref to the module. Even though cloudpickle does not implement + # this introspection yet, in order to avoid a possible breaking change + # later, we still enforce the presence of module inside sys.modules. + if module.__name__ not in sys.modules: + raise ValueError( + f"{module} was not imported correctly, have you used an " + "`import` statement to access it?" 
+ ) + _PICKLE_BY_VALUE_MODULES.add(module.__name__) + + +def unregister_pickle_by_value(module): + """Unregister that the input module should be pickled by value.""" + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + if module.__name__ not in _PICKLE_BY_VALUE_MODULES: + raise ValueError(f"{module} is not registered for pickle by value") + else: + _PICKLE_BY_VALUE_MODULES.remove(module.__name__) + + +def list_registry_pickle_by_value(): + return _PICKLE_BY_VALUE_MODULES.copy() + + +def _is_registered_pickle_by_value(module): + module_name = module.__name__ + if module_name in _PICKLE_BY_VALUE_MODULES: + return True + while True: + parent_name = module_name.rsplit(".", 1)[0] + if parent_name == module_name: + break + if parent_name in _PICKLE_BY_VALUE_MODULES: + return True + module_name = parent_name + return False + + +def _whichmodule(obj, name): + """Find the module an object belongs to. + + This function differs from ``pickle.whichmodule`` in two ways: + - it does not mangle the cases where obj's module is __main__ and obj was + not found in any module. + - Errors arising during module introspection are ignored, as those errors + are considered unwanted side effects. + """ + module_name = getattr(obj, "__module__", None) + + if module_name is not None: + return module_name + # Protect the iteration by using a copy of sys.modules against dynamic + # modules that trigger imports of other modules upon calls to getattr or + # other threads importing at the same time. + for module_name, module in sys.modules.copy().items(): + # Some modules such as coverage can inject non-module objects inside + # sys.modules + if ( + module_name == "__main__" + or module is None + or not isinstance(module, types.ModuleType) + ): + continue + try: + if _getattribute(module, name)[0] is obj: + return module_name + except Exception: + pass + return None + + +def _should_pickle_by_reference(obj, name=None): + """Test whether an function or a class should be pickled by reference + + Pickling by reference means by that the object (typically a function or a + class) is an attribute of a module that is assumed to be importable in the + target Python environment. Loading will therefore rely on importing the + module and then calling `getattr` on it to access the function or class. + + Pickling by reference is the only option to pickle functions and classes + in the standard library. In cloudpickle the alternative option is to + pickle by value (for instance for interactively or locally defined + functions and classes or for attributes of modules that have been + explicitly registered to be pickled by value. + """ + if isinstance(obj, types.FunctionType) or issubclass(type(obj), type): + module_and_name = _lookup_module_and_qualname(obj, name=name) + if module_and_name is None: + return False + module, name = module_and_name + return not _is_registered_pickle_by_value(module) + + elif isinstance(obj, types.ModuleType): + # We assume that sys.modules is primarily used as a cache mechanism for + # the Python import machinery. Checking if a module has been added in + # is sys.modules therefore a cheap and simple heuristic to tell us + # whether we can assume that a given module could be imported by name + # in another Python process. 
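+        # Illustrative example (not upstream wording): a stdlib module such as
+        # "pickle" sits in sys.modules and is therefore pickled by reference,
+        # whereas a types.ModuleType("made_up") created on the fly never entered
+        # sys.modules and is not.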
+ if _is_registered_pickle_by_value(obj):
+ return False
+ return obj.__name__ in sys.modules
+ else:
+ raise TypeError(
+ "cannot check importability of {} instances".format(type(obj).__name__)
+ )
+
+
+def _lookup_module_and_qualname(obj, name=None):
+ if name is None:
+ name = getattr(obj, "__qualname__", None)
+ if name is None: # pragma: no cover
+ # This used to be needed for Python 2.7 support but is probably not
+ # needed anymore. However we keep the __name__ introspection in case
+ # users of cloudpickle rely on this old behavior for unknown reasons.
+ name = getattr(obj, "__name__", None)
+
+ module_name = _whichmodule(obj, name)
+
+ if module_name is None:
+ # In this case, obj.__module__ is None AND obj was not found in any
+ # imported module. obj is thus treated as dynamic.
+ return None
+
+ if module_name == "__main__":
+ return None
+
+ # Note: if module_name is in sys.modules, the corresponding module is
+ # assumed importable at unpickling time. See #357
+ module = sys.modules.get(module_name, None)
+ if module is None:
+ # The main reason why obj's module would not be imported is that this
+ # module has been dynamically created, using for example
+ # types.ModuleType. The other possibility is that module was removed
+ # from sys.modules after obj was created/imported. But this case is not
+ # supported, as the standard pickle does not support it either.
+ return None
+
+ try:
+ obj2, parent = _getattribute(module, name)
+ except AttributeError:
+ # obj was not found inside the module it points to
+ return None
+ if obj2 is not obj:
+ return None
+ return module, name
+
+
+def _extract_code_globals(co):
+ """Find all global names read or written to by code block co."""
+ out_names = _extract_code_globals_cache.get(co)
+ if out_names is None:
+ # We use a dict with None values instead of a set to get a
+ # deterministic order and avoid introducing non-deterministic pickle
+ # bytes as a result.
+ out_names = {name: None for name in _walk_global_ops(co)}
+
+ # Declaring a function inside another one using the "def ..." syntax
+ # generates a constant code object corresponding to that of the
+ # nested function. As the nested function may itself need global
+ # variables, we need to introspect its code, extract its globals
+ # (looking for code objects in its co_consts attribute) and add the
+ # result to code_globals
+ if co.co_consts:
+ for const in co.co_consts:
+ if isinstance(const, types.CodeType):
+ out_names.update(_extract_code_globals(const))
+
+ _extract_code_globals_cache[co] = out_names
+
+ return out_names
+
+
+def _find_imported_submodules(code, top_level_dependencies):
+ """Find currently imported submodules used by a function.
+
+ Submodules used by a function need to be detected and referenced for the
+ function to work correctly at depickling time. Because submodules can be
+ referenced as attributes of their parent package (``package.submodule``), we
+ need a special introspection technique that does not rely on GLOBAL-related
+ opcodes to find references of them in a code object.
+
+ Example:
+ ```
+ import concurrent.futures
+ import cloudpickle
+ def func():
+ x = concurrent.futures.ThreadPoolExecutor
+ if __name__ == '__main__':
+ cloudpickle.dumps(func)
+ ```
+ The globals extracted by cloudpickle in the function's state include the
+ concurrent package, but not its submodule (here, concurrent.futures), which
+ is the module used by func. _find_imported_submodules will detect the usage
+ of concurrent.futures.
Saving this module alongside func will ensure
+ that calling func once depickled does not fail due to concurrent.futures
+ not being imported.
+ """
+
+ subimports = []
+ # check if any known dependency is an imported package
+ for x in top_level_dependencies:
+ if (
+ isinstance(x, types.ModuleType)
+ and hasattr(x, "__package__")
+ and x.__package__
+ ):
+ # check if the package has any currently loaded sub-imports
+ prefix = x.__name__ + "."
+ # A concurrent thread could mutate sys.modules,
+ # make sure we iterate over a copy to avoid exceptions
+ for name in list(sys.modules):
+ # Older versions of pytest will add a "None" module to
+ # sys.modules.
+ if name is not None and name.startswith(prefix):
+ # check whether the function can address the sub-module
+ tokens = set(name[len(prefix) :].split("."))
+ if not tokens - set(code.co_names):
+ subimports.append(sys.modules[name])
+ return subimports
+
+
+# relevant opcodes
+STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
+DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
+LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
+GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
+HAVE_ARGUMENT = dis.HAVE_ARGUMENT
+EXTENDED_ARG = dis.EXTENDED_ARG
+
+
+_BUILTIN_TYPE_NAMES = {}
+for k, v in types.__dict__.items():
+ if type(v) is type:
+ _BUILTIN_TYPE_NAMES[v] = k
+
+
+def _builtin_type(name):
+ if name == "ClassType": # pragma: no cover
+ # Backward compat to load pickle files generated with cloudpickle
+ # < 1.3 even if loading pickle files from older versions is not
+ # officially supported.
+ return type
+ return getattr(types, name)
+
+
+def _walk_global_ops(code):
+ """Yield referenced names for global-referencing instructions in code."""
+ for instr in dis.get_instructions(code):
+ op = instr.opcode
+ if op in GLOBAL_OPS:
+ yield instr.argval
+
+
+def _extract_class_dict(cls):
+ """Retrieve a copy of the dict of a class without the inherited methods."""
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
+ if len(cls.__bases__) == 1:
+ inherited_dict = cls.__bases__[0].__dict__
+ else:
+ inherited_dict = {}
+ for base in reversed(cls.__bases__):
+ inherited_dict.update(base.__dict__)
+ to_remove = []
+ for name, value in clsdict.items():
+ try:
+ base_value = inherited_dict[name]
+ if value is base_value:
+ to_remove.append(name)
+ except KeyError:
+ pass
+ for name in to_remove:
+ clsdict.pop(name)
+ return clsdict
+
+
+def is_tornado_coroutine(func):
+ """Return whether `func` is a Tornado coroutine function.
+
+ Running coroutines are not supported.
+ """
+ warnings.warn(
+ "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
+ "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
+ "directly instead.",
+ category=DeprecationWarning,
+ )
+ if "tornado.gen" not in sys.modules:
+ return False
+ gen = sys.modules["tornado.gen"]
+ if not hasattr(gen, "is_coroutine_function"):
+ # Tornado version is too old
+ return False
+ return gen.is_coroutine_function(func)
+
+
+def subimport(name):
+ # We cannot simply do `return __import__(name)`: indeed, if ``name`` is
+ # the name of a submodule, __import__ will return the top-level root module
+ # of this submodule. For instance, __import__('os.path') returns the `os`
+ # module.
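+ # Illustrative contrast (added note, not part of the original source):
+ # __import__("os.path") is sys.modules["os"] # -> True (root module)
+ # subimport("os.path") is sys.modules["os.path"] # -> True (submodule)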
+ __import__(name) + return sys.modules[name] + + +def dynamic_subimport(name, vars): + mod = types.ModuleType(name) + mod.__dict__.update(vars) + mod.__dict__["__builtins__"] = builtins.__dict__ + return mod + + +def _get_cell_contents(cell): + try: + return cell.cell_contents + except ValueError: + # Handle empty cells explicitly with a sentinel value. + return _empty_cell_value + + +def instance(cls): + """Create a new instance of a class. + + Parameters + ---------- + cls : type + The class to create an instance of. + + Returns + ------- + instance : cls + A new instance of ``cls``. + """ + return cls() + + +@instance +class _empty_cell_value: + """Sentinel for empty closures.""" + + @classmethod + def __reduce__(cls): + return cls.__name__ + + +def _make_function(code, globals, name, argdefs, closure): + # Setting __builtins__ in globals is needed for nogil CPython. + globals["__builtins__"] = __builtins__ + return types.FunctionType(code, globals, name, argdefs, closure) + + +def _make_empty_cell(): + if False: + # trick the compiler into creating an empty cell in our lambda + cell = None + raise AssertionError("this route should not be executed") + + return (lambda: cell).__closure__[0] + + +def _make_cell(value=_empty_cell_value): + cell = _make_empty_cell() + if value is not _empty_cell_value: + cell.cell_contents = value + return cell + + +def _make_skeleton_class( + type_constructor, name, bases, type_kwargs, class_tracker_id, extra +): + """Build dynamic class with an empty __dict__ to be filled once memoized + + If class_tracker_id is not None, try to lookup an existing class definition + matching that id. If none is found, track a newly reconstructed class + definition under that id so that other instances stemming from the same + class id will also reuse this class definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. + """ + skeleton_class = types.new_class( + name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs) + ) + return _lookup_class_or_track(class_tracker_id, skeleton_class) + + +def _make_skeleton_enum( + bases, name, qualname, members, module, class_tracker_id, extra +): + """Build dynamic enum with an empty __dict__ to be filled once memoized + + The creation of the enum class is inspired by the code of + EnumMeta._create_. + + If class_tracker_id is not None, try to lookup an existing enum definition + matching that id. If none is found, track a newly reconstructed enum + definition under that id so that other instances stemming from the same + class id will also reuse this enum definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. 
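+
+ A hypothetical reconstruction call (illustrative sketch, not part of
+ the original docstring; assumes ``import enum``):
+
+ Color = _make_skeleton_enum(
+ (enum.Enum,), "Color", "Color", {"RED": 1}, "__main__", None, None
+ )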
+ """ + # enums always inherit from their base Enum class at the last position in + # the list of base classes: + enum_base = bases[-1] + metacls = enum_base.__class__ + classdict = metacls.__prepare__(name, bases) + + for member_name, member_value in members.items(): + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, name, bases, classdict) + enum_class.__module__ = module + enum_class.__qualname__ = qualname + + return _lookup_class_or_track(class_tracker_id, enum_class) + + +def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id): + tv = typing.TypeVar( + name, + *constraints, + bound=bound, + covariant=covariant, + contravariant=contravariant, + ) + return _lookup_class_or_track(class_tracker_id, tv) + + +def _decompose_typevar(obj): + return ( + obj.__name__, + obj.__bound__, + obj.__constraints__, + obj.__covariant__, + obj.__contravariant__, + _get_or_create_tracker_id(obj), + ) + + +def _typevar_reduce(obj): + # TypeVar instances require the module information hence why we + # are not using the _should_pickle_by_reference directly + module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__) + + if module_and_name is None: + return (_make_typevar, _decompose_typevar(obj)) + elif _is_registered_pickle_by_value(module_and_name[0]): + return (_make_typevar, _decompose_typevar(obj)) + + return (getattr, module_and_name) + + +def _get_bases(typ): + if "__orig_bases__" in getattr(typ, "__dict__", {}): + # For generic types (see PEP 560) + # Note that simply checking `hasattr(typ, '__orig_bases__')` is not + # correct. Subclasses of a fully-parameterized generic class does not + # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')` + # will return True because it's defined in the base class. + bases_attr = "__orig_bases__" + else: + # For regular class objects + bases_attr = "__bases__" + return getattr(typ, bases_attr) + + +def _make_dict_keys(obj, is_ordered=False): + if is_ordered: + return OrderedDict.fromkeys(obj).keys() + else: + return dict.fromkeys(obj).keys() + + +def _make_dict_values(obj, is_ordered=False): + if is_ordered: + return OrderedDict((i, _) for i, _ in enumerate(obj)).values() + else: + return {i: _ for i, _ in enumerate(obj)}.values() + + +def _make_dict_items(obj, is_ordered=False): + if is_ordered: + return OrderedDict(obj).items() + else: + return obj.items() + + +# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS +# ------------------------------------------------- + + +def _class_getnewargs(obj): + type_kwargs = {} + if "__module__" in obj.__dict__: + type_kwargs["__module__"] = obj.__module__ + + __dict__ = obj.__dict__.get("__dict__", None) + if isinstance(__dict__, property): + type_kwargs["__dict__"] = __dict__ + + return ( + type(obj), + obj.__name__, + _get_bases(obj), + type_kwargs, + _get_or_create_tracker_id(obj), + None, + ) + + +def _enum_getnewargs(obj): + members = {e.name: e.value for e in obj} + return ( + obj.__bases__, + obj.__name__, + obj.__qualname__, + members, + obj.__module__, + _get_or_create_tracker_id(obj), + None, + ) + + +# COLLECTION OF OBJECTS RECONSTRUCTORS +# ------------------------------------ +def _file_reconstructor(retval): + return retval + + +# COLLECTION OF OBJECTS STATE GETTERS +# ----------------------------------- + + +def _function_getstate(func): + # - Put func's dynamic attributes (stored in func.__dict__) in state. 
These + # attributes will be restored at unpickling time using + # f.__dict__.update(state) + # - Put func's members into slotstate. Such attributes will be restored at + # unpickling time by iterating over slotstate and calling setattr(func, + # slotname, slotvalue) + slotstate = { + "__name__": func.__name__, + "__qualname__": func.__qualname__, + "__annotations__": func.__annotations__, + "__kwdefaults__": func.__kwdefaults__, + "__defaults__": func.__defaults__, + "__module__": func.__module__, + "__doc__": func.__doc__, + "__closure__": func.__closure__, + } + + f_globals_ref = _extract_code_globals(func.__code__) + f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__} + + if func.__closure__ is not None: + closure_values = list(map(_get_cell_contents, func.__closure__)) + else: + closure_values = () + + # Extract currently-imported submodules used by func. Storing these modules + # in a smoke _cloudpickle_subimports attribute of the object's state will + # trigger the side effect of importing these modules at unpickling time + # (which is necessary for func to work correctly once depickled) + slotstate["_cloudpickle_submodules"] = _find_imported_submodules( + func.__code__, itertools.chain(f_globals.values(), closure_values) + ) + slotstate["__globals__"] = f_globals + + state = func.__dict__ + return state, slotstate + + +def _class_getstate(obj): + clsdict = _extract_class_dict(obj) + clsdict.pop("__weakref__", None) + + if issubclass(type(obj), abc.ABCMeta): + # If obj is an instance of an ABCMeta subclass, don't pickle the + # cache/negative caches populated during isinstance/issubclass + # checks, but pickle the list of registered subclasses of obj. + clsdict.pop("_abc_cache", None) + clsdict.pop("_abc_negative_cache", None) + clsdict.pop("_abc_negative_cache_version", None) + registry = clsdict.pop("_abc_registry", None) + if registry is None: + # The abc caches and registered subclasses of a + # class are bundled into the single _abc_impl attribute + clsdict.pop("_abc_impl", None) + (registry, _, _, _) = abc._get_dump(obj) + + clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] + else: + # In the above if clause, registry is a set of weakrefs -- in + # this case, registry is a WeakSet + clsdict["_abc_impl"] = [type_ for type_ in registry] + + if "__slots__" in clsdict: + # pickle string length optimization: member descriptors of obj are + # created automatically from obj's __slots__ attribute, no need to + # save them in obj's state + if isinstance(obj.__slots__, str): + clsdict.pop(obj.__slots__) + else: + for k in obj.__slots__: + clsdict.pop(k, None) + + clsdict.pop("__dict__", None) # unpicklable property object + + return (clsdict, {}) + + +def _enum_getstate(obj): + clsdict, slotstate = _class_getstate(obj) + + members = {e.name: e.value for e in obj} + # Cleanup the clsdict that will be passed to _make_skeleton_enum: + # Those attributes are already handled by the metaclass. + for attrname in [ + "_generate_next_value_", + "_member_names_", + "_member_map_", + "_member_type_", + "_value2member_map_", + ]: + clsdict.pop(attrname, None) + for member in members: + clsdict.pop(member) + # Special handling of Enum subclasses + return clsdict, slotstate + + +# COLLECTIONS OF OBJECTS REDUCERS +# ------------------------------- +# A reducer is a function taking a single argument (obj), and that returns a +# tuple with all the necessary data to re-construct obj. 
Apart from a few +# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to +# correctly pickle an object. +# While many built-in objects (Exceptions objects, instances of the "object" +# class, etc), are shipped with their own built-in reducer (invoked using +# obj.__reduce__), some do not. The following methods were created to "fill +# these holes". + + +def _code_reduce(obj): + """code object reducer.""" + # If you are not sure about the order of arguments, take a look at help + # of the specific type from types, for example: + # >>> from types import CodeType + # >>> help(CodeType) + if hasattr(obj, "co_exceptiontable"): + # Python 3.11 and later: there are some new attributes + # related to the enhanced exceptions. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_qualname, + obj.co_firstlineno, + obj.co_linetable, + obj.co_exceptiontable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_linetable"): + # Python 3.10 and later: obj.co_lnotab is deprecated and constructor + # expects obj.co_linetable instead. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_linetable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_nmeta"): # pragma: no cover + # "nogil" Python: modified attributes from 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_framesize, + obj.co_ndefaultargs, + obj.co_nmeta, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_exc_handlers, + obj.co_jump_table, + obj.co_freevars, + obj.co_cellvars, + obj.co_free2reg, + obj.co_cell2reg, + ) + else: + # Backward compat for 3.8 and 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + return types.CodeType, args + + +def _cell_reduce(obj): + """Cell (containing values of a function's free variables) reducer.""" + try: + obj.cell_contents + except ValueError: # cell is empty + return _make_empty_cell, () + else: + return _make_cell, (obj.cell_contents,) + + +def _classmethod_reduce(obj): + orig_func = obj.__func__ + return type(obj), (orig_func,) + + +def _file_reduce(obj): + """Save a file.""" + import io + + if not hasattr(obj, "name") or not hasattr(obj, "mode"): + raise pickle.PicklingError( + "Cannot pickle files that do not map to an actual file" + ) + if obj is sys.stdout: + return getattr, (sys, "stdout") + if obj is sys.stderr: + return getattr, (sys, "stderr") + if obj is sys.stdin: + raise pickle.PicklingError("Cannot pickle standard input") + if obj.closed: + raise pickle.PicklingError("Cannot pickle closed files") + if hasattr(obj, "isatty") and obj.isatty(): + raise pickle.PicklingError("Cannot pickle files that map to tty objects") + if "r" not in obj.mode and "+" not in obj.mode: + raise pickle.PicklingError( + "Cannot pickle files that are not 
opened for reading: %s" % obj.mode + ) + + name = obj.name + + retval = io.StringIO() + + try: + # Read the whole file + curloc = obj.tell() + obj.seek(0) + contents = obj.read() + obj.seek(curloc) + except OSError as e: + raise pickle.PicklingError( + "Cannot pickle file %s as it cannot be read" % name + ) from e + retval.write(contents) + retval.seek(curloc) + + retval.name = name + return _file_reconstructor, (retval,) + + +def _getset_descriptor_reduce(obj): + return getattr, (obj.__objclass__, obj.__name__) + + +def _mappingproxy_reduce(obj): + return types.MappingProxyType, (dict(obj),) + + +def _memoryview_reduce(obj): + return bytes, (obj.tobytes(),) + + +def _module_reduce(obj): + if _should_pickle_by_reference(obj): + return subimport, (obj.__name__,) + else: + # Some external libraries can populate the "__builtins__" entry of a + # module's `__dict__` with unpicklable objects (see #316). For that + # reason, we do not attempt to pickle the "__builtins__" entry, and + # restore a default value for it at unpickling time. + state = obj.__dict__.copy() + state.pop("__builtins__", None) + return dynamic_subimport, (obj.__name__, state) + + +def _method_reduce(obj): + return (types.MethodType, (obj.__func__, obj.__self__)) + + +def _logger_reduce(obj): + return logging.getLogger, (obj.name,) + + +def _root_logger_reduce(obj): + return logging.getLogger, () + + +def _property_reduce(obj): + return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__) + + +def _weakset_reduce(obj): + return weakref.WeakSet, (list(obj),) + + +def _dynamic_class_reduce(obj): + """Save a class that can't be referenced as a module attribute. + + This method is used to serialize classes that are defined inside + functions, or that otherwise can't be serialized as attribute lookups + from importable modules. 
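+
+ A typical example of such a class (illustrative, not part of the
+ original docstring):
+
+ def make_class():
+ class DynamicClass: # defined inside a function, so it cannot
+ pass # be pickled as a module attribute lookup
+ return DynamicClass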
+ """ + if Enum is not None and issubclass(obj, Enum): + return ( + _make_skeleton_enum, + _enum_getnewargs(obj), + _enum_getstate(obj), + None, + None, + _class_setstate, + ) + else: + return ( + _make_skeleton_class, + _class_getnewargs(obj), + _class_getstate(obj), + None, + None, + _class_setstate, + ) + + +def _class_reduce(obj): + """Select the reducer depending on the dynamic nature of the class obj.""" + if obj is type(None): # noqa + return type, (None,) + elif obj is type(Ellipsis): + return type, (Ellipsis,) + elif obj is type(NotImplemented): + return type, (NotImplemented,) + elif obj in _BUILTIN_TYPE_NAMES: + return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],) + elif not _should_pickle_by_reference(obj): + return _dynamic_class_reduce(obj) + return NotImplemented + + +def _dict_keys_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_keys, (list(obj),) + + +def _dict_values_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_values, (list(obj),) + + +def _dict_items_reduce(obj): + return _make_dict_items, (dict(obj),) + + +def _odict_keys_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_keys, (list(obj), True) + + +def _odict_values_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_values, (list(obj), True) + + +def _odict_items_reduce(obj): + return _make_dict_items, (dict(obj), True) + + +def _dataclass_field_base_reduce(obj): + return _get_dataclass_field_type_sentinel, (obj.name,) + + +# COLLECTIONS OF OBJECTS STATE SETTERS +# ------------------------------------ +# state setters are called at unpickling time, once the object is created and +# it has to be updated to how it was at unpickling time. + + +def _function_setstate(obj, state): + """Update the state of a dynamic function. + + As __closure__ and __globals__ are readonly attributes of a function, we + cannot rely on the native setstate routine of pickle.load_build, that calls + setattr on items of the slotstate. Instead, we have to modify them inplace. + """ + state, slotstate = state + obj.__dict__.update(state) + + obj_globals = slotstate.pop("__globals__") + obj_closure = slotstate.pop("__closure__") + # _cloudpickle_subimports is a set of submodules that must be loaded for + # the pickled function to work correctly at unpickling time. 
Now that these + # submodules are depickled (hence imported), they can be removed from the + # object's state (the object state only served as a reference holder to + # these submodules) + slotstate.pop("_cloudpickle_submodules") + + obj.__globals__.update(obj_globals) + obj.__globals__["__builtins__"] = __builtins__ + + if obj_closure is not None: + for i, cell in enumerate(obj_closure): + try: + value = cell.cell_contents + except ValueError: # cell is empty + continue + obj.__closure__[i].cell_contents = value + + for k, v in slotstate.items(): + setattr(obj, k, v) + + +def _class_setstate(obj, state): + state, slotstate = state + registry = None + for attrname, attr in state.items(): + if attrname == "_abc_impl": + registry = attr + else: + setattr(obj, attrname, attr) + if registry is not None: + for subclass in registry: + obj.register(subclass) + + return obj + + +# COLLECTION OF DATACLASS UTILITIES +# --------------------------------- +# There are some internal sentinel values whose identity must be preserved when +# unpickling dataclass fields. Each sentinel value has a unique name that we can +# use to retrieve its identity at unpickling time. + + +_DATACLASSE_FIELD_TYPE_SENTINELS = { + dataclasses._FIELD.name: dataclasses._FIELD, + dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR, + dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR, +} + + +def _get_dataclass_field_type_sentinel(name): + return _DATACLASSE_FIELD_TYPE_SENTINELS[name] + + +class Pickler(pickle.Pickler): + # set of reducers defined and used by cloudpickle (private) + _dispatch_table = {} + _dispatch_table[classmethod] = _classmethod_reduce + _dispatch_table[io.TextIOWrapper] = _file_reduce + _dispatch_table[logging.Logger] = _logger_reduce + _dispatch_table[logging.RootLogger] = _root_logger_reduce + _dispatch_table[memoryview] = _memoryview_reduce + _dispatch_table[property] = _property_reduce + _dispatch_table[staticmethod] = _classmethod_reduce + _dispatch_table[CellType] = _cell_reduce + _dispatch_table[types.CodeType] = _code_reduce + _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce + _dispatch_table[types.ModuleType] = _module_reduce + _dispatch_table[types.MethodType] = _method_reduce + _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce + _dispatch_table[weakref.WeakSet] = _weakset_reduce + _dispatch_table[typing.TypeVar] = _typevar_reduce + _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce + _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce + _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce + _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce + _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce + _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce + _dispatch_table[abc.abstractmethod] = _classmethod_reduce + _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce + _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce + _dispatch_table[abc.abstractproperty] = _property_reduce + _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce + + dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table) + + # function reducers are defined as instance methods of cloudpickle.Pickler + # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref) + def _dynamic_function_reduce(self, func): + """Reduce a function that is not pickleable via attribute lookup.""" + newargs = self._function_getnewargs(func) + state = 
_function_getstate(func)
+ return (_make_function, newargs, state, None, None, _function_setstate)
+
+ def _function_reduce(self, obj):
+ """Reducer for function objects.
+
+ If obj is a top-level attribute of a file-backed module, this reducer
+ returns NotImplemented, making the cloudpickle.Pickler fall back to
+ traditional pickle.Pickler routines to save obj. Otherwise, it reduces
+ obj using a custom cloudpickle reducer designed specifically to handle
+ dynamic functions.
+ """
+ if _should_pickle_by_reference(obj):
+ return NotImplemented
+ else:
+ return self._dynamic_function_reduce(obj)
+
+ def _function_getnewargs(self, func):
+ code = func.__code__
+
+ # base_globals represents the future global namespace of func at
+ # unpickling time. Looking it up and storing it in
+ # cloudpickle.Pickler.globals_ref allows functions sharing the same
+ # globals at pickling time to also share them once unpickled, on one
+ # condition: since globals_ref is an attribute of a cloudpickle.Pickler
+ # instance, and a new cloudpickle.Pickler is created each time
+ # cloudpickle.dump or cloudpickle.dumps is called, functions also need
+ # to be saved within the same invocation of
+ # cloudpickle.dump/cloudpickle.dumps (for example:
+ # cloudpickle.dumps([f1, f2])). There is no such limitation when using
+ # cloudpickle.Pickler.dump, as long as the multiple invocations are
+ # bound to the same cloudpickle.Pickler instance.
+ base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
+
+ if base_globals == {}:
+ # Add module attributes used to resolve relative import
+ # instructions inside func.
+ for k in ["__package__", "__name__", "__path__", "__file__"]:
+ if k in func.__globals__:
+ base_globals[k] = func.__globals__[k]
+
+ # Do not bind the free variables before the function is created to
+ # avoid infinite recursion.
+ if func.__closure__ is None:
+ closure = None
+ else:
+ closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))
+
+ return code, base_globals, None, None, closure
+
+ def dump(self, obj):
+ try:
+ return super().dump(obj)
+ except RuntimeError as e:
+ if len(e.args) > 0 and "recursion" in e.args[0]:
+ msg = "Could not pickle object as excessively deep recursion required."
+ raise pickle.PicklingError(msg) from e
+ else:
+ raise
+
+ def __init__(self, file, protocol=None, buffer_callback=None):
+ if protocol is None:
+ protocol = DEFAULT_PROTOCOL
+ super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
+ # map functions' __globals__ attribute ids, to ensure that functions
+ # sharing the same global namespace at pickling time also share
+ # their global namespace at unpickling time.
+ self.globals_ref = {}
+ self.proto = int(protocol)
+
+ if not PYPY:
+ # pickle.Pickler is the C implementation of the CPython pickler and
+ # therefore we rely on the reducer_override method to customize the
+ # pickler behavior.
+
+ # `cloudpickle.Pickler.dispatch` is only left for backward
+ # compatibility - note that when using protocol 5,
+ # `cloudpickle.Pickler.dispatch` is not an extension of
+ # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
+ # subclasses the C-implemented `pickle.Pickler`, which does not expose
+ # a `dispatch` attribute.
Earlier versions of `cloudpickle.Pickler`
+ # used `cloudpickle.Pickler.dispatch` as a class-level attribute
+ # storing all reducers implemented by cloudpickle, but the attribute
+ # name was not a great choice because it would collide with a
+ # similarly named attribute in the pure-Python `pickle._Pickler`
+ # implementation in the standard library.
+ dispatch = dispatch_table
+
+ # Implementation of the reducer_override callback, in order to
+ # efficiently serialize dynamic functions and classes by subclassing
+ # the C-implemented `pickle.Pickler`.
+ # TODO: decorrelate reducer_override (which is tied to CPython's
+ # implementation; would it make sense to backport it to pypy?) and
+ # pickle's protocol 5, which is implementation agnostic. Currently, the
+ # availability of both notions coincide on CPython's pickle, but it may
+ # not be the case anymore when pypy implements protocol 5.
+
+ def reducer_override(self, obj):
+ """Type-agnostic reducing callback for function and classes.
+
+ For performance reasons, subclasses of the C `pickle.Pickler` class
+ cannot register custom reducers for functions and classes in the
+ dispatch_table attribute. Reducers for such types must instead be
+ implemented via the special `reducer_override` method.
+
+ Note that this method will be called for any object except a few
+ builtin-types (int, lists, dicts etc.), which differs from reducers
+ in the Pickler's dispatch_table, each of them being invoked for
+ objects of a specific type only.
+
+ This property comes in handy for classes: although most classes are
+ instances of the ``type`` metaclass, some of them can be instances
+ of other custom metaclasses (such as enum.EnumMeta for example). In
+ particular, the metaclass will likely not be known in advance, and
+ thus cannot be special-cased using an entry in the dispatch_table.
+ reducer_override, among other things, allows us to register a
+ reducer that will be called for any class, independently of its
+ type.
+
+ Notes:
+
+ * reducer_override has priority over dispatch_table-registered
+ reducers.
+ * reducer_override can be used to fix other limitations of
+ cloudpickle for other types that suffered from type-specific
+ reducers, such as Exceptions. See
+ https://github.com/cloudpipe/cloudpickle/issues/248
+ """
+ t = type(obj)
+ try:
+ is_anyclass = issubclass(t, type)
+ except TypeError: # t is not a class (old Boost; see SF #502085)
+ is_anyclass = False
+
+ if is_anyclass:
+ return _class_reduce(obj)
+ elif isinstance(obj, types.FunctionType):
+ return self._function_reduce(obj)
+ else:
+ # fallback to save_global, including the Pickler's
+ # dispatch_table
+ return NotImplemented
+
+ else:
+ # When reducer_override is not available, hack the pure-Python
+ # Pickler's types.FunctionType and type savers. Note: the type saver
+ # must override Pickler.save_global, because pickle.py contains a
+ # hard-coded call to save_global when pickling meta-classes.
+ dispatch = pickle.Pickler.dispatch.copy()
+
+ def _save_reduce_pickle5(
+ self,
+ func,
+ args,
+ state=None,
+ listitems=None,
+ dictitems=None,
+ state_setter=None,
+ obj=None,
+ ):
+ save = self.save
+ write = self.write
+ self.save_reduce(
+ func,
+ args,
+ state=None,
+ listitems=listitems,
+ dictitems=dictitems,
+ obj=obj,
+ )
+ # backport of the Python 3.8 state_setter pickle operations
+ save(state_setter)
+ save(obj) # simple BINGET opcode as obj is already memoized.
+ save(state)
+ write(pickle.TUPLE2)
+ # Trigger a state_setter(obj, state) function call.
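+ # Illustrative note (not part of the original source): the stack
+ # here is [..., state_setter, (obj, state)], so REDUCE replaces
+ # the top two items with the result of state_setter(obj, state).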
+ write(pickle.REDUCE)
+ # The purpose of state_setter is to carry out an
+ # in-place modification of obj. We do not care about what the
+ # method might return, so its output is eventually removed from
+ # the stack.
+ write(pickle.POP)
+
+ def save_global(self, obj, name=None, pack=struct.pack):
+ """Main dispatch method.
+
+ The name of this method is somewhat misleading: all types get
+ dispatched here.
+ """
+ if obj is type(None): # noqa
+ return self.save_reduce(type, (None,), obj=obj)
+ elif obj is type(Ellipsis):
+ return self.save_reduce(type, (Ellipsis,), obj=obj)
+ elif obj is type(NotImplemented):
+ return self.save_reduce(type, (NotImplemented,), obj=obj)
+ elif obj in _BUILTIN_TYPE_NAMES:
+ return self.save_reduce(
+ _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
+ )
+
+ if name is not None:
+ super().save_global(obj, name=name)
+ elif not _should_pickle_by_reference(obj, name=name):
+ self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
+ else:
+ super().save_global(obj, name=name)
+
+ dispatch[type] = save_global
+
+ def save_function(self, obj, name=None):
+ """Registered with the dispatch to handle all function types.
+
+ Determines what kind of function obj is (e.g. lambda, defined at
+ interactive prompt, etc.) and handles the pickling appropriately.
+ """
+ if _should_pickle_by_reference(obj, name=name):
+ return super().save_global(obj, name=name)
+ elif PYPY and isinstance(obj.__code__, builtin_code_type):
+ return self.save_pypy_builtin_func(obj)
+ else:
+ return self._save_reduce_pickle5(
+ *self._dynamic_function_reduce(obj), obj=obj
+ )
+
+ def save_pypy_builtin_func(self, obj):
+ """Save the PyPy equivalent of builtin functions.
+
+ PyPy does not have the concept of builtin-functions. Instead,
+ builtin-functions are simple function instances, but with a
+ builtin-code attribute.
+ Most of the time, builtin functions should be pickled by attribute.
+ But PyPy has flaky support for __qualname__, so some builtin
+ functions such as float.__new__ will be classified as dynamic. For
+ this reason only, we created this special routine. Because
+ builtin-functions are not expected to have closure or globals,
+ there is no additional hack (compared to the one already implemented
+ in pickle) to protect ourselves from reference cycles. A simple
+ (reconstructor, newargs, obj.__dict__) tuple is passed to
+ save_reduce. Note also that PyPy improved its support for
+ __qualname__ in v3.6, so this routine should be removed when
+ cloudpickle supports only PyPy 3.6 and later.
+ """
+ rv = (
+ types.FunctionType,
+ (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
+ obj.__dict__,
+ )
+ self.save_reduce(*rv, obj=obj)
+
+ dispatch[types.FunctionType] = save_function
+
+
+# Shorthands similar to pickle.dump/pickle.dumps
+
+
+def dump(obj, file, protocol=None, buffer_callback=None):
+ """Serialize obj as bytes streamed into file
+
+ protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
+ pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
+ speed between processes running the same Python version.
+
+ Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
+ compatibility with older versions of Python (although this is not always
+ guaranteed to work because cloudpickle relies on some internal
+ implementation details that can change from one Python version to the
+ next).
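+
+ A minimal usage sketch (illustrative, not part of the original
+ docstring; the file name is hypothetical):
+
+ import cloudpickle
+ with open("payload.pkl", "wb") as f:
+ cloudpickle.dump(lambda x: x + 1, f)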
+ """ + Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj) + + +def dumps(obj, protocol=None, buffer_callback=None): + """Serialize obj as a string of bytes allocated in memory + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. + + Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure + compatibility with older versions of Python (although this is not always + guaranteed to work because cloudpickle relies on some internal + implementation details that can change from one Python version to the + next). + """ + with io.BytesIO() as file: + cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback) + cp.dump(obj) + return file.getvalue() + + +# Include pickles unloading functions in this namespace for convenience. +load, loads = pickle.load, pickle.loads + +# Backward compat alias. +CloudPickler = Pickler diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..52d6732e44ebcc0053b24969943f7c3b742268bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py @@ -0,0 +1,13 @@ +"""Compatibility module. + +It can be necessary to load files generated by previous versions of cloudpickle +that rely on symbols being defined under the `cloudpickle.cloudpickle_fast` +namespace. + +See: tests/test_backward_compat.py +""" +from . import cloudpickle + + +def __getattr__(name): + return getattr(cloudpickle, name) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..905076e83ac585877d622a2bc8fd0ce6f28ee636 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78cf292ee865345ba5f1a97170c146aa5b726049 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aee554dbde22e3ed6560e7f4c9c215abc0200d4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f0f9ab551b93ada0ca0050729db5c784500321c Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b44fc19674935257cd8253651e0a951cc78e0a5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dde4c60c60ab4146bc3dd5fdc68a11395d6244d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d339aa644599cf5728394200abdfa19a1256aa02 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py @@ -0,0 +1,14 @@ +import os +from multiprocessing import synchronize + +from .context import get_context + + +def _make_name(): + return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}" + + +# monkey patch the name creation for multiprocessing +synchronize.SemLock._make_name = staticmethod(_make_name) + +__all__ = ["get_context"] diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc4048f3cb53c5ad43a02ecd3ea2375222824b8c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e15b41db900882f3b1d5a4c1e052350c4e00cf9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9d68df530c152b6974cfc89f14f67c275c78118 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..01d87916e01661a23bc80f9ea09c9b43184bee19 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..946bcf3e3c4f0209c54667812273b7c6c6a5d2a7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c9a2c31ec54475504e34c4f905352f7ba4c30fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c070ebeb27774864011a954ac0fa5db4c9e799c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e877a922bb98f3c3b998d73e8244c5770c378066 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6d5803ee257bca41716fbe55972407d476d8887 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ee5b77271eae3930bf9bab4747433305f4cb5ba Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d37787db5a3ce8a9ef86ac14301594e69628bab7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57624b545ac0fac879ad202bab8c794a82644e65
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..afde6396cecddabaebf13305a18dfeea8c38de85
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2201d097e505b4fff07c3e2e404197965adcdc1d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0f590317e75752fdd0b4962b9f3ecbbbaf50b37
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py
@@ -0,0 +1,378 @@
+###############################################################################
+# Basic context management with LokyContext
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/context.py
+# * Create a context ensuring loky uses only objects that are compatible
+# * Add LokyContext to the list of contexts of multiprocessing so loky can be
+# used with multiprocessing.set_start_method
+# * Implement a CFS-aware and physical-core-aware cpu_count function.
+#
+import os
+import sys
+import math
+import subprocess
+import traceback
+import warnings
+import multiprocessing as mp
+from multiprocessing import get_context as mp_get_context
+from multiprocessing.context import BaseContext
+
+
+from .process import LokyProcess, LokyInitMainProcess
+
+# Apparently, on older Python versions, loky cannot use 61 workers on Windows,
+# only 60: ¯\_(ツ)_/¯
+if sys.version_info >= (3, 8):
+ from concurrent.futures.process import _MAX_WINDOWS_WORKERS
+
+ if sys.version_info < (3, 10):
+ _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
+else:
+ # compat for versions before 3.8 which do not define this.
+ _MAX_WINDOWS_WORKERS = 60
+
+START_METHODS = ["loky", "loky_init_main", "spawn"]
+if sys.platform != "win32":
+ START_METHODS += ["fork", "forkserver"]
+
+_DEFAULT_START_METHOD = None
+
+# Cache for the number of physical cores to avoid repeating subprocess calls.
+# It should not change during the lifetime of the program.
+physical_cores_cache = None
+
+
+def get_context(method=None):
+ # Try to overload the default context
+ method = method or _DEFAULT_START_METHOD or "loky"
+ if method == "fork":
+ # If 'fork' is explicitly requested, warn user about potential issues.
+ warnings.warn(
+ "`fork` start method should not be used with "
+ "`loky` as it does not respect POSIX. Try using "
+ "`spawn` or `loky` instead.",
+ UserWarning,
+ )
+ try:
+ return mp_get_context(method)
+ except ValueError:
+ raise ValueError(
+ f"Unknown context '{method}'. Value should be in "
+ f"{START_METHODS}."
+ )
+
+
+def set_start_method(method, force=False):
+ global _DEFAULT_START_METHOD
+ if _DEFAULT_START_METHOD is not None and not force:
+ raise RuntimeError("context has already been set")
+ assert method is None or method in START_METHODS, (
+ f"'{method}' is not a valid start_method. It should be in "
+ f"{START_METHODS}"
+ )
+
+ _DEFAULT_START_METHOD = method
+
+
+def get_start_method():
+ return _DEFAULT_START_METHOD
+
+
+def cpu_count(only_physical_cores=False):
+ """Return the number of CPUs the current process can use.
+
+ The returned number of CPUs accounts for:
+ * the number of CPUs in the system, as given by
+ ``multiprocessing.cpu_count``;
+ * the CPU affinity settings of the current process
+ (available on some Unix systems);
+ * Cgroup CPU bandwidth limit (available on Linux only, typically
+ set by docker and similar container orchestration systems);
+ * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
+ and is given as the minimum of these constraints.
+
+ If ``only_physical_cores`` is True, return the number of physical cores
+ instead of the number of logical cores (hyperthreading / SMT). Note that
+ this option is not enforced if the number of usable cores is controlled in
+ any other way such as: process affinity, Cgroup restricted CPU bandwidth
+ or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
+ cores is not found, return the number of logical cores.
+
+ Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
+ Python < 3.10), see:
+ https://bugs.python.org/issue26903.
+
+ It is also always greater than or equal to 1.
+ """
+ # Note: os.cpu_count() is allowed to return None in its docstring
+ os_cpu_count = os.cpu_count() or 1
+ if sys.platform == "win32":
+ # On Windows, attempting to use more than 61 CPUs would result in an
+ # OS-level error. See https://bugs.python.org/issue26903. According to
+ # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
+ # it might be possible to go beyond with a lot of extra work but this
+ # does not look easy.
+ os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)
+
+ cpu_count_user = _cpu_count_user(os_cpu_count)
+ aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
+
+ if not only_physical_cores:
+ return aggregate_cpu_count
+
+ if cpu_count_user < os_cpu_count:
+ # Respect user setting
+ return max(cpu_count_user, 1)
+
+ cpu_count_physical, exception = _count_physical_cores()
+ if cpu_count_physical != "not found":
+ return cpu_count_physical
+
+ # Fallback to default behavior
+ if exception is not None:
+ # warns only the first time
+ warnings.warn(
+ "Could not find the number of physical cores for the "
+ f"following reason:\n{exception}\n"
+ "Returning the number of logical cores instead. You can "
+ "silence this warning by setting LOKY_MAX_CPU_COUNT to "
+ "the number of cores you want to use."
+ )
+ traceback.print_tb(exception.__traceback__)
+
+ return aggregate_cpu_count
+
+
+def _cpu_count_cgroup(os_cpu_count):
+ # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
+ cpu_max_fname = "/sys/fs/cgroup/cpu.max"
+ cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
+ cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
+ if os.path.exists(cpu_max_fname):
+ # cgroup v2
+ # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
+ with open(cpu_max_fname) as fh:
+ cpu_quota_us, cpu_period_us = fh.read().strip().split()
+ elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
+ # cgroup v1
+ # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
+ with open(cfs_quota_fname) as fh:
+ cpu_quota_us = fh.read().strip()
+ with open(cfs_period_fname) as fh:
+ cpu_period_us = fh.read().strip()
+ else:
+ # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
+ cpu_quota_us = "max"
+ cpu_period_us = 100_000 # unused, for consistency with default values
+
+ if cpu_quota_us == "max":
+ # No active Cgroup quota on a Cgroup-capable platform
+ return os_cpu_count
+ else:
+ cpu_quota_us = int(cpu_quota_us)
+ cpu_period_us = int(cpu_period_us)
+ if cpu_quota_us > 0 and cpu_period_us > 0:
+ return math.ceil(cpu_quota_us / cpu_period_us)
+ else: # pragma: no cover
+ # Setting a negative cpu_quota_us value is a valid way to disable
+ # cgroup CPU bandwidth limits
+ return os_cpu_count
+
+
+def _cpu_count_affinity(os_cpu_count):
+ # Number of available CPUs given affinity settings
+ if hasattr(os, "sched_getaffinity"):
+ try:
+ return len(os.sched_getaffinity(0))
+ except NotImplementedError:
+ pass
+
+ # On PyPy and possibly other platforms, os.sched_getaffinity does not exist
+ # or raises NotImplementedError, let's try with psutil if it is installed.
+ try:
+ import psutil
+
+ p = psutil.Process()
+ if hasattr(p, "cpu_affinity"):
+ return len(p.cpu_affinity())
+
+ except ImportError: # pragma: no cover
+ if (
+ sys.platform == "linux"
+ and os.environ.get("LOKY_MAX_CPU_COUNT") is None
+ ):
+ # PyPy does not implement os.sched_getaffinity on Linux which
+ # can cause severe oversubscription problems. Better warn the
+ # user in this particularly pathological case, which can wreak
+ # havoc, typically on CI workers.
+ warnings.warn(
+ "Failed to inspect CPU affinity constraints on this system. "
+ "Please install psutil or explicitly set LOKY_MAX_CPU_COUNT."
+ )
+
+ # This can happen for platforms that do not implement any kind of CPU
+ # affinity, such as macOS-based platforms.
+ return os_cpu_count
+
+
+def _cpu_count_user(os_cpu_count):
+ """Number of user defined available CPUs"""
+ cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
+
+ cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
+
+ # User defined soft-limit passed as a loky specific environment variable.
+ cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
+
+ return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
+
+
+def _count_physical_cores():
+ """Return a tuple (number of physical cores, exception)
+
+ If the number of physical cores is found, exception is set to None.
+ If it has not been found, return ("not found", exception).
+
+ The number of physical cores is cached to avoid repeating subprocess calls.
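+
+ Illustrative handling of the returned pair (not part of the original
+ docstring):
+
+ n_physical, exc = _count_physical_cores()
+ if n_physical == "not found":
+ pass # fall back to the logical core count; exc explains why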
+ """ + exception = None + + # First check if the value is cached + global physical_cores_cache + if physical_cores_cache is not None: + return physical_cores_cache, exception + + # Not cached yet, find it + try: + if sys.platform == "linux": + cpu_info = subprocess.run( + "lscpu --parse=core".split(), capture_output=True, text=True + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = {line for line in cpu_info if not line.startswith("#")} + cpu_count_physical = len(cpu_info) + elif sys.platform == "win32": + cpu_info = subprocess.run( + "wmic CPU Get NumberOfCores /Format:csv".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = [ + l.split(",")[1] + for l in cpu_info + if (l and l != "Node,NumberOfCores") + ] + cpu_count_physical = sum(map(int, cpu_info)) + elif sys.platform == "darwin": + cpu_info = subprocess.run( + "sysctl -n hw.physicalcpu".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout + cpu_count_physical = int(cpu_info) + else: + raise NotImplementedError(f"unsupported platform: {sys.platform}") + + # if cpu_count_physical < 1, we did not find a valid value + if cpu_count_physical < 1: + raise ValueError(f"found {cpu_count_physical} physical cores < 1") + + except Exception as e: + exception = e + cpu_count_physical = "not found" + + # Put the result in cache + physical_cores_cache = cpu_count_physical + + return cpu_count_physical, exception + + +class LokyContext(BaseContext): + """Context relying on the LokyProcess.""" + + _name = "loky" + Process = LokyProcess + cpu_count = staticmethod(cpu_count) + + def Queue(self, maxsize=0, reducers=None): + """Returns a queue object""" + from .queues import Queue + + return Queue(maxsize, reducers=reducers, ctx=self.get_context()) + + def SimpleQueue(self, reducers=None): + """Returns a queue object""" + from .queues import SimpleQueue + + return SimpleQueue(reducers=reducers, ctx=self.get_context()) + + if sys.platform != "win32": + """For Unix platform, use our custom implementation of synchronize + ensuring that we use the loky.backend.resource_tracker to clean-up + the semaphores in case of a worker crash. + """ + + def Semaphore(self, value=1): + """Returns a semaphore object""" + from .synchronize import Semaphore + + return Semaphore(value=value) + + def BoundedSemaphore(self, value): + """Returns a bounded semaphore object""" + from .synchronize import BoundedSemaphore + + return BoundedSemaphore(value) + + def Lock(self): + """Returns a lock object""" + from .synchronize import Lock + + return Lock() + + def RLock(self): + """Returns a recurrent lock object""" + from .synchronize import RLock + + return RLock() + + def Condition(self, lock=None): + """Returns a condition object""" + from .synchronize import Condition + + return Condition(lock) + + def Event(self): + """Returns an event object""" + from .synchronize import Event + + return Event() + + +class LokyInitMainContext(LokyContext): + """Extra context with LokyProcess, which does load the main module + + This context is used for compatibility in the case ``cloudpickle`` is not + present on the running system. This permits to load functions defined in + the ``main`` module, using proper safeguards. The declaration of the + ``executor`` should be protected by ``if __name__ == "__main__":`` and the + functions and variable used from main should be out of this block. 
+
+    This mimics the default behavior of multiprocessing under Windows and the
+    behavior of the ``spawn`` start method on POSIX systems. For more details,
+    see the end of the following section of the Python documentation:
+    https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
+    """
+
+    _name = "loky_init_main"
+    Process = LokyInitMainProcess
+
+
+# Register loky context so it works with multiprocessing.get_context
+ctx_loky = LokyContext()
+mp.context._concrete_contexts["loky"] = ctx_loky
+mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f85f65df5e22bc2342f44c4a59b5e2ece63a81f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py
@@ -0,0 +1,173 @@
+import os
+import sys
+import msvcrt
+import _winapi
+from pickle import load
+from multiprocessing import process, util
+from multiprocessing.context import set_spawning_popen
+from multiprocessing.popen_spawn_win32 import Popen as _Popen
+
+from . import reduction, spawn
+
+
+__all__ = ["Popen"]
+
+#
+#
+#
+
+
+def _path_eq(p1, p2):
+    return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
+
+
+WINENV = hasattr(sys, "_base_executable") and not _path_eq(
+    sys.executable, sys._base_executable
+)
+
+
+def _close_handles(*handles):
+    for handle in handles:
+        _winapi.CloseHandle(handle)
+
+
+#
+# We define a Popen class similar to the one from subprocess, but
+# whose constructor takes a process object as its argument.
+#
+
+
+class Popen(_Popen):
+    """
+    Start a subprocess to run the code of a process object.
+
+    We differ from the CPython implementation in the way we handle environment
+    variables: they are modified in the child process before importing any
+    library, in order to control the number of threads in C-level threadpools.
+
+    We also use the loky preparation data, in particular to handle main_module
+    inits and the loky resource tracker.
+    """
+
+    method = "loky"
+
+    def __init__(self, process_obj):
+        prep_data = spawn.get_preparation_data(
+            process_obj._name, getattr(process_obj, "init_main_module", True)
+        )
+
+        # read end of pipe will be duplicated by the child process
+        # -- see spawn_main() in spawn.py.
+        #
+        # bpo-33929: Previously, the read end of pipe was "stolen" by the child
+        # process, but it leaked a handle if the child process had been
+        # terminated before it could steal the handle from the parent process.
+        rhandle, whandle = _winapi.CreatePipe(None, 0)
+        wfd = msvcrt.open_osfhandle(whandle, 0)
+        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
+
+        python_exe = spawn.get_executable()
+
+        # copy the environment variables to set in the child process
+        child_env = {**os.environ, **process_obj.env}
+
+        # bpo-35797: When running in a venv, we bypass the redirect
+        # executable and launch our base Python.
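+        # The child then re-creates the venv context from the
+        # __PYVENV_LAUNCHER__ environment variable set just below.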
+ if WINENV and _path_eq(python_exe, sys.executable): + cmd[0] = python_exe = sys._base_executable + child_env["__PYVENV_LAUNCHER__"] = sys.executable + + cmd = " ".join(f'"{x}"' for x in cmd) + + with open(wfd, "wb") as to_child: + # start process + try: + hp, ht, pid, _ = _winapi.CreateProcess( + python_exe, + cmd, + None, + None, + False, + 0, + child_env, + None, + None, + ) + _winapi.CloseHandle(ht) + except BaseException: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize( + self, _close_handles, (self.sentinel, int(rhandle)) + ) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + +def get_command_line(pipe_handle, parent_pid, **kwds): + """Returns prefix of command line used for spawning a child process.""" + if getattr(sys, "frozen", False): + return [sys.executable, "--multiprocessing-fork", pipe_handle] + else: + prog = ( + "from joblib.externals.loky.backend.popen_loky_win32 import main; " + f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})" + ) + opts = util._args_from_interpreter_flags() + return [ + spawn.get_executable(), + *opts, + "-c", + prog, + "--multiprocessing-fork", + ] + + +def is_forking(argv): + """Return whether commandline indicates we are forking.""" + if len(argv) >= 2 and argv[1] == "--multiprocessing-fork": + return True + else: + return False + + +def main(pipe_handle, parent_pid=None): + """Run code specified by data received over pipe.""" + assert is_forking(sys.argv), "Not forking" + + if parent_pid is not None: + source_process = _winapi.OpenProcess( + _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid + ) + else: + source_process = None + new_handle = reduction.duplicate( + pipe_handle, source_process=source_process + ) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + parent_sentinel = source_process + + with os.fdopen(fd, "rb", closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = load(from_parent) + spawn.prepare(preparation_data, parent_sentinel) + self = load(from_parent) + finally: + del process.current_process()._inheriting + + exitcode = self._bootstrap(parent_sentinel) + sys.exit(exitcode) diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..5afd99b420fbc480ed5eb743333a687110a90e49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py @@ -0,0 +1,236 @@ +############################################################################### +# Queue and SimpleQueue implementation for loky +# +# authors: Thomas Moreau, Olivier Grisel +# +# based on multiprocessing/queues.py (16/02/2017) +# * Add some custom reducers for the Queues/SimpleQueue to tweak the +# pickling process. 
(overload Queue._feed/SimpleQueue.put) +# +import os +import sys +import errno +import weakref +import threading +from multiprocessing import util +from multiprocessing.queues import ( + Full, + Queue as mp_Queue, + SimpleQueue as mp_SimpleQueue, + _sentinel, +) +from multiprocessing.context import assert_spawning + +from .reduction import dumps + + +__all__ = ["Queue", "SimpleQueue", "Full"] + + +class Queue(mp_Queue): + def __init__(self, maxsize=0, reducers=None, ctx=None): + super().__init__(maxsize=maxsize, ctx=ctx) + self._reducers = reducers + + # Use custom queue set/get state to be able to reduce the custom reducers + def __getstate__(self): + assert_spawning(self) + return ( + self._ignore_epipe, + self._maxsize, + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + self._sem, + self._opid, + ) + + def __setstate__(self, state): + ( + self._ignore_epipe, + self._maxsize, + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + self._sem, + self._opid, + ) = state + if sys.version_info >= (3, 9): + self._reset() + else: + self._after_fork() + + # Overload _start_thread to correctly call our custom _feed + def _start_thread(self): + util.debug("Queue._start_thread()") + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=( + self._buffer, + self._notempty, + self._send_bytes, + self._wlock, + self._writer.close, + self._reducers, + self._ignore_epipe, + self._on_queue_feeder_error, + self._sem, + ), + name="QueueFeederThread", + ) + self._thread.daemon = True + + util.debug("doing self._thread.start()") + self._thread.start() + util.debug("... done self._thread.start()") + + # On process exit we will wait for data to be flushed to pipe. + # + # However, if this process created the queue then all + # processes which use the queue will be descendants of this + # process. Therefore waiting for the queue to be flushed + # is pointless once all the child processes have been joined. + created_by_this_process = self._opid == os.getpid() + if not self._joincancelled and not created_by_this_process: + self._jointhread = util.Finalize( + self._thread, + Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5, + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = util.Finalize( + self, + Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10, + ) + + # Overload the _feed methods to use our custom pickling strategy. 
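+    # The body below mirrors multiprocessing.queues.Queue._feed; the one
+    # functional change is that objects are serialized with
+    # dumps(obj, reducers=...) before the write lock is acquired.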
+    @staticmethod
+    def _feed(
+        buffer,
+        notempty,
+        send_bytes,
+        writelock,
+        close,
+        reducers,
+        ignore_epipe,
+        onerror,
+        queue_sem,
+    ):
+        util.debug("starting thread to feed data to pipe")
+        nacquire = notempty.acquire
+        nrelease = notempty.release
+        nwait = notempty.wait
+        bpopleft = buffer.popleft
+        sentinel = _sentinel
+        if sys.platform != "win32":
+            wacquire = writelock.acquire
+            wrelease = writelock.release
+        else:
+            wacquire = None
+
+        while True:
+            try:
+                nacquire()
+                try:
+                    if not buffer:
+                        nwait()
+                finally:
+                    nrelease()
+                try:
+                    while True:
+                        obj = bpopleft()
+                        if obj is sentinel:
+                            util.debug("feeder thread got sentinel -- exiting")
+                            close()
+                            return
+
+                        # serialize the data before acquiring the lock
+                        obj_ = dumps(obj, reducers=reducers)
+                        if wacquire is None:
+                            send_bytes(obj_)
+                        else:
+                            wacquire()
+                            try:
+                                send_bytes(obj_)
+                            finally:
+                                wrelease()
+                        # Remove references early to avoid leaking memory
+                        del obj, obj_
+                except IndexError:
+                    pass
+            except BaseException as e:
+                if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
+                    return
+                # Since this runs in a daemon thread, the resources it uses
+                # may become unusable while the process is cleaning up.
+                # We ignore errors which happen after the process has
+                # started to clean up.
+                if util.is_exiting():
+                    util.info(f"error in queue thread: {e}")
+                    return
+                else:
+                    queue_sem.release()
+                    onerror(e, obj)
+
+    def _on_queue_feeder_error(self, e, obj):
+        """
+        Private API hook called when feeding data in the background thread
+        raises an exception. For overriding by concurrent.futures.
+        """
+        import traceback
+
+        traceback.print_exc()
+
+
+class SimpleQueue(mp_SimpleQueue):
+    def __init__(self, reducers=None, ctx=None):
+        super().__init__(ctx=ctx)
+
+        # Add the possibility to use custom reducers
+        self._reducers = reducers
+
+    def close(self):
+        self._reader.close()
+        self._writer.close()
+
+    # Use custom queue set/get state to be able to reduce the custom reducers
+    def __getstate__(self):
+        assert_spawning(self)
+        return (
+            self._reader,
+            self._writer,
+            self._reducers,
+            self._rlock,
+            self._wlock,
+        )
+
+    def __setstate__(self, state):
+        (
+            self._reader,
+            self._writer,
+            self._reducers,
+            self._rlock,
+            self._wlock,
+        ) = state
+
+    # Overload put to use our customizable reducer
+    def put(self, obj):
+        # serialize the data before acquiring the lock
+        obj = dumps(obj, reducers=self._reducers)
+        if self._wlock is None:
+            # writes to a message-oriented Win32 pipe are atomic
+            self._writer.send_bytes(obj)
+        else:
+            with self._wlock:
+                self._writer.send_bytes(obj)
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py
new file mode 100644
index 0000000000000000000000000000000000000000..bed32ba9e18f7d0fccab7ead6095996d27f448e2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py
@@ -0,0 +1,224 @@
+###############################################################################
+# Customizable Pickler with some basic reducers
+#
+# author: Thomas Moreau
+#
+# adapted from multiprocessing/reduction.py (17/02/2017)
+#  * Replace the ForkingPickler with a similar _LokyPickler,
+#  * Add CustomizableLokyPickler to allow customizing the pickling process
+#    on the fly.
+#
+import copyreg
+import io
+import functools
+import types
+import sys
+import os
+
+from multiprocessing import util
+from pickle import loads, HIGHEST_PROTOCOL
+
+###############################################################################
+# Enable custom pickling in Loky.
+
+_dispatch_table = {}
+
+
+def register(type_, reduce_function):
+    _dispatch_table[type_] = reduce_function
+
+
+###############################################################################
+# Register extra pickling routines to improve pickling support for loky
+
+
+# make methods picklable
+def _reduce_method(m):
+    if m.__self__ is None:
+        return getattr, (m.__class__, m.__func__.__name__)
+    else:
+        return getattr, (m.__self__, m.__func__.__name__)
+
+
+class _C:
+    def f(self):
+        pass
+
+    @classmethod
+    def h(cls):
+        pass
+
+
+register(type(_C().f), _reduce_method)
+register(type(_C.h), _reduce_method)
+
+
+if not hasattr(sys, "pypy_version_info"):
+    # PyPy uses functions instead of method_descriptors and wrapper_descriptors
+    def _reduce_method_descriptor(m):
+        return getattr, (m.__objclass__, m.__name__)
+
+    register(type(list.append), _reduce_method_descriptor)
+    register(type(int.__add__), _reduce_method_descriptor)
+
+
+# Make partial functions picklable
+def _reduce_partial(p):
+    return _rebuild_partial, (p.func, p.args, p.keywords or {})
+
+
+def _rebuild_partial(func, args, keywords):
+    return functools.partial(func, *args, **keywords)
+
+
+register(functools.partial, _reduce_partial)
+
+if sys.platform != "win32":
+    from ._posix_reduction import _mk_inheritable  # noqa: F401
+else:
+    from . import _win_reduction  # noqa: F401
+
+# global variable to change the pickler behavior
+try:
+    from joblib.externals import cloudpickle  # noqa: F401
+
+    DEFAULT_ENV = "cloudpickle"
+except ImportError:
+    # If cloudpickle is not present, fall back to pickle
+    DEFAULT_ENV = "pickle"
+
+ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
+_LokyPickler = None
+_loky_pickler_name = None
+
+
+def set_loky_pickler(loky_pickler=None):
+    global _LokyPickler, _loky_pickler_name
+
+    if loky_pickler is None:
+        loky_pickler = ENV_LOKY_PICKLER
+
+    loky_pickler_cls = None
+
+    # The default loky_pickler is cloudpickle
+    if loky_pickler in ["", None]:
+        loky_pickler = "cloudpickle"
+
+    if loky_pickler == _loky_pickler_name:
+        return
+
+    if loky_pickler == "cloudpickle":
+        from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
+    else:
+        try:
+            from importlib import import_module
+
+            module_pickle = import_module(loky_pickler)
+            loky_pickler_cls = module_pickle.Pickler
+        except (ImportError, AttributeError) as e:
+            extra_info = (
+                "\nThis error occurred while setting loky_pickler to"
+                f" '{loky_pickler}', as required by the env variable "
+                "LOKY_PICKLER or the function set_loky_pickler."
+            )
+            e.args = (e.args[0] + extra_info,) + e.args[1:]
+            e.msg = e.args[0]
+            raise e
+
+    util.debug(
+        f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "
+        "serialization."
+    )
+
+    class CustomizablePickler(loky_pickler_cls):
+        _loky_pickler_cls = loky_pickler_cls
+
+        def _set_dispatch_table(self, dispatch_table):
+            for ancestor_class in self._loky_pickler_cls.mro():
+                dt_attribute = getattr(ancestor_class, "dispatch_table", None)
+                if isinstance(dt_attribute, types.MemberDescriptorType):
+                    # Ancestor class (typically _pickle.Pickler) has a
+                    # member_descriptor for its "dispatch_table" attribute. Use
+                    # it to set the dispatch_table as a member instead of a
+                    # dynamic attribute in the __dict__ of the instance,
+                    # otherwise it will not be taken into account by the C
+                    # implementation of the dump method if a subclass defines a
+                    # class-level dispatch_table attribute as was done in
+                    # cloudpickle 1.6.0:
+                    # https://github.com/joblib/loky/pull/260
+                    dt_attribute.__set__(self, dispatch_table)
+                    break
+
+            # On top of the member descriptor set, also use setattr such that
+            # code that directly accesses self.dispatch_table gets a
+            # consistent view of the same table.
+            self.dispatch_table = dispatch_table
+
+        def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
+            loky_pickler_cls.__init__(self, writer, protocol=protocol)
+            if reducers is None:
+                reducers = {}
+
+            if hasattr(self, "dispatch_table"):
+                # Force a copy that we will update without mutating any
+                # class-level defined dispatch_table.
+                loky_dt = dict(self.dispatch_table)
+            else:
+                # Use standard reducers as bases
+                loky_dt = copyreg.dispatch_table.copy()
+
+            # Register loky specific reducers
+            loky_dt.update(_dispatch_table)
+
+            # Set the new dispatch table, taking care of the fact that we
+            # need to use the member_descriptor when we inherit from a
+            # subclass of the C implementation of the Pickler base class
+            # with a class-level dispatch_table attribute.
+            self._set_dispatch_table(loky_dt)
+
+            # Register the reducers
+            for type, reduce_func in reducers.items():
+                self.register(type, reduce_func)
+
+        def register(self, type, reduce_func):
+            """Attach a reducer function to a given type in the dispatch table."""
+            self.dispatch_table[type] = reduce_func
+
+    _LokyPickler = CustomizablePickler
+    _loky_pickler_name = loky_pickler
+
+
+def get_loky_pickler_name():
+    global _loky_pickler_name
+    return _loky_pickler_name
+
+
+def get_loky_pickler():
+    global _LokyPickler
+    return _LokyPickler
+
+
+# Set it to its default value
+set_loky_pickler()
+
+
+def dump(obj, file, reducers=None, protocol=None):
+    """Replacement for pickle.dump() using _LokyPickler."""
+    global _LokyPickler
+    _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
+
+
+def dumps(obj, reducers=None, protocol=None):
+    global _LokyPickler
+
+    buf = io.BytesIO()
+    dump(obj, buf, reducers=reducers, protocol=protocol)
+    return buf.getbuffer()
+
+
+__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
+
+if sys.platform == "win32":
+    from multiprocessing.reduction import duplicate
+
+    __all__ += ["duplicate"]
diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa089f7a1bf9b577455775f6d6249baf4bd430de
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py
@@ -0,0 +1,181 @@
+import os
+import sys
+import time
+import errno
+import signal
+import warnings
+import subprocess
+import traceback
+
+try:
+    import psutil
+except ImportError:
+    psutil = None
+
+
+def kill_process_tree(process, use_psutil=True):
+    """Terminate process and its descendants with SIGKILL"""
+    if use_psutil and psutil is not None:
+        _kill_process_tree_with_psutil(process)
+    else:
+        _kill_process_tree_without_psutil(process)
+
+
+def recursive_terminate(process, use_psutil=True):
+    warnings.warn(
+        "recursive_terminate is deprecated in loky 3.2, use "
+        "kill_process_tree instead",
+        DeprecationWarning,
+    )
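+    # Deprecated alias: delegate to the replacement API so that existing
+    # callers keep working.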
+    kill_process_tree(process, use_psutil=use_psutil)
+
+
+def _kill_process_tree_with_psutil(process):
+    try:
+        descendants = psutil.Process(process.pid).children(recursive=True)
+    except psutil.NoSuchProcess:
+        return
+
+    # Kill the descendants in reverse order to avoid killing the parents
+    # before the descendants in cases where processes are more deeply nested.
+    for descendant in descendants[::-1]:
+        try:
+            descendant.kill()
+        except psutil.NoSuchProcess:
+            pass
+
+    try:
+        psutil.Process(process.pid).kill()
+    except psutil.NoSuchProcess:
+        pass
+    process.join()
+
+
+def _kill_process_tree_without_psutil(process):
+    """Terminate a process and its descendants."""
+    try:
+        if sys.platform == "win32":
+            _windows_taskkill_process_tree(process.pid)
+        else:
+            _posix_recursive_kill(process.pid)
+    except Exception:  # pragma: no cover
+        details = traceback.format_exc()
+        warnings.warn(
+            "Failed to kill subprocesses on this platform. Please install "
+            "psutil: https://github.com/giampaolo/psutil\n"
+            f"Details:\n{details}"
+        )
+        # In case we cannot introspect or kill the descendants, we fall back to
+        # only killing the main process.
+        #
+        # Note: on Windows, process.kill() is an alias for process.terminate()
+        # which in turn calls the Win32 API function TerminateProcess().
+        process.kill()
+    process.join()
+
+
+def _windows_taskkill_process_tree(pid):
+    # On Windows, the taskkill command with option `/T` terminates a given
+    # process pid and its children.
+    try:
+        subprocess.check_output(
+            ["taskkill", "/F", "/T", "/PID", str(pid)], stderr=None
+        )
+    except subprocess.CalledProcessError as e:
+        # On Windows, taskkill returns 128 or 255 when no process is found.
+        if e.returncode not in [128, 255]:
+            # Let's raise to let the caller log the error details in a
+            # warning and only kill the root process.
+            raise  # pragma: no cover
+
+
+def _kill(pid):
+    # Not all systems (e.g. Windows) have a SIGKILL, but the C specification
+    # mandates a SIGTERM signal. While Windows is handled specifically above,
+    # let's try to be safe for other hypothetical platforms that only have
+    # SIGTERM without SIGKILL.
+    kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
+    try:
+        os.kill(pid, kill_signal)
+    except OSError as e:
+        # if OSError is raised with [Errno 3] no such process, the process
+        # is already terminated; else, raise the error and let the top
+        # level function raise a warning and retry to kill the process.
+        if e.errno != errno.ESRCH:
+            raise  # pragma: no cover
+
+
+def _posix_recursive_kill(pid):
+    """Recursively kill the descendants of a process before killing it."""
+    try:
+        children_pids = subprocess.check_output(
+            ["pgrep", "-P", str(pid)], stderr=None, text=True
+        )
+    except subprocess.CalledProcessError as e:
+        # `pgrep` returns 1 when no child process has been found
+        if e.returncode == 1:
+            children_pids = ""
+        else:
+            raise  # pragma: no cover
+
+    # Decode the result, split the cpids and remove the trailing line
+    for cpid in children_pids.splitlines():
+        cpid = int(cpid)
+        _posix_recursive_kill(cpid)
+
+    _kill(pid)
+
+
+def get_exitcodes_terminated_worker(processes):
+    """Return a formatted string with the exitcodes of terminated workers.
+
+    If necessary, wait (up to .25s) for the system to correctly set the
+    exitcode of one terminated worker.
+    """
+    patience = 5
+
+    # Catch the exitcode of the terminated workers. There should at least be
+    # one. If not, wait a bit for the system to correctly set the exitcode of
+    # the terminated worker.
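+    # Poll with short sleeps rather than joining: the workers were killed
+    # externally, and the OS may need a moment to make their exit codes
+    # observable.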
+ exitcodes = [ + p.exitcode for p in list(processes.values()) if p.exitcode is not None + ] + while not exitcodes and patience > 0: + patience -= 1 + exitcodes = [ + p.exitcode + for p in list(processes.values()) + if p.exitcode is not None + ] + time.sleep(0.05) + + return _format_exitcodes(exitcodes) + + +def _format_exitcodes(exitcodes): + """Format a list of exit code with names of the signals if possible""" + str_exitcodes = [ + f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None + ] + return "{" + ", ".join(str_exitcodes) + "}" + + +def _get_exitcode_name(exitcode): + if sys.platform == "win32": + # The exitcode are unreliable on windows (see bpo-31863). + # For this case, return UNKNOWN + return "UNKNOWN" + + if exitcode < 0: + try: + import signal + + return signal.Signals(-exitcode).name + except ValueError: + return "UNKNOWN" + elif exitcode != 255: + # The exitcode are unreliable on forkserver were 255 is always returned + # (see bpo-30589). For this case, return UNKNOWN + return "EXIT" + + return "UNKNOWN" diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz new file mode 100644 index 0000000000000000000000000000000000000000..fedefdd304054a85fa995801885f997ca8e1a44f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:418447e90d83486568ae3092a960b18d358230e24ac9ec38365daa99f415bd0f +size 769 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz new file mode 100644 index 0000000000000000000000000000000000000000..7cd1fcc9dc7a04d7ac251d3b1bbf973609b947b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9e215780f978ce693e48110ead23652e1c6de1c2189172232690198f7088788 +size 792 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz new file mode 100644 index 0000000000000000000000000000000000000000..826c9ba7b9579a988f8f1718219bbc41fd1ad756 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e9a63dcc7df38ab0a1137a9b44b436b13cebfa300eb19dba4ae4bce50d0fa81 +size 752 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f22c25bdb59d15a3771104dff6dfebe564e98add --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cbe456f5b91f5a3cb8e386838f276c30335432a351426686187761d5c34168b +size 1068 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2 
b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..80818a8baa1e2481b62bed06bb2b95f4a614cc3a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f2af67ea667c1f5315ddcab06bfa447005863c1c0fd88bb7e04a0b8acb9a54b +size 1021 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz new file mode 100644 index 0000000000000000000000000000000000000000..f2e65e202609648f0a5464ae5b78b9f9fba8dd6e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d56ae75c3a83a0d10f60e657d50e56af6e3addbf2f555e9fc385a6e52e1b32de +size 800 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.8.4_compressed_pickle_py27_np17.gz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.8.4_compressed_pickle_py27_np17.gz new file mode 100644 index 0000000000000000000000000000000000000000..fc4e28719d5acc118ac1d8bd8cdd15227eef25ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.8.4_compressed_pickle_py27_np17.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a9f994fb8baa63e689f681ba6db33bbb45aaf32693a61c9ebb50a3a608f40c8 +size 659 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz new file mode 100644 index 0000000000000000000000000000000000000000..0720a70aee276c37f9457817922ae60b67600d47 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f33bd8a21a41b729b05dac5deeb0e868f218a092b0e3fe5988094cf167217f6 +size 673 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f7ca0addc6d032e93d0b530a2b42a583fb0d4b81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9da8a3764db121e29d21ade67c9c3426598e76d88deae44cd7238983af8cef73 +size 670 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_01.npy b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_01.npy new file mode 100644 index 0000000000000000000000000000000000000000..15574a4193ad4ad724b2b8053c701a82efa78fd5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_01.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809 +size 120 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy 
b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy new file mode 100644 index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1 +size 120 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_03.npy b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_03.npy new file mode 100644 index 0000000000000000000000000000000000000000..73976395be90d4b2b2d955c79a90721e16cebc82 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_03.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ede9a64a52b25d7db30950956c978ec0b3932b7d14acd5abc63216e64babde7 +size 307 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy new file mode 100644 index 0000000000000000000000000000000000000000..73976395be90d4b2b2d955c79a90721e16cebc82 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ede9a64a52b25d7db30950956c978ec0b3932b7d14acd5abc63216e64babde7 +size 307 diff --git a/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz new file mode 100644 index 0000000000000000000000000000000000000000..e3125fe0fd4709dbd0067e67a06a3f24073934ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2361f589b31d2863627edcb96612280ae5c0a59c9496d89dab7de493670f93b +size 802 diff --git 
a/llmeval-env/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 b/llmeval-env/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..05e193bdd18b0edbec3774904c97407a4ff0afbe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 differ diff --git a/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/PKG-INFO b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..aef853b67fe31632fd47f3297c1508cd75a9160d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/PKG-INFO @@ -0,0 +1,117 @@ +Metadata-Version: 2.1 +Name: word2number +Version: 1.1 +Summary: Convert number words eg. three hundred and forty two to numbers (342). +Home-page: https://github.com/akshaynagpal/w2n +Author: Akshay Nagpal +Author-email: akshay2626@gmail.com +License: The MIT License (MIT) + + Copyright (c) 2016 Akshay Nagpal (https://github.com/akshaynagpal) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +Download-URL: https://github.com/akshaynagpal/w2n/tarball/1.1 +Keywords: numbers,convert,words +Platform: UNKNOWN +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +License-File: LICENSE.txt + +|travis_pic| |codecov_pic| + +============== +Word to Number +============== +This is a Python module to convert number words (eg. twenty one) to numeric digits (21). It works for positive numbers upto the range of 999,999,999,999 (i.e. billions). + +++++++++++++ +Installation +++++++++++++ +Please ensure that you have **updated pip** to the latest version before installing word2number. + +You can install the module using Python Package Index using the below command. + +.. code-block:: python + + pip install word2number + +Make sure you install all requirements given in requirements.txt + +.. code-block:: python + + pip install -r requirements.txt + ++++++ +Usage ++++++ +First you have to import the module using the below code. +.. code-block:: python + + from word2number import w2n + +Then you can use the **word_to_num** method to convert a number-word to numeric digits, as shown below. + +.. 
code-block:: python + + >>> print w2n.word_to_num("two million three thousand nine hundred and eighty four") + 2003984 + + >>> print(w2n.word_to_num('two point three')) + 2.3 + + >>> print(w2n.word_to_num('112')) + 112 + + >>> print(w2n.word_to_num('point one')) + 0.1 + + >>> print(w2n.word_to_num('one hundred thirty-five')) + 135 + + >>> print(w2n.word_to_num('million million')) + Error: Redundant number! Please enter a valid number word (eg. two million twenty three thousand and forty nine) + None + + >>> print(w2n.word_to_num('blah')) + Error: No valid number words found! Please enter a valid number word (eg. two million twenty three thousand and forty nine) + None + ++++++++++++ +Bugs/Errors ++++++++++++ + +Please ensure that you have updated pip to the latest version before installing word2number. + +If you find any bugs/errors in the usage of above code, please raise an issue through `Github `_. If you don't know how to use Github or raise an issue through it, I suggest that you should learn it. Else, send an email to akshay2626@gmail.com with a clear example that can reproduce the issue. + +++++++++++++ +Contributors +++++++++++++ +- Ben Batorsky (`bpben `__) +- Alex (`ledovsky `__) +- Tal Yarkoni (`tyarkoni `__) +- ButteredGroove (`ButteredGroove `__) + +.. |travis_pic| image:: https://travis-ci.org/akshaynagpal/w2n.svg?branch=master + :target: https://travis-ci.org/akshaynagpal/w2n + +.. |codecov_pic| image:: https://codecov.io/gh/akshaynagpal/w2n/branch/master/graph/badge.svg + :target: https://codecov.io/gh/akshaynagpal/w2n + diff --git a/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/SOURCES.txt b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..d2aa5a404a5d2a3f5f1056dcbe2b46f9e8665426 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/SOURCES.txt @@ -0,0 +1,12 @@ +LICENSE.txt +MANIFEST.in +README.rst +requirements.txt +setup.cfg +setup.py +word2number/__init__.py +word2number/w2n.py +word2number.egg-info/PKG-INFO +word2number.egg-info/SOURCES.txt +word2number.egg-info/dependency_links.txt +word2number.egg-info/top_level.txt \ No newline at end of file diff --git a/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/dependency_links.txt b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/installed-files.txt b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/installed-files.txt new file mode 100644 index 0000000000000000000000000000000000000000..413048dc850fa47824ecd7a75c066a9400b20fc4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/installed-files.txt @@ -0,0 +1,8 @@ +../word2number/__init__.py +../word2number/__pycache__/__init__.cpython-310.pyc +../word2number/__pycache__/w2n.cpython-310.pyc +../word2number/w2n.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/top_level.txt new file mode 100644 index 
0000000000000000000000000000000000000000..b7c664e33c170715d0101175b97770f68b8f82f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/word2number-1.1.egg-info/top_level.txt @@ -0,0 +1 @@ +word2number diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..36b248ce00511fd76f358eb0f5543ca448d0e443 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2014-2020, Yue Du +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..4f7117bec2f88ca9d73ccca493b5c5bd56913863 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/METADATA @@ -0,0 +1,515 @@ +Metadata-Version: 2.1 +Name: xxhash +Version: 3.4.1 +Summary: Python binding for xxHash +Home-page: https://github.com/ifduyue/python-xxhash +Author: Yue Du +Author-email: ifduyue@gmail.com +License: BSD +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE + +python-xxhash +============= + +.. image:: https://github.com/ifduyue/python-xxhash/actions/workflows/test.yml/badge.svg + :target: https://github.com/ifduyue/python-xxhash/actions/workflows/test.yml + :alt: Github Actions Status + +.. image:: https://img.shields.io/pypi/v/xxhash.svg + :target: https://pypi.org/project/xxhash/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/xxhash.svg + :target: https://pypi.org/project/xxhash/ + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/l/xxhash.svg + :target: https://pypi.org/project/xxhash/ + :alt: License + + +.. _HMAC: http://en.wikipedia.org/wiki/Hash-based_message_authentication_code +.. _xxHash: https://github.com/Cyan4973/xxHash +.. _Cyan4973: https://github.com/Cyan4973 + + +xxhash is a Python binding for the xxHash_ library by `Yann Collet`__. + +__ Cyan4973_ + +Installation +------------ + +.. code-block:: bash + + $ pip install xxhash + +You can also install using conda: + +.. code-block:: bash + + $ conda install -c conda-forge python-xxhash + + +Installing From Source +~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + $ pip install --no-binary xxhash xxhash + +Prerequisites +++++++++++++++ + +On Debian/Ubuntu: + +.. code-block:: bash + + $ apt-get install python-dev gcc + +On CentOS/Fedora: + +.. code-block:: bash + + $ yum install python-devel gcc redhat-rpm-config + +Linking to libxxhash.so +~~~~~~~~~~~~~~~~~~~~~~~~ + +By default python-xxhash will use bundled xxHash, +we can change this by specifying ENV var ``XXHASH_LINK_SO``: + +.. code-block:: bash + + $ XXHASH_LINK_SO=1 pip install --no-binary xxhash xxhash + +Usage +-------- + +Module version and its backend xxHash library version can be retrieved using +the module properties ``VERSION`` AND ``XXHASH_VERSION`` respectively. + +.. code-block:: python + + >>> import xxhash + >>> xxhash.VERSION + '2.0.0' + >>> xxhash.XXHASH_VERSION + '0.8.0' + +This module is hashlib-compliant, which means you can use it in the same way as ``hashlib.md5``. 
+ + | update() -- update the current digest with an additional string + | digest() -- return the current digest value + | hexdigest() -- return the current digest as a string of hexadecimal digits + | intdigest() -- return the current digest as an integer + | copy() -- return a copy of the current xxhash object + | reset() -- reset state + +md5 digest returns bytes, but the original xxh32 and xxh64 C APIs return integers. +While this module is made hashlib-compliant, ``intdigest()`` is also provided to +get the integer digest. + +Constructors for hash algorithms provided by this module are ``xxh32()`` and ``xxh64()``. + +For example, to obtain the digest of the byte string ``b'Nobody inspects the spammish repetition'``: + +.. code-block:: python + + >>> import xxhash + >>> x = xxhash.xxh32() + >>> x.update(b'Nobody inspects') + >>> x.update(b' the spammish repetition') + >>> x.digest() + b'\xe2);/' + >>> x.digest_size + 4 + >>> x.block_size + 16 + +More condensed: + +.. code-block:: python + + >>> xxhash.xxh32(b'Nobody inspects the spammish repetition').hexdigest() + 'e2293b2f' + >>> xxhash.xxh32(b'Nobody inspects the spammish repetition').digest() == x.digest() + True + +An optional seed (default is 0) can be used to alter the result predictably: + +.. code-block:: python + + >>> import xxhash + >>> xxhash.xxh64('xxhash').hexdigest() + '32dd38952c4bc720' + >>> xxhash.xxh64('xxhash', seed=20141025).hexdigest() + 'b559b98d844e0635' + >>> x = xxhash.xxh64(seed=20141025) + >>> x.update('xxhash') + >>> x.hexdigest() + 'b559b98d844e0635' + >>> x.intdigest() + 13067679811253438005 + +Be careful that xxh32 takes an unsigned 32-bit integer as seed, while xxh64 +takes an unsigned 64-bit integer. Although unsigned integer overflow is +defined behavior, it's better not to make it happen: + +.. code-block:: python + + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=0).hexdigest() + 'f7a35af8' + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=2**32).hexdigest() + 'f7a35af8' + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=1).hexdigest() + 'd8d4b4ba' + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=2**32+1).hexdigest() + 'd8d4b4ba' + >>> + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=0).hexdigest() + 'd4cb0a70a2b8c7c1' + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=2**64).hexdigest() + 'd4cb0a70a2b8c7c1' + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=1).hexdigest() + 'ce5087f12470d961' + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=2**64+1).hexdigest() + 'ce5087f12470d961' + + +``digest()`` returns bytes of the **big-endian** representation of the integer +digest: + +.. code-block:: python + + >>> import xxhash + >>> h = xxhash.xxh64() + >>> h.digest() + b'\xefF\xdb7Q\xd8\xe9\x99' + >>> h.intdigest().to_bytes(8, 'big') + b'\xefF\xdb7Q\xd8\xe9\x99' + >>> h.hexdigest() + 'ef46db3751d8e999' + >>> format(h.intdigest(), '016x') + 'ef46db3751d8e999' + >>> h.intdigest() + 17241709254077376921 + >>> int(h.hexdigest(), 16) + 17241709254077376921 + +Besides xxh32/xxh64 mentioned above, oneshot functions are also provided, +so we can avoid allocating XXH32/64 state on heap: + + | xxh32_digest(bytes, seed=0) + | xxh32_intdigest(bytes, seed=0) + | xxh32_hexdigest(bytes, seed=0) + | xxh64_digest(bytes, seed=0) + | xxh64_intdigest(bytes, seed=0) + | xxh64_hexdigest(bytes, seed=0) + +.. 
code-block:: python + + >>> import xxhash + >>> xxhash.xxh64('a').digest() == xxhash.xxh64_digest('a') + True + >>> xxhash.xxh64('a').intdigest() == xxhash.xxh64_intdigest('a') + True + >>> xxhash.xxh64('a').hexdigest() == xxhash.xxh64_hexdigest('a') + True + >>> xxhash.xxh64_hexdigest('xxhash', seed=20141025) + 'b559b98d844e0635' + >>> xxhash.xxh64_intdigest('xxhash', seed=20141025) + 13067679811253438005L + >>> xxhash.xxh64_digest('xxhash', seed=20141025) + '\xb5Y\xb9\x8d\x84N\x065' + +.. code-block:: python + + In [1]: import xxhash + + In [2]: %timeit xxhash.xxh64_hexdigest('xxhash') + 268 ns ± 24.1 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) + + In [3]: %timeit xxhash.xxh64('xxhash').hexdigest() + 416 ns ± 17.3 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) + + +XXH3 hashes are available since v2.0.0 (xxHash v0.8.0), they are: + +Streaming classes: + + | xxh3_64 + | xxh3_128 + +Oneshot functions: + + | xxh3_64_digest(bytes, seed=0) + | xxh3_64_intdigest(bytes, seed=0) + | xxh3_64_hexdigest(bytes, seed=0) + | xxh3_128_digest(bytes, seed=0) + | xxh3_128_intdigest(bytes, seed=0) + | xxh3_128_hexdigest(bytes, seed=0) + +And aliases: + + | xxh128 = xxh3_128 + | xxh128_digest = xxh3_128_digest + | xxh128_intdigest = xxh3_128_intdigest + | xxh128_hexdigest = xxh3_128_hexdigest + +Caveats +------- + +SEED OVERFLOW +~~~~~~~~~~~~~~ + +xxh32 takes an unsigned 32-bit integer as seed, and xxh64 takes +an unsigned 64-bit integer as seed. Make sure that the seed is greater than +or equal to ``0``. + +ENDIANNESS +~~~~~~~~~~~ + +As of python-xxhash 0.3.0, ``digest()`` returns bytes of the +**big-endian** representation of the integer digest. It used +to be little-endian. + +DONT USE XXHASH IN HMAC +~~~~~~~~~~~~~~~~~~~~~~~ +Though you can use xxhash as an HMAC_ hash function, but it's +highly recommended not to. + +xxhash is **NOT** a cryptographic hash function, it is a +non-cryptographic hash algorithm aimed at speed and quality. +Do not put xxhash in any position where cryptographic hash +functions are required. + + +Copyright and License +--------------------- + +Copyright (c) 2014-2020 Yue Du - https://github.com/ifduyue + +Licensed under `BSD 2-Clause License `_ + +CHANGELOG +----------- + +v3.4.1 2023-10-05 +~~~~~~~~~~~~~~~~~ + +- Remove setuptools_scm + + +v3.4.0 2023-10-05 +~~~~~~~~~~~~~~~~~ + +- Build wheels for Python 3.12 + +v3.3.0 2023-07-29 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.8.2 +- Drop support for Python 3.6 + +v3.2.0 2022-12-28 +~~~~~~~~~~~~~~~~~ + +This is the last version to support Python 3.6 + +- Build Python 3.11 wheels. +- Remove setup.py test_suites, call unittest directly + +v3.1.0 2022-10-19 +~~~~~~~~~~~~~~~~~ + +- Type annotations. +- Enabled muslinux wheels building. + +v3.0.0 2022-02-25 +~~~~~~~~~~~~~~~~~ + +- New set `algorithms_available` lists all implemented algorithms in `xxhash` + package. +- Upgrade xxHash to v0.8.1. +- Drop support for EOL Python versions, require python >= 3.6 from now on. +- Migrate to github actions and build arm64 wheels for macOS. +- Always release GIL. + + +v2.0.2 2021-04-15 +~~~~~~~~~~~~~~~~~ + +- Fix Travis CI OSX dpl python2.7 get-pip.py error + +v2.0.1 2021-04-15 +~~~~~~~~~~~~~~~~~ + +- Only to trigger Python 3.9 wheels building. 
+ +v2.0.0 2020-08-03 +~~~~~~~~~~~~~~~~~ + +- **Require xxHash version >= v0.8.0** +- Upgrade xxHash to v0.8.0 +- XXH3 hashes: `xxh3_64`, `xxh3_128`, and their oneshot functions + +v1.4.4 2020-06-20 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.7.3 +- Stop using PEP393 deprecated APIs +- Use XXH(32|64)_canonicalFromHash to replace u2bytes and ull2bytes + +v1.4.3 2019-11-12 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.7.2 +- Python 3.8 wheels + +v1.4.2 2019-10-13 +~~~~~~~~~~~~~~~~~ + +- Fixed: setup.py fails when reading README.rst and the default encoding is not UTF-8 + +v1.4.1 2019-08-27 +~~~~~~~~~~~~~~~~~ + +- Fixed: xxh3.h in missing from source tarball + +v1.4.0 2019-08-25 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.7.1 + +v1.3.0 2018-10-21 +~~~~~~~~~~~~~~~~~ + +- Wheels are now built automatically +- Split CFFI variant into a separate package `ifduyue/python-xxhash-cffi `_ + +v1.2.0 2018-07-13 +~~~~~~~~~~~~~~~~~ + +- Add oneshot functions xxh{32,64}_{,int,hex}digest + +v1.1.0 2018-07-05 +~~~~~~~~~~~~~~~~~ + +- Allow input larger than 2GB +- Release the GIL on sufficiently large input +- Drop support for Python 3.2 + +v1.0.1 2017-03-02 +~~~~~~~~~~~~~~~~~~ + +- Free state actively, instead of delegating it to ffi.gc + +v1.0.0 2017-02-10 +~~~~~~~~~~~~~~~~~~ + +- Fixed copy() segfault +- Added CFFI variant + +v0.6.3 2017-02-10 +~~~~~~~~~~~~~~~~~~ + +- Fixed copy() segfault + +v0.6.2 2017-02-10 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.6.2 + +v0.6.1 2016-06-26 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.6.1 + +v0.5.0 2016-03-02 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.5.0 + +v0.4.3 2015-08-21 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to r42 + +v0.4.1 2015-08-16 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to r41 + +v0.4.0 2015-08-05 +~~~~~~~~~~~~~~~~~~ + +- Added method reset +- Upgrade xxHash to r40 + +v0.3.2 2015-01-27 +~~~~~~~~~~~~~~~~~~ + +- Fixed some typos in docstrings + +v0.3.1 2015-01-24 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to r39 + +v0.3.0 2014-11-11 +~~~~~~~~~~~~~~~~~~ + +- Change digest() from little-endian representation to big-endian representation of the integer digest. + This change breaks compatibility (digest() results are different). + +v0.2.0 2014-10-25 +~~~~~~~~~~~~~~~~~~ + +- Make this package hashlib-compliant + +v0.1.3 2014-10-23 +~~~~~~~~~~~~~~~~~~ + +- Update xxHash to r37 + +v0.1.2 2014-10-19 +~~~~~~~~~~~~~~~~~~ + +- Improve: Check XXHnn_init() return value. +- Update xxHash to r36 + +v0.1.1 2014-08-07 +~~~~~~~~~~~~~~~~~~ + +- Improve: Can now be built with Visual C++ Compiler. + +v0.1.0 2014-08-05 +~~~~~~~~~~~~~~~~~~ + +- New: XXH32 and XXH64 type, which support partially update. 
+- Fix: build under Python 3.4 + +v0.0.2 2014-08-03 +~~~~~~~~~~~~~~~~~~ + +- NEW: Support Python 3 + +v0.0.1 2014-07-30 +~~~~~~~~~~~~~~~~~~ + +- NEW: xxh32 and xxh64 diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a3a11b791266211c8784c79155f79176c1c1db12 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/RECORD @@ -0,0 +1,13 @@ +xxhash-3.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +xxhash-3.4.1.dist-info/LICENSE,sha256=-OnvAMeL5NLaUmssN-QJnJIMf-C52UO_Da6y78MKOls,1313 +xxhash-3.4.1.dist-info/METADATA,sha256=00hjepcOk7xbVEn_p1iOwQDjmH7vVGpPsbPT5_V-SuI,12407 +xxhash-3.4.1.dist-info/RECORD,, +xxhash-3.4.1.dist-info/WHEEL,sha256=cD39NF6a3hkhaWoPQJng7gnGZRIfQsUCtwcedITCPtg,152 +xxhash-3.4.1.dist-info/top_level.txt,sha256=1PPSBP-gnjG59E5bigzMTzmT6BVWjHwnpzMiisPWZ5I,15 +xxhash/__init__.py,sha256=mPEdihxDMU0rjLWum3FrU9Ua2jQ-rzfewYgIg-J-Jlc,1147 +xxhash/__init__.pyi,sha256=Te-hUGiCW_4Y65lyrGCOgeQrL35XTFM-qunbz0R9MiE,1786 +xxhash/__pycache__/__init__.cpython-310.pyc,, +xxhash/__pycache__/version.cpython-310.pyc,, +xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so,sha256=-ivlxG-o9sMQAHnKz9tauU4XikfH8HzheTOL3Zp6QLw,830856 +xxhash/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xxhash/version.py,sha256=h7POl0WWl8CsxtaUevRn7QF75kWctjTiIWdvJzM4yeA,44 diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7b52c3f3e667e03ca4b2a8b53a94655a796beafc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..46a6ce2f1ac8b49207db6601355076641e92f163 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_xxhash +xxhash diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/__init__.py b/llmeval-env/lib/python3.10/site-packages/yarl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..127721ad0963cf9f54b1d53212278c9085f1328e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/__init__.py @@ -0,0 +1,5 @@ +from ._url import URL, cache_clear, cache_configure, cache_info + +__version__ = "1.9.4" + +__all__ = ("URL", "cache_clear", "cache_configure", "cache_info") diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/yarl/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5fd4bd0d122cb5329ba7e62d9cfa566939961551 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/__init__.pyi @@ -0,0 +1,121 @@ +import sys +from functools import _CacheInfo +from typing import Any, Mapping, Optional, Sequence, Tuple, Type, Union, overload + +import multidict + +if sys.version_info >= (3, 8): + from typing import Final, TypedDict, final +else: + from typing_extensions import 
Final, TypedDict, final + +_SimpleQuery = Union[str, int, float] +_QueryVariable = Union[_SimpleQuery, Sequence[_SimpleQuery]] +_Query = Union[ + None, str, Mapping[str, _QueryVariable], Sequence[Tuple[str, _QueryVariable]] +] + +@final +class URL: + scheme: Final[str] + raw_user: Final[str] + user: Final[Optional[str]] + raw_password: Final[Optional[str]] + password: Final[Optional[str]] + raw_host: Final[Optional[str]] + host: Final[Optional[str]] + port: Final[Optional[int]] + explicit_port: Final[Optional[int]] + raw_authority: Final[str] + authority: Final[str] + raw_path: Final[str] + path: Final[str] + raw_query_string: Final[str] + query_string: Final[str] + path_qs: Final[str] + raw_path_qs: Final[str] + raw_fragment: Final[str] + fragment: Final[str] + query: Final[multidict.MultiDict[str]] + raw_name: Final[str] + name: Final[str] + raw_suffix: Final[str] + suffix: Final[str] + raw_suffixes: Final[Tuple[str, ...]] + suffixes: Final[Tuple[str, ...]] + raw_parts: Final[Tuple[str, ...]] + parts: Final[Tuple[str, ...]] + parent: Final[URL] + def __init__( + self, val: Union[str, "URL"] = ..., *, encoded: bool = ... + ) -> None: ... + @classmethod + def build( + cls, + *, + scheme: str = ..., + authority: str = ..., + user: Optional[str] = ..., + password: Optional[str] = ..., + host: str = ..., + port: Optional[int] = ..., + path: str = ..., + query: Optional[_Query] = ..., + query_string: str = ..., + fragment: str = ..., + encoded: bool = ... + ) -> URL: ... + def __str__(self) -> str: ... + def __repr__(self) -> str: ... + def __bytes__(self) -> bytes: ... + def __eq__(self, other: Any) -> bool: ... + def __le__(self, other: Any) -> bool: ... + def __lt__(self, other: Any) -> bool: ... + def __ge__(self, other: Any) -> bool: ... + def __gt__(self, other: Any) -> bool: ... + def __hash__(self) -> int: ... + def __truediv__(self, name: str) -> URL: ... + def __mod__(self, query: _Query) -> URL: ... + def is_absolute(self) -> bool: ... + def is_default_port(self) -> bool: ... + def origin(self) -> URL: ... + def relative(self) -> URL: ... + def with_scheme(self, scheme: str) -> URL: ... + def with_user(self, user: Optional[str]) -> URL: ... + def with_password(self, password: Optional[str]) -> URL: ... + def with_host(self, host: str) -> URL: ... + def with_port(self, port: Optional[int]) -> URL: ... + def with_path(self, path: str, *, encoded: bool = ...) -> URL: ... + @overload + def with_query(self, query: _Query) -> URL: ... + @overload + def with_query(self, **kwargs: _QueryVariable) -> URL: ... + @overload + def update_query(self, query: _Query) -> URL: ... + @overload + def update_query(self, **kwargs: _QueryVariable) -> URL: ... + def with_fragment(self, fragment: Optional[str]) -> URL: ... + def with_name(self, name: str) -> URL: ... + def with_suffix(self, suffix: str) -> URL: ... + def join(self, url: URL) -> URL: ... + def joinpath(self, *url: str, encoded: bool = ...) -> URL: ... + def human_repr(self) -> str: ... + # private API + @classmethod + def _normalize_path(cls, path: str) -> str: ... + +@final +class cached_property: + def __init__(self, wrapped: Any) -> None: ... + def __get__(self, inst: URL, owner: Type[URL]) -> Any: ... + def __set__(self, inst: URL, value: Any) -> None: ... + +class CacheInfo(TypedDict): + idna_encode: _CacheInfo + idna_decode: _CacheInfo + +def cache_clear() -> None: ... +def cache_info() -> CacheInfo: ... +def cache_configure( + *, idna_encode_size: Optional[int] = ..., idna_decode_size: Optional[int] = ... +) -> None: ... 
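
The stub above enumerates yarl's public ``URL`` surface. As a quick orientation for reviewers, here is a minimal usage sketch of that API; it is illustrative only and not part of the vendored files, and the ``example.com`` URLs are placeholders:

.. code-block:: python

    from yarl import URL

    base = URL("https://example.com/search?q=python#frag")
    assert base.host == "example.com"
    assert base.path == "/search"
    assert base.query["q"] == "python"

    # __truediv__ appends a path segment; __mod__ updates the query string.
    child = base.origin() / "docs" % {"page": 2}
    assert str(child) == "https://example.com/docs?page=2"

    # build() assembles a URL from components, quoting as needed.
    built = URL.build(scheme="https", host="example.com", path="/a", query={"x": "1"})
    assert str(built) == "https://example.com/a?x=1"
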
diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..108b13a75d79eec596b7ef42bac404ec07c5539b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_quoting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_quoting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d85a2859fe6ff2ae4378d05aa2010fb7260ecd43 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_quoting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_quoting_py.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_quoting_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d5c9973fa226b08bdfff1869142d2df41cec24 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_quoting_py.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_url.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_url.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..417d211e76e392625449f693785aea281b0aba33 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/yarl/__pycache__/_url.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/_quoting.py b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting.py new file mode 100644 index 0000000000000000000000000000000000000000..8d1c705ff253f7dec76a3e2e5dce97fd9a81abcd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting.py @@ -0,0 +1,18 @@ +import os +import sys + +__all__ = ("_Quoter", "_Unquoter") + + +NO_EXTENSIONS = bool(os.environ.get("YARL_NO_EXTENSIONS")) # type: bool +if sys.implementation.name != "cpython": + NO_EXTENSIONS = True + + +if not NO_EXTENSIONS: # pragma: no branch + try: + from ._quoting_c import _Quoter, _Unquoter # type: ignore[assignment] + except ImportError: # pragma: no cover + from ._quoting_py import _Quoter, _Unquoter # type: ignore[assignment] +else: + from ._quoting_py import _Quoter, _Unquoter # type: ignore[assignment] diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..69e72a8ab5297d7b3f13c9592ff863376ee5fd51 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.pyi b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1c8fc24ec7ecc12cdbf9b0432aac4dd5b7aa32ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.pyi @@ -0,0 +1,16 @@ +from typing import Optional + +class _Quoter: + def __init__( + self, + *, + safe: str = ..., + protected: str = ..., + qs: bool = ..., + requote: bool = ... + ) -> None: ... + def __call__(self, val: Optional[str] = ...) -> Optional[str]: ... 
+ +class _Unquoter: + def __init__(self, *, unsafe: str = ..., qs: bool = ...) -> None: ... + def __call__(self, val: Optional[str] = ...) -> Optional[str]: ... diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.pyx b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.pyx new file mode 100644 index 0000000000000000000000000000000000000000..96f69c14e2bbc356bd16587c7d480150d842fde7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_c.pyx @@ -0,0 +1,371 @@ +# cython: language_level=3 + +from cpython.exc cimport PyErr_NoMemory +from cpython.mem cimport PyMem_Free, PyMem_Malloc, PyMem_Realloc +from cpython.unicode cimport PyUnicode_DecodeASCII, PyUnicode_DecodeUTF8Stateful +from libc.stdint cimport uint8_t, uint64_t +from libc.string cimport memcpy, memset + +from string import ascii_letters, digits + + +cdef str GEN_DELIMS = ":/?#[]@" +cdef str SUB_DELIMS_WITHOUT_QS = "!$'()*," +cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+?=;' +cdef str RESERVED = GEN_DELIMS + SUB_DELIMS +cdef str UNRESERVED = ascii_letters + digits + '-._~' +cdef str ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS +cdef str QS = '+&=;' + +DEF BUF_SIZE = 8 * 1024 # 8KiB +cdef char BUFFER[BUF_SIZE] + +cdef inline Py_UCS4 _to_hex(uint8_t v): + if v < 10: + return (v+0x30) # ord('0') == 0x30 + else: + return (v+0x41-10) # ord('A') == 0x41 + + +cdef inline int _from_hex(Py_UCS4 v): + if '0' <= v <= '9': + return (v) - 0x30 # ord('0') == 0x30 + elif 'A' <= v <= 'F': + return (v) - 0x41 + 10 # ord('A') == 0x41 + elif 'a' <= v <= 'f': + return (v) - 0x61 + 10 # ord('a') == 0x61 + else: + return -1 + + +cdef inline int _is_lower_hex(Py_UCS4 v): + return 'a' <= v <= 'f' + + +cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2): + cdef int digit1 = _from_hex(d1) + if digit1 < 0: + return -1 + cdef int digit2 = _from_hex(d2) + if digit2 < 0: + return -1 + return (digit1 << 4 | digit2) + + +cdef uint8_t ALLOWED_TABLE[16] +cdef uint8_t ALLOWED_NOTQS_TABLE[16] + + +cdef inline bint bit_at(uint8_t array[], uint64_t ch): + return array[ch >> 3] & (1 << (ch & 7)) + + +cdef inline void set_bit(uint8_t array[], uint64_t ch): + array[ch >> 3] |= (1 << (ch & 7)) + + +memset(ALLOWED_TABLE, 0, sizeof(ALLOWED_TABLE)) +memset(ALLOWED_NOTQS_TABLE, 0, sizeof(ALLOWED_NOTQS_TABLE)) + +for i in range(128): + if chr(i) in ALLOWED: + set_bit(ALLOWED_TABLE, i) + set_bit(ALLOWED_NOTQS_TABLE, i) + if chr(i) in QS: + set_bit(ALLOWED_NOTQS_TABLE, i) + +# ----------------- writer --------------------------- + +cdef struct Writer: + char *buf + Py_ssize_t size + Py_ssize_t pos + bint changed + + +cdef inline void _init_writer(Writer* writer): + writer.buf = &BUFFER[0] + writer.size = BUF_SIZE + writer.pos = 0 + writer.changed = 0 + + +cdef inline void _release_writer(Writer* writer): + if writer.buf != BUFFER: + PyMem_Free(writer.buf) + + +cdef inline int _write_char(Writer* writer, Py_UCS4 ch, bint changed): + cdef char * buf + cdef Py_ssize_t size + + if writer.pos == writer.size: + # reallocate + size = writer.size + BUF_SIZE + if writer.buf == BUFFER: + buf = PyMem_Malloc(size) + if buf == NULL: + PyErr_NoMemory() + return -1 + memcpy(buf, writer.buf, writer.size) + else: + buf = PyMem_Realloc(writer.buf, size) + if buf == NULL: + PyErr_NoMemory() + return -1 + writer.buf = buf + writer.size = size + writer.buf[writer.pos] = ch + writer.pos += 1 + writer.changed |= changed + return 0 + + +cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed): + if _write_char(writer, '%', changed) < 0: + return 
-1 + if _write_char(writer, _to_hex(ch >> 4), changed) < 0: + return -1 + return _write_char(writer, _to_hex(ch & 0x0f), changed) + + +cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol): + cdef uint64_t utf = symbol + + if utf < 0x80: + return _write_pct(writer, utf, True) + elif utf < 0x800: + if _write_pct(writer, (0xc0 | (utf >> 6)), True) < 0: + return -1 + return _write_pct(writer, (0x80 | (utf & 0x3f)), True) + elif 0xD800 <= utf <= 0xDFFF: + # surrogate pair, ignored + return 0 + elif utf < 0x10000: + if _write_pct(writer, (0xe0 | (utf >> 12)), True) < 0: + return -1 + if _write_pct(writer, (0x80 | ((utf >> 6) & 0x3f)), + True) < 0: + return -1 + return _write_pct(writer, (0x80 | (utf & 0x3f)), True) + elif utf > 0x10FFFF: + # symbol is too large + return 0 + else: + if _write_pct(writer, (0xf0 | (utf >> 18)), True) < 0: + return -1 + if _write_pct(writer, (0x80 | ((utf >> 12) & 0x3f)), + True) < 0: + return -1 + if _write_pct(writer, (0x80 | ((utf >> 6) & 0x3f)), + True) < 0: + return -1 + return _write_pct(writer, (0x80 | (utf & 0x3f)), True) + + +# --------------------- end writer -------------------------- + + +cdef class _Quoter: + cdef bint _qs + cdef bint _requote + + cdef uint8_t _safe_table[16] + cdef uint8_t _protected_table[16] + + def __init__( + self, *, str safe='', str protected='', bint qs=False, bint requote=True, + ): + cdef Py_UCS4 ch + + self._qs = qs + self._requote = requote + + if not self._qs: + memcpy(self._safe_table, + ALLOWED_NOTQS_TABLE, + sizeof(self._safe_table)) + else: + memcpy(self._safe_table, + ALLOWED_TABLE, + sizeof(self._safe_table)) + for ch in safe: + if ord(ch) > 127: + raise ValueError("Only safe symbols with ORD < 128 are allowed") + set_bit(self._safe_table, ch) + + memset(self._protected_table, 0, sizeof(self._protected_table)) + for ch in protected: + if ord(ch) > 127: + raise ValueError("Only safe symbols with ORD < 128 are allowed") + set_bit(self._safe_table, ch) + set_bit(self._protected_table, ch) + + def __call__(self, val): + cdef Writer writer + if val is None: + return None + if type(val) is not str: + if isinstance(val, str): + # derived from str + val = str(val) + else: + raise TypeError("Argument should be str") + _init_writer(&writer) + try: + return self._do_quote(val, &writer) + finally: + _release_writer(&writer) + + cdef str _do_quote(self, str val, Writer *writer): + cdef Py_UCS4 ch + cdef int changed + cdef int idx = 0 + cdef int length = len(val) + + while idx < length: + ch = val[idx] + idx += 1 + if ch == '%' and self._requote and idx <= length - 2: + ch = _restore_ch(val[idx], val[idx + 1]) + if ch != -1: + idx += 2 + if ch < 128: + if bit_at(self._protected_table, ch): + if _write_pct(writer, ch, True) < 0: + raise + continue + + if bit_at(self._safe_table, ch): + if _write_char(writer, ch, True) < 0: + raise + continue + + changed = (_is_lower_hex(val[idx - 2]) or + _is_lower_hex(val[idx - 1])) + if _write_pct(writer, ch, changed) < 0: + raise + continue + else: + ch = '%' + + if self._write(writer, ch) < 0: + raise + + if not writer.changed: + return val + else: + return PyUnicode_DecodeASCII(writer.buf, writer.pos, "strict") + + cdef inline int _write(self, Writer *writer, Py_UCS4 ch): + if self._qs: + if ch == ' ': + return _write_char(writer, '+', True) + + if ch < 128 and bit_at(self._safe_table, ch): + return _write_char(writer, ch, False) + + return _write_utf8(writer, ch) + + +cdef class _Unquoter: + cdef str _unsafe + cdef bint _qs + cdef _Quoter _quoter + cdef _Quoter _qs_quoter + + def
__init__(self, *, unsafe='', qs=False): + self._unsafe = unsafe + self._qs = qs + self._quoter = _Quoter() + self._qs_quoter = _Quoter(qs=True) + + def __call__(self, val): + if val is None: + return None + if type(val) is not str: + if isinstance(val, str): + # derived from str + val = str(val) + else: + raise TypeError("Argument should be str") + return self._do_unquote(val) + + cdef str _do_unquote(self, str val): + if len(val) == 0: + return val + cdef list ret = [] + cdef char buffer[4] + cdef Py_ssize_t buflen = 0 + cdef Py_ssize_t consumed + cdef str unquoted + cdef Py_UCS4 ch = 0 + cdef Py_ssize_t idx = 0 + cdef Py_ssize_t length = len(val) + cdef Py_ssize_t start_pct + + while idx < length: + ch = val[idx] + idx += 1 + if ch == '%' and idx <= length - 2: + ch = _restore_ch(val[idx], val[idx + 1]) + if ch != -1: + idx += 2 + assert buflen < 4 + buffer[buflen] = ch + buflen += 1 + try: + unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen, + NULL, &consumed) + except UnicodeDecodeError: + start_pct = idx - buflen * 3 + buffer[0] = ch + buflen = 1 + ret.append(val[start_pct : idx - 3]) + try: + unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen, + NULL, &consumed) + except UnicodeDecodeError: + buflen = 0 + ret.append(val[idx - 3 : idx]) + continue + if not unquoted: + assert consumed == 0 + continue + assert consumed == buflen + buflen = 0 + if self._qs and unquoted in '+=&;': + ret.append(self._qs_quoter(unquoted)) + elif unquoted in self._unsafe: + ret.append(self._quoter(unquoted)) + else: + ret.append(unquoted) + continue + else: + ch = '%' + + if buflen: + start_pct = idx - 1 - buflen * 3 + ret.append(val[start_pct : idx - 1]) + buflen = 0 + + if ch == '+': + if not self._qs or ch in self._unsafe: + ret.append('+') + else: + ret.append(' ') + continue + + if ch in self._unsafe: + ret.append('%') + h = hex(ord(ch)).upper()[2:] + for ch in h: + ret.append(ch) + continue + + ret.append(ch) + + if buflen: + ret.append(val[length - buflen * 3 : length]) + + return ''.join(ret) diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_py.py b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_py.py new file mode 100644 index 0000000000000000000000000000000000000000..585a1da804027636310d5abd1ed24806771425ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/_quoting_py.py @@ -0,0 +1,197 @@ +import codecs +import re +from string import ascii_letters, ascii_lowercase, digits +from typing import Optional, cast + +BASCII_LOWERCASE = ascii_lowercase.encode("ascii") +BPCT_ALLOWED = {f"%{i:02X}".encode("ascii") for i in range(256)} +GEN_DELIMS = ":/?#[]@" +SUB_DELIMS_WITHOUT_QS = "!$'()*," +SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + "+&=;" +RESERVED = GEN_DELIMS + SUB_DELIMS +UNRESERVED = ascii_letters + digits + "-._~" +ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS + + +_IS_HEX = re.compile(b"[A-Z0-9][A-Z0-9]") +_IS_HEX_STR = re.compile("[A-Fa-f0-9][A-Fa-f0-9]") + +utf8_decoder = codecs.getincrementaldecoder("utf-8") + + +class _Quoter: + def __init__( + self, + *, + safe: str = "", + protected: str = "", + qs: bool = False, + requote: bool = True, + ) -> None: + self._safe = safe + self._protected = protected + self._qs = qs + self._requote = requote + + def __call__(self, val: Optional[str]) -> Optional[str]: + if val is None: + return None + if not isinstance(val, str): + raise TypeError("Argument should be str") + if not val: + return "" + bval = cast(str, val).encode("utf8", errors="ignore") + ret = bytearray() + pct = bytearray() + safe = self._safe + safe 
+= ALLOWED + if not self._qs: + safe += "+&=;" + safe += self._protected + bsafe = safe.encode("ascii") + idx = 0 + while idx < len(bval): + ch = bval[idx] + idx += 1 + + if pct: + if ch in BASCII_LOWERCASE: + ch = ch - 32 # convert to uppercase + pct.append(ch) + if len(pct) == 3: # pragma: no branch # peephole optimizer + buf = pct[1:] + if not _IS_HEX.match(buf): + ret.extend(b"%25") + pct.clear() + idx -= 2 + continue + try: + unquoted = chr(int(pct[1:].decode("ascii"), base=16)) + except ValueError: + ret.extend(b"%25") + pct.clear() + idx -= 2 + continue + + if unquoted in self._protected: + ret.extend(pct) + elif unquoted in safe: + ret.append(ord(unquoted)) + else: + ret.extend(pct) + pct.clear() + + # special case, if we have only one char after "%" + elif len(pct) == 2 and idx == len(bval): + ret.extend(b"%25") + pct.clear() + idx -= 1 + + continue + + elif ch == ord("%") and self._requote: + pct.clear() + pct.append(ch) + + # special case if "%" is last char + if idx == len(bval): + ret.extend(b"%25") + + continue + + if self._qs: + if ch == ord(" "): + ret.append(ord("+")) + continue + if ch in bsafe: + ret.append(ch) + continue + + ret.extend((f"%{ch:02X}").encode("ascii")) + + ret2 = ret.decode("ascii") + if ret2 == val: + return val + return ret2 + + +class _Unquoter: + def __init__(self, *, unsafe: str = "", qs: bool = False) -> None: + self._unsafe = unsafe + self._qs = qs + self._quoter = _Quoter() + self._qs_quoter = _Quoter(qs=True) + + def __call__(self, val: Optional[str]) -> Optional[str]: + if val is None: + return None + if not isinstance(val, str): + raise TypeError("Argument should be str") + if not val: + return "" + decoder = cast(codecs.BufferedIncrementalDecoder, utf8_decoder()) + ret = [] + idx = 0 + while idx < len(val): + ch = val[idx] + idx += 1 + if ch == "%" and idx <= len(val) - 2: + pct = val[idx : idx + 2] + if _IS_HEX_STR.fullmatch(pct): + b = bytes([int(pct, base=16)]) + idx += 2 + try: + unquoted = decoder.decode(b) + except UnicodeDecodeError: + start_pct = idx - 3 - len(decoder.buffer) * 3 + ret.append(val[start_pct : idx - 3]) + decoder.reset() + try: + unquoted = decoder.decode(b) + except UnicodeDecodeError: + ret.append(val[idx - 3 : idx]) + continue + if not unquoted: + continue + if self._qs and unquoted in "+=&;": + to_add = self._qs_quoter(unquoted) + if to_add is None: # pragma: no cover + raise RuntimeError("Cannot quote None") + ret.append(to_add) + elif unquoted in self._unsafe: + to_add = self._quoter(unquoted) + if to_add is None: # pragma: no cover + raise RuntimeError("Cannot quote None") + ret.append(to_add) + else: + ret.append(unquoted) + continue + + if decoder.buffer: + start_pct = idx - 1 - len(decoder.buffer) * 3 + ret.append(val[start_pct : idx - 1]) + decoder.reset() + + if ch == "+": + if not self._qs or ch in self._unsafe: + ret.append("+") + else: + ret.append(" ") + continue + + if ch in self._unsafe: + ret.append("%") + h = hex(ord(ch)).upper()[2:] + for ch in h: + ret.append(ch) + continue + + ret.append(ch) + + if decoder.buffer: + ret.append(val[-len(decoder.buffer) * 3 :]) + + ret2 = "".join(ret) + if ret2 == val: + return val + return ret2 diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/_url.py b/llmeval-env/lib/python3.10/site-packages/yarl/_url.py new file mode 100644 index 0000000000000000000000000000000000000000..9cca27ef86c69fa903da38b2c9d1e5b5337c4c60 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/_url.py @@ -0,0 +1,1200 @@ +import functools +import math +import warnings 
+from collections.abc import Mapping, Sequence +from contextlib import suppress +from ipaddress import ip_address +from urllib.parse import SplitResult, parse_qsl, quote, urljoin, urlsplit, urlunsplit + +import idna +from multidict import MultiDict, MultiDictProxy + +from ._quoting import _Quoter, _Unquoter + +DEFAULT_PORTS = {"http": 80, "https": 443, "ws": 80, "wss": 443} + +sentinel = object() + + +def rewrite_module(obj: object) -> object: + obj.__module__ = "yarl" + return obj + + +class cached_property: + """Use as a class method decorator. It operates almost exactly like + the Python `@property` decorator, but it puts the result of the + method it decorates into the instance dict after the first call, + effectively replacing the function it decorates with an instance + variable. It is, in Python parlance, a data descriptor. + + """ + + def __init__(self, wrapped): + self.wrapped = wrapped + try: + self.__doc__ = wrapped.__doc__ + except AttributeError: # pragma: no cover + self.__doc__ = "" + self.name = wrapped.__name__ + + def __get__(self, inst, owner, _sentinel=sentinel): + if inst is None: + return self + val = inst._cache.get(self.name, _sentinel) + if val is not _sentinel: + return val + val = self.wrapped(inst) + inst._cache[self.name] = val + return val + + def __set__(self, inst, value): + raise AttributeError("cached property is read-only") + + +def _normalize_path_segments(segments): + """Drop '.' and '..' from a sequence of str segments""" + + resolved_path = [] + + for seg in segments: + if seg == "..": + # ignore any .. segments that would otherwise cause an + # IndexError when popped from resolved_path if + # resolving for rfc3986 + with suppress(IndexError): + resolved_path.pop() + elif seg != ".": + resolved_path.append(seg) + + if segments and segments[-1] in (".", ".."): + # do some post-processing here. + # if the last segment was a relative dir, + # then we need to append the trailing '/' + resolved_path.append("") + + return resolved_path + + +@rewrite_module +class URL: + # Don't derive from str + # follow pathlib.Path design + # probably URL will not suffer from pathlib problems: + # it's intended for libraries like aiohttp, + # not to be passed into standard library functions like os.open etc. + + # URL grammar (RFC 3986) + # pct-encoded = "%" HEXDIG HEXDIG + # reserved = gen-delims / sub-delims + # gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" + # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + # / "*" / "+" / "," / ";" / "=" + # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + # URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] + # hier-part = "//" authority path-abempty + # / path-absolute + # / path-rootless + # / path-empty + # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) + # authority = [ userinfo "@" ] host [ ":" port ] + # userinfo = *( unreserved / pct-encoded / sub-delims / ":" ) + # host = IP-literal / IPv4address / reg-name + # IP-literal = "[" ( IPv6address / IPvFuture ) "]" + # IPvFuture = "v" 1*HEXDIG "." 
1*( unreserved / sub-delims / ":" ) + # IPv6address = 6( h16 ":" ) ls32 + # / "::" 5( h16 ":" ) ls32 + # / [ h16 ] "::" 4( h16 ":" ) ls32 + # / [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + # / [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + # / [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + # / [ *4( h16 ":" ) h16 ] "::" ls32 + # / [ *5( h16 ":" ) h16 ] "::" h16 + # / [ *6( h16 ":" ) h16 ] "::" + # ls32 = ( h16 ":" h16 ) / IPv4address + # ; least-significant 32 bits of address + # h16 = 1*4HEXDIG + # ; 16 bits of address represented in hexadecimal + # IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet + # dec-octet = DIGIT ; 0-9 + # / %x31-39 DIGIT ; 10-99 + # / "1" 2DIGIT ; 100-199 + # / "2" %x30-34 DIGIT ; 200-249 + # / "25" %x30-35 ; 250-255 + # reg-name = *( unreserved / pct-encoded / sub-delims ) + # port = *DIGIT + # path = path-abempty ; begins with "/" or is empty + # / path-absolute ; begins with "/" but not "//" + # / path-noscheme ; begins with a non-colon segment + # / path-rootless ; begins with a segment + # / path-empty ; zero characters + # path-abempty = *( "/" segment ) + # path-absolute = "/" [ segment-nz *( "/" segment ) ] + # path-noscheme = segment-nz-nc *( "/" segment ) + # path-rootless = segment-nz *( "/" segment ) + # path-empty = 0 + # segment = *pchar + # segment-nz = 1*pchar + # segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" ) + # ; non-zero-length segment without any colon ":" + # pchar = unreserved / pct-encoded / sub-delims / ":" / "@" + # query = *( pchar / "/" / "?" ) + # fragment = *( pchar / "/" / "?" ) + # URI-reference = URI / relative-ref + # relative-ref = relative-part [ "?" query ] [ "#" fragment ] + # relative-part = "//" authority path-abempty + # / path-absolute + # / path-noscheme + # / path-empty + # absolute-URI = scheme ":" hier-part [ "?" 
query ] + __slots__ = ("_cache", "_val") + + _QUOTER = _Quoter(requote=False) + _REQUOTER = _Quoter() + _PATH_QUOTER = _Quoter(safe="@:", protected="/+", requote=False) + _PATH_REQUOTER = _Quoter(safe="@:", protected="/+") + _QUERY_QUOTER = _Quoter(safe="?/:@", protected="=+&;", qs=True, requote=False) + _QUERY_REQUOTER = _Quoter(safe="?/:@", protected="=+&;", qs=True) + _QUERY_PART_QUOTER = _Quoter(safe="?/:@", qs=True, requote=False) + _FRAGMENT_QUOTER = _Quoter(safe="?/:@", requote=False) + _FRAGMENT_REQUOTER = _Quoter(safe="?/:@") + + _UNQUOTER = _Unquoter() + _PATH_UNQUOTER = _Unquoter(unsafe="+") + _QS_UNQUOTER = _Unquoter(qs=True) + + def __new__(cls, val="", *, encoded=False, strict=None): + if strict is not None: # pragma: no cover + warnings.warn("strict parameter is ignored") + if type(val) is cls: + return val + if type(val) is str: + val = urlsplit(val) + elif type(val) is SplitResult: + if not encoded: + raise ValueError("Cannot apply decoding to SplitResult") + elif isinstance(val, str): + val = urlsplit(str(val)) + else: + raise TypeError("Constructor parameter should be str") + + if not encoded: + if not val[1]: # netloc + netloc = "" + host = "" + else: + host = val.hostname + if host is None: + raise ValueError("Invalid URL: host is required for absolute urls") + + try: + port = val.port + except ValueError as e: + raise ValueError( + "Invalid URL: port can't be converted to integer" + ) from e + + netloc = cls._make_netloc( + val.username, val.password, host, port, encode=True, requote=True + ) + path = cls._PATH_REQUOTER(val[2]) + if netloc: + path = cls._normalize_path(path) + + cls._validate_authority_uri_abs_path(host=host, path=path) + query = cls._QUERY_REQUOTER(val[3]) + fragment = cls._FRAGMENT_REQUOTER(val[4]) + val = SplitResult(val[0], netloc, path, query, fragment) + + self = object.__new__(cls) + self._val = val + self._cache = {} + return self + + @classmethod + def build( + cls, + *, + scheme="", + authority="", + user=None, + password=None, + host="", + port=None, + path="", + query=None, + query_string="", + fragment="", + encoded=False, + ): + """Creates and returns a new URL""" + + if authority and (user or password or host or port): + raise ValueError( + 'Can\'t mix "authority" with "user", "password", "host" or "port".' + ) + if not isinstance(port, (int, type(None))): + raise TypeError("The port is required to be int.") + if port and not host: + raise ValueError('Can\'t build URL with "port" but without "host".') + if query and query_string: + raise ValueError('Only one of "query" or "query_string" should be passed') + if ( + scheme is None + or authority is None + or host is None + or path is None + or query_string is None + or fragment is None + ): + raise TypeError( + 'NoneType is illegal for "scheme", "authority", "host", "path", ' + '"query_string", and "fragment" args, use empty string instead.' 
+ ) + + if authority: + if encoded: + netloc = authority + else: + tmp = SplitResult("", authority, "", "", "") + netloc = cls._make_netloc( + tmp.username, tmp.password, tmp.hostname, tmp.port, encode=True + ) + elif not user and not password and not host and not port: + netloc = "" + else: + netloc = cls._make_netloc( + user, password, host, port, encode=not encoded, encode_host=not encoded + ) + if not encoded: + path = cls._PATH_QUOTER(path) + if netloc: + path = cls._normalize_path(path) + + cls._validate_authority_uri_abs_path(host=host, path=path) + query_string = cls._QUERY_QUOTER(query_string) + fragment = cls._FRAGMENT_QUOTER(fragment) + + url = cls( + SplitResult(scheme, netloc, path, query_string, fragment), encoded=True + ) + + if query: + return url.with_query(query) + else: + return url + + def __init_subclass__(cls): + raise TypeError(f"Inheriting a class {cls!r} from URL is forbidden") + + def __str__(self): + val = self._val + if not val.path and self.is_absolute() and (val.query or val.fragment): + val = val._replace(path="/") + return urlunsplit(val) + + def __repr__(self): + return f"{self.__class__.__name__}('{str(self)}')" + + def __bytes__(self): + return str(self).encode("ascii") + + def __eq__(self, other): + if not type(other) is URL: + return NotImplemented + + val1 = self._val + if not val1.path and self.is_absolute(): + val1 = val1._replace(path="/") + + val2 = other._val + if not val2.path and other.is_absolute(): + val2 = val2._replace(path="/") + + return val1 == val2 + + def __hash__(self): + ret = self._cache.get("hash") + if ret is None: + val = self._val + if not val.path and self.is_absolute(): + val = val._replace(path="/") + ret = self._cache["hash"] = hash(val) + return ret + + def __le__(self, other): + if not type(other) is URL: + return NotImplemented + return self._val <= other._val + + def __lt__(self, other): + if not type(other) is URL: + return NotImplemented + return self._val < other._val + + def __ge__(self, other): + if not type(other) is URL: + return NotImplemented + return self._val >= other._val + + def __gt__(self, other): + if not type(other) is URL: + return NotImplemented + return self._val > other._val + + def __truediv__(self, name): + if not isinstance(name, str): + return NotImplemented + return self._make_child((str(name),)) + + def __mod__(self, query): + return self.update_query(query) + + def __bool__(self) -> bool: + return bool( + self._val.netloc or self._val.path or self._val.query or self._val.fragment + ) + + def __getstate__(self): + return (self._val,) + + def __setstate__(self, state): + if state[0] is None and isinstance(state[1], dict): + # default style pickle + self._val = state[1]["_val"] + else: + self._val, *unused = state + self._cache = {} + + def is_absolute(self): + """A check for absolute URLs. + + Return True for absolute ones (having scheme or starting + with //), False otherwise. + + """ + return self.raw_host is not None + + def is_default_port(self): + """A check for default port. + + Return True if port is default for specified scheme, + e.g. 'http://python.org' or 'http://python.org:80', False + otherwise. + + """ + if self.port is None: + return False + default = DEFAULT_PORTS.get(self.scheme) + if default is None: + return False + return self.port == default + + def origin(self): + """Return an URL with scheme, host and port parts only. + + user, password, path, query and fragment are removed. + + """ + # TODO: add a keyword-only option for keeping user/pass maybe? 
+ if not self.is_absolute(): + raise ValueError("URL should be absolute") + if not self._val.scheme: + raise ValueError("URL should have scheme") + v = self._val + netloc = self._make_netloc(None, None, v.hostname, v.port) + val = v._replace(netloc=netloc, path="", query="", fragment="") + return URL(val, encoded=True) + + def relative(self): + """Return a relative part of the URL. + + scheme, user, password, host and port are removed. + + """ + if not self.is_absolute(): + raise ValueError("URL should be absolute") + val = self._val._replace(scheme="", netloc="") + return URL(val, encoded=True) + + @property + def scheme(self): + """Scheme for absolute URLs. + + Empty string for relative URLs or URLs starting with // + + """ + return self._val.scheme + + @property + def raw_authority(self): + """Encoded authority part of URL. + + Empty string for relative URLs. + + """ + return self._val.netloc + + @cached_property + def authority(self): + """Decoded authority part of URL. + + Empty string for relative URLs. + + """ + return self._make_netloc( + self.user, self.password, self.host, self.port, encode_host=False + ) + + @property + def raw_user(self): + """Encoded user part of URL. + + None if user is missing. + + """ + # not .username + ret = self._val.username + if not ret: + return None + return ret + + @cached_property + def user(self): + """Decoded user part of URL. + + None if user is missing. + + """ + return self._UNQUOTER(self.raw_user) + + @property + def raw_password(self): + """Encoded password part of URL. + + None if password is missing. + + """ + return self._val.password + + @cached_property + def password(self): + """Decoded password part of URL. + + None if password is missing. + + """ + return self._UNQUOTER(self.raw_password) + + @property + def raw_host(self): + """Encoded host part of URL. + + None for relative URLs. + + """ + # Use host instead of hostname for sake of shortness + # May add .hostname prop later + return self._val.hostname + + @cached_property + def host(self): + """Decoded host part of URL. + + None for relative URLs. + + """ + raw = self.raw_host + if raw is None: + return None + if "%" in raw: + # Hack for scoped IPv6 addresses like + # fe80::2%Перевірка + # presence of '%' sign means only IPv6 address, so idna is useless. + return raw + return _idna_decode(raw) + + @property + def port(self): + """Port part of URL, with scheme-based fallback. + + None for relative URLs or URLs without explicit port and + scheme without default port substitution. + + """ + return self._val.port or DEFAULT_PORTS.get(self._val.scheme) + + @property + def explicit_port(self): + """Port part of URL, without scheme-based fallback. + + None for relative URLs or URLs without explicit port. + + """ + return self._val.port + + @property + def raw_path(self): + """Encoded path of URL. + + / for absolute URLs without path part. + + """ + ret = self._val.path + if not ret and self.is_absolute(): + ret = "/" + return ret + + @cached_property + def path(self): + """Decoded path of URL. + + / for absolute URLs without path part. + + """ + return self._PATH_UNQUOTER(self.raw_path) + + @cached_property + def query(self): + """A MultiDictProxy representing parsed query parameters in decoded + representation. + + Empty value if URL has no query part. + + """ + ret = MultiDict(parse_qsl(self.raw_query_string, keep_blank_values=True)) + return MultiDictProxy(ret) + + @property + def raw_query_string(self): + """Encoded query part of URL. + + Empty string if query is missing. 
+ + """ + return self._val.query + + @cached_property + def query_string(self): + """Decoded query part of URL. + + Empty string if query is missing. + + """ + return self._QS_UNQUOTER(self.raw_query_string) + + @cached_property + def path_qs(self): + """Decoded path of URL with query.""" + if not self.query_string: + return self.path + return f"{self.path}?{self.query_string}" + + @cached_property + def raw_path_qs(self): + """Encoded path of URL with query.""" + if not self.raw_query_string: + return self.raw_path + return f"{self.raw_path}?{self.raw_query_string}" + + @property + def raw_fragment(self): + """Encoded fragment part of URL. + + Empty string if fragment is missing. + + """ + return self._val.fragment + + @cached_property + def fragment(self): + """Decoded fragment part of URL. + + Empty string if fragment is missing. + + """ + return self._UNQUOTER(self.raw_fragment) + + @cached_property + def raw_parts(self): + """A tuple containing encoded *path* parts. + + ('/',) for absolute URLs if *path* is missing. + + """ + path = self._val.path + if self.is_absolute(): + if not path: + parts = ["/"] + else: + parts = ["/"] + path[1:].split("/") + else: + if path.startswith("/"): + parts = ["/"] + path[1:].split("/") + else: + parts = path.split("/") + return tuple(parts) + + @cached_property + def parts(self): + """A tuple containing decoded *path* parts. + + ('/',) for absolute URLs if *path* is missing. + + """ + return tuple(self._UNQUOTER(part) for part in self.raw_parts) + + @cached_property + def parent(self): + """A new URL with last part of path removed and cleaned up query and + fragment. + + """ + path = self.raw_path + if not path or path == "/": + if self.raw_fragment or self.raw_query_string: + return URL(self._val._replace(query="", fragment=""), encoded=True) + return self + parts = path.split("/") + val = self._val._replace(path="/".join(parts[:-1]), query="", fragment="") + return URL(val, encoded=True) + + @cached_property + def raw_name(self): + """The last part of raw_parts.""" + parts = self.raw_parts + if self.is_absolute(): + parts = parts[1:] + if not parts: + return "" + else: + return parts[-1] + else: + return parts[-1] + + @cached_property + def name(self): + """The last part of parts.""" + return self._UNQUOTER(self.raw_name) + + @cached_property + def raw_suffix(self): + name = self.raw_name + i = name.rfind(".") + if 0 < i < len(name) - 1: + return name[i:] + else: + return "" + + @cached_property + def suffix(self): + return self._UNQUOTER(self.raw_suffix) + + @cached_property + def raw_suffixes(self): + name = self.raw_name + if name.endswith("."): + return () + name = name.lstrip(".") + return tuple("." + suffix for suffix in name.split(".")[1:]) + + @cached_property + def suffixes(self): + return tuple(self._UNQUOTER(suffix) for suffix in self.raw_suffixes) + + @staticmethod + def _validate_authority_uri_abs_path(host, path): + """Ensure that path in URL with authority starts with a leading slash. + + Raise ValueError if not. 
+ """ + if len(host) > 0 and len(path) > 0 and not path.startswith("/"): + raise ValueError( + "Path in a URL with authority should start with a slash ('/') if set" + ) + + def _make_child(self, segments, encoded=False): + """add segments to self._val.path, accounting for absolute vs relative paths""" + # keep the trailing slash if the last segment ends with / + parsed = [""] if segments and segments[-1][-1:] == "/" else [] + for seg in reversed(segments): + if not seg: + continue + if seg[0] == "/": + raise ValueError( + f"Appending path {seg!r} starting from slash is forbidden" + ) + seg = seg if encoded else self._PATH_QUOTER(seg) + if "/" in seg: + parsed += ( + sub for sub in reversed(seg.split("/")) if sub and sub != "." + ) + elif seg != ".": + parsed.append(seg) + parsed.reverse() + old_path = self._val.path + if old_path: + parsed = [*old_path.rstrip("/").split("/"), *parsed] + if self.is_absolute(): + parsed = _normalize_path_segments(parsed) + if parsed and parsed[0] != "": + # inject a leading slash when adding a path to an absolute URL + # where there was none before + parsed = ["", *parsed] + new_path = "/".join(parsed) + return URL( + self._val._replace(path=new_path, query="", fragment=""), encoded=True + ) + + @classmethod + def _normalize_path(cls, path): + # Drop '.' and '..' from str path + + prefix = "" + if path.startswith("/"): + # preserve the "/" root element of absolute paths, copying it to the + # normalised output as per sections 5.2.4 and 6.2.2.3 of rfc3986. + prefix = "/" + path = path[1:] + + segments = path.split("/") + return prefix + "/".join(_normalize_path_segments(segments)) + + @classmethod + def _encode_host(cls, host, human=False): + try: + ip, sep, zone = host.partition("%") + ip = ip_address(ip) + except ValueError: + host = host.lower() + # IDNA encoding is slow, + # skip it for ASCII-only strings + # Don't move the check into _idna_encode() helper + # to reduce the cache size + if human or host.isascii(): + return host + host = _idna_encode(host) + else: + host = ip.compressed + if sep: + host += "%" + zone + if ip.version == 6: + host = "[" + host + "]" + return host + + @classmethod + def _make_netloc( + cls, user, password, host, port, encode=False, encode_host=True, requote=False + ): + quoter = cls._REQUOTER if requote else cls._QUOTER + if encode_host: + ret = cls._encode_host(host) + else: + ret = host + if port is not None: + ret = ret + ":" + str(port) + if password is not None: + if not user: + user = "" + else: + if encode: + user = quoter(user) + if encode: + password = quoter(password) + user = user + ":" + password + elif user and encode: + user = quoter(user) + if user: + ret = user + "@" + ret + return ret + + def with_scheme(self, scheme): + """Return a new URL with scheme replaced.""" + # N.B. doesn't cleanup query/fragment + if not isinstance(scheme, str): + raise TypeError("Invalid scheme type") + if not self.is_absolute(): + raise ValueError("scheme replacement is not allowed for relative URLs") + return URL(self._val._replace(scheme=scheme.lower()), encoded=True) + + def with_user(self, user): + """Return a new URL with user replaced. + + Autoencode user if needed. + + Clear user/password if user is None. + + """ + # N.B. 
doesn't cleanup query/fragment + val = self._val + if user is None: + password = None + elif isinstance(user, str): + user = self._QUOTER(user) + password = val.password + else: + raise TypeError("Invalid user type") + if not self.is_absolute(): + raise ValueError("user replacement is not allowed for relative URLs") + return URL( + self._val._replace( + netloc=self._make_netloc(user, password, val.hostname, val.port) + ), + encoded=True, + ) + + def with_password(self, password): + """Return a new URL with password replaced. + + Autoencode password if needed. + + Clear password if argument is None. + + """ + # N.B. doesn't cleanup query/fragment + if password is None: + pass + elif isinstance(password, str): + password = self._QUOTER(password) + else: + raise TypeError("Invalid password type") + if not self.is_absolute(): + raise ValueError("password replacement is not allowed for relative URLs") + val = self._val + return URL( + self._val._replace( + netloc=self._make_netloc(val.username, password, val.hostname, val.port) + ), + encoded=True, + ) + + def with_host(self, host): + """Return a new URL with host replaced. + + Autoencode host if needed. + + Changing host for relative URLs is not allowed, use .join() + instead. + + """ + # N.B. doesn't cleanup query/fragment + if not isinstance(host, str): + raise TypeError("Invalid host type") + if not self.is_absolute(): + raise ValueError("host replacement is not allowed for relative URLs") + if not host: + raise ValueError("host removing is not allowed") + val = self._val + return URL( + self._val._replace( + netloc=self._make_netloc(val.username, val.password, host, val.port) + ), + encoded=True, + ) + + def with_port(self, port): + """Return a new URL with port replaced. + + Clear port to default if None is passed. + + """ + # N.B. 
doesn't cleanup query/fragment + if port is not None: + if isinstance(port, bool) or not isinstance(port, int): + raise TypeError(f"port should be int or None, got {type(port)}") + if port < 0 or port > 65535: + raise ValueError(f"port must be between 0 and 65535, got {port}") + if not self.is_absolute(): + raise ValueError("port replacement is not allowed for relative URLs") + val = self._val + return URL( + self._val._replace( + netloc=self._make_netloc(val.username, val.password, val.hostname, port) + ), + encoded=True, + ) + + def with_path(self, path, *, encoded=False): + """Return a new URL with path replaced.""" + if not encoded: + path = self._PATH_QUOTER(path) + if self.is_absolute(): + path = self._normalize_path(path) + if len(path) > 0 and path[0] != "/": + path = "/" + path + return URL(self._val._replace(path=path, query="", fragment=""), encoded=True) + + @classmethod + def _query_seq_pairs(cls, quoter, pairs): + for key, val in pairs: + if isinstance(val, (list, tuple)): + for v in val: + yield quoter(key) + "=" + quoter(cls._query_var(v)) + else: + yield quoter(key) + "=" + quoter(cls._query_var(val)) + + @staticmethod + def _query_var(v): + cls = type(v) + if issubclass(cls, str): + return v + if issubclass(cls, float): + if math.isinf(v): + raise ValueError("float('inf') is not supported") + if math.isnan(v): + raise ValueError("float('nan') is not supported") + return str(float(v)) + if issubclass(cls, int) and cls is not bool: + return str(int(v)) + raise TypeError( + "Invalid variable type: value " + "should be str, int or float, got {!r} " + "of type {}".format(v, cls) + ) + + def _get_str_query(self, *args, **kwargs): + if kwargs: + if len(args) > 0: + raise ValueError( + "Either kwargs or single query parameter must be present" + ) + query = kwargs + elif len(args) == 1: + query = args[0] + else: + raise ValueError("Either kwargs or single query parameter must be present") + + if query is None: + query = None + elif isinstance(query, Mapping): + quoter = self._QUERY_PART_QUOTER + query = "&".join(self._query_seq_pairs(quoter, query.items())) + elif isinstance(query, str): + query = self._QUERY_QUOTER(query) + elif isinstance(query, (bytes, bytearray, memoryview)): + raise TypeError( + "Invalid query type: bytes, bytearray and memoryview are forbidden" + ) + elif isinstance(query, Sequence): + quoter = self._QUERY_PART_QUOTER + # We don't expect sequence values if we're given a list of pairs + # already; only mappings like builtin `dict` which can't have the + # same key pointing to multiple values are allowed to use + # `_query_seq_pairs`. + query = "&".join( + quoter(k) + "=" + quoter(self._query_var(v)) for k, v in query + ) + else: + raise TypeError( + "Invalid query type: only str, mapping or " + "sequence of (key, value) pairs is allowed" + ) + + return query + + def with_query(self, *args, **kwargs): + """Return a new URL with query part replaced. + + Accepts any Mapping (e.g. dict, multidict.MultiDict instances) + or str, autoencode the argument if needed. + + A sequence of (key, value) pairs is supported as well. + + It also can take an arbitrary number of keyword arguments. + + Clear query if None is passed. + + """ + # N.B. 
doesn't cleanup query/fragment + + new_query = self._get_str_query(*args, **kwargs) or "" + return URL( + self._val._replace(path=self._val.path, query=new_query), encoded=True + ) + + def update_query(self, *args, **kwargs): + """Return a new URL with query part updated.""" + s = self._get_str_query(*args, **kwargs) + query = None + if s is not None: + new_query = MultiDict(parse_qsl(s, keep_blank_values=True)) + query = MultiDict(self.query) + query.update(new_query) + + return URL( + self._val._replace(query=self._get_str_query(query) or ""), encoded=True + ) + + def with_fragment(self, fragment): + """Return a new URL with fragment replaced. + + Autoencode fragment if needed. + + Clear fragment to default if None is passed. + + """ + # N.B. doesn't cleanup query/fragment + if fragment is None: + raw_fragment = "" + elif not isinstance(fragment, str): + raise TypeError("Invalid fragment type") + else: + raw_fragment = self._FRAGMENT_QUOTER(fragment) + if self.raw_fragment == raw_fragment: + return self + return URL(self._val._replace(fragment=raw_fragment), encoded=True) + + def with_name(self, name): + """Return a new URL with name (last part of path) replaced. + + Query and fragment parts are cleaned up. + + Name is encoded if needed. + + """ + # N.B. DOES cleanup query/fragment + if not isinstance(name, str): + raise TypeError("Invalid name type") + if "/" in name: + raise ValueError("Slash in name is not allowed") + name = self._PATH_QUOTER(name) + if name in (".", ".."): + raise ValueError(". and .. values are forbidden") + parts = list(self.raw_parts) + if self.is_absolute(): + if len(parts) == 1: + parts.append(name) + else: + parts[-1] = name + parts[0] = "" # replace leading '/' + else: + parts[-1] = name + if parts[0] == "/": + parts[0] = "" # replace leading '/' + return URL( + self._val._replace(path="/".join(parts), query="", fragment=""), + encoded=True, + ) + + def with_suffix(self, suffix): + """Return a new URL with suffix (file extension of name) replaced. + + Query and fragment parts are cleaned up. + + suffix is encoded if needed. + """ + if not isinstance(suffix, str): + raise TypeError("Invalid suffix type") + if suffix and not suffix.startswith(".") or suffix == ".": + raise ValueError(f"Invalid suffix {suffix!r}") + name = self.raw_name + if not name: + raise ValueError(f"{self!r} has an empty name") + old_suffix = self.raw_suffix + if not old_suffix: + name = name + suffix + else: + name = name[: -len(old_suffix)] + suffix + return self.with_name(name) + + def join(self, url): + """Join URLs + + Construct a full (“absolute”) URL by combining a “base URL” + (self) with another URL (url). + + Informally, this uses components of the base URL, in + particular the addressing scheme, the network location and + (part of) the path, to provide missing components in the + relative URL. 
+ + """ + # See docs for urllib.parse.urljoin + if not isinstance(url, URL): + raise TypeError("url should be URL") + return URL(urljoin(str(self), str(url)), encoded=True) + + def joinpath(self, *other, encoded=False): + """Return a new URL with the elements in other appended to the path.""" + return self._make_child(other, encoded=encoded) + + def human_repr(self): + """Return decoded human readable string for URL representation.""" + user = _human_quote(self.user, "#/:?@[]") + password = _human_quote(self.password, "#/:?@[]") + host = self.host + if host: + host = self._encode_host(self.host, human=True) + path = _human_quote(self.path, "#?") + query_string = "&".join( + "{}={}".format(_human_quote(k, "#&+;="), _human_quote(v, "#&+;=")) + for k, v in self.query.items() + ) + fragment = _human_quote(self.fragment, "") + return urlunsplit( + SplitResult( + self.scheme, + self._make_netloc( + user, + password, + host, + self._val.port, + encode_host=False, + ), + path, + query_string, + fragment, + ) + ) + + +def _human_quote(s, unsafe): + if not s: + return s + for c in "%" + unsafe: + if c in s: + s = s.replace(c, f"%{ord(c):02X}") + if s.isprintable(): + return s + return "".join(c if c.isprintable() else quote(c) for c in s) + + +_MAXCACHE = 256 + + +@functools.lru_cache(_MAXCACHE) +def _idna_decode(raw): + try: + return idna.decode(raw.encode("ascii")) + except UnicodeError: # e.g. '::1' + return raw.encode("ascii").decode("idna") + + +@functools.lru_cache(_MAXCACHE) +def _idna_encode(host): + try: + return idna.encode(host, uts46=True).decode("ascii") + except UnicodeError: + return host.encode("idna").decode("ascii") + + +@rewrite_module +def cache_clear(): + _idna_decode.cache_clear() + _idna_encode.cache_clear() + + +@rewrite_module +def cache_info(): + return { + "idna_encode": _idna_encode.cache_info(), + "idna_decode": _idna_decode.cache_info(), + } + + +@rewrite_module +def cache_configure(*, idna_encode_size=_MAXCACHE, idna_decode_size=_MAXCACHE): + global _idna_decode, _idna_encode + + _idna_encode = functools.lru_cache(idna_encode_size)(_idna_encode.__wrapped__) + _idna_decode = functools.lru_cache(idna_decode_size)(_idna_decode.__wrapped__) diff --git a/llmeval-env/lib/python3.10/site-packages/yarl/py.typed b/llmeval-env/lib/python3.10/site-packages/yarl/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..dcf2c804da5e19d617a03a6c68aa128d1d1f89a0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/yarl/py.typed @@ -0,0 +1 @@ +# Placeholder
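
As a final smoke check that the vendored yarl copy wires the quoting implementations and IDNA caches together, the sketch below exercises the module-level cache API defined in ``_url.py`` above. It assumes this environment's ``yarl`` 1.9.4 is importable; the sample host is a placeholder, and the cache sizes are arbitrary:

.. code-block:: python

    from yarl import URL, cache_clear, cache_configure, cache_info

    # A non-ASCII host goes through the _idna_encode() LRU cache.
    url = URL("https://пример.example/path")
    print(url.raw_host)      # IDNA-encoded (punycode) form of the host
    print(url.human_repr())  # decoded, human-readable rendering

    # cache_info() reports both IDNA LRU caches; cache_configure() resizes them.
    print(cache_info()["idna_encode"])  # a functools CacheInfo tuple
    cache_configure(idna_encode_size=512, idna_decode_size=512)
    cache_clear()
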