diff --git a/env-llmeval/lib/python3.10/site-packages/charset_normalizer/__init__.py b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..55991fc38062b9c800805437ee49b0cf42b98103 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/__init__.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +""" +Charset-Normalizer +~~~~~~~~~~~~~~ +The Real First Universal Charset Detector. +A library that helps you read text from an unknown charset encoding. +Motivated by chardet, This package is trying to resolve the issue by taking a new approach. +All IANA character set names for which the Python core library provides codecs are supported. + +Basic usage: + >>> from charset_normalizer import from_bytes + >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8')) + >>> best_guess = results.best() + >>> str(best_guess) + 'Bсеки човек има право на образование. Oбразованието!' + +Others methods and usages are available - see the full documentation +at . +:copyright: (c) 2021 by Ahmed TAHRI +:license: MIT, see LICENSE for more details. +""" +import logging + +from .api import from_bytes, from_fp, from_path, is_binary +from .legacy import detect +from .models import CharsetMatch, CharsetMatches +from .utils import set_logging_handler +from .version import VERSION, __version__ + +__all__ = ( + "from_fp", + "from_path", + "from_bytes", + "is_binary", + "detect", + "CharsetMatch", + "CharsetMatches", + "__version__", + "VERSION", + "set_logging_handler", +) + +# Attach a NullHandler to the top level logger by default +# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library + +logging.getLogger("charset_normalizer").addHandler(logging.NullHandler()) diff --git a/env-llmeval/lib/python3.10/site-packages/charset_normalizer/cd.py b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/cd.py new file mode 100644 index 0000000000000000000000000000000000000000..4ea6760c45bce5773bfe4b46d7b3c07c2c139d49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/cd.py @@ -0,0 +1,395 @@ +import importlib +from codecs import IncrementalDecoder +from collections import Counter +from functools import lru_cache +from typing import Counter as TypeCounter, Dict, List, Optional, Tuple + +from .constant import ( + FREQUENCIES, + KO_NAMES, + LANGUAGE_SUPPORTED_COUNT, + TOO_SMALL_SEQUENCE, + ZH_NAMES, +) +from .md import is_suspiciously_successive_range +from .models import CoherenceMatches +from .utils import ( + is_accentuated, + is_latin, + is_multi_byte_encoding, + is_unicode_range_secondary, + unicode_range, +) + + +def encoding_unicode_range(iana_name: str) -> List[str]: + """ + Return associated unicode ranges in a single byte code page. 
+ """ + if is_multi_byte_encoding(iana_name): + raise IOError("Function not supported on multi-byte code page") + + decoder = importlib.import_module( + "encodings.{}".format(iana_name) + ).IncrementalDecoder + + p: IncrementalDecoder = decoder(errors="ignore") + seen_ranges: Dict[str, int] = {} + character_count: int = 0 + + for i in range(0x40, 0xFF): + chunk: str = p.decode(bytes([i])) + + if chunk: + character_range: Optional[str] = unicode_range(chunk) + + if character_range is None: + continue + + if is_unicode_range_secondary(character_range) is False: + if character_range not in seen_ranges: + seen_ranges[character_range] = 0 + seen_ranges[character_range] += 1 + character_count += 1 + + return sorted( + [ + character_range + for character_range in seen_ranges + if seen_ranges[character_range] / character_count >= 0.15 + ] + ) + + +def unicode_range_languages(primary_range: str) -> List[str]: + """ + Return inferred languages used with a unicode range. + """ + languages: List[str] = [] + + for language, characters in FREQUENCIES.items(): + for character in characters: + if unicode_range(character) == primary_range: + languages.append(language) + break + + return languages + + +@lru_cache() +def encoding_languages(iana_name: str) -> List[str]: + """ + Single-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. + """ + unicode_ranges: List[str] = encoding_unicode_range(iana_name) + primary_range: Optional[str] = None + + for specified_range in unicode_ranges: + if "Latin" not in specified_range: + primary_range = specified_range + break + + if primary_range is None: + return ["Latin Based"] + + return unicode_range_languages(primary_range) + + +@lru_cache() +def mb_encoding_languages(iana_name: str) -> List[str]: + """ + Multi-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. + """ + if ( + iana_name.startswith("shift_") + or iana_name.startswith("iso2022_jp") + or iana_name.startswith("euc_j") + or iana_name == "cp932" + ): + return ["Japanese"] + if iana_name.startswith("gb") or iana_name in ZH_NAMES: + return ["Chinese"] + if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: + return ["Korean"] + + return [] + + +@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) +def get_target_features(language: str) -> Tuple[bool, bool]: + """ + Determine main aspects from a supported language if it contains accents and if is pure Latin. + """ + target_have_accents: bool = False + target_pure_latin: bool = True + + for character in FREQUENCIES[language]: + if not target_have_accents and is_accentuated(character): + target_have_accents = True + if target_pure_latin and is_latin(character) is False: + target_pure_latin = False + + return target_have_accents, target_pure_latin + + +def alphabet_languages( + characters: List[str], ignore_non_latin: bool = False +) -> List[str]: + """ + Return associated languages associated to given characters. 
+ """ + languages: List[Tuple[str, float]] = [] + + source_have_accents = any(is_accentuated(character) for character in characters) + + for language, language_characters in FREQUENCIES.items(): + target_have_accents, target_pure_latin = get_target_features(language) + + if ignore_non_latin and target_pure_latin is False: + continue + + if target_have_accents is False and source_have_accents: + continue + + character_count: int = len(language_characters) + + character_match_count: int = len( + [c for c in language_characters if c in characters] + ) + + ratio: float = character_match_count / character_count + + if ratio >= 0.2: + languages.append((language, ratio)) + + languages = sorted(languages, key=lambda x: x[1], reverse=True) + + return [compatible_language[0] for compatible_language in languages] + + +def characters_popularity_compare( + language: str, ordered_characters: List[str] +) -> float: + """ + Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. + The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). + Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) + """ + if language not in FREQUENCIES: + raise ValueError("{} not available".format(language)) + + character_approved_count: int = 0 + FREQUENCIES_language_set = set(FREQUENCIES[language]) + + ordered_characters_count: int = len(ordered_characters) + target_language_characters_count: int = len(FREQUENCIES[language]) + + large_alphabet: bool = target_language_characters_count > 26 + + for character, character_rank in zip( + ordered_characters, range(0, ordered_characters_count) + ): + if character not in FREQUENCIES_language_set: + continue + + character_rank_in_language: int = FREQUENCIES[language].index(character) + expected_projection_ratio: float = ( + target_language_characters_count / ordered_characters_count + ) + character_rank_projection: int = int(character_rank * expected_projection_ratio) + + if ( + large_alphabet is False + and abs(character_rank_projection - character_rank_in_language) > 4 + ): + continue + + if ( + large_alphabet is True + and abs(character_rank_projection - character_rank_in_language) + < target_language_characters_count / 3 + ): + character_approved_count += 1 + continue + + characters_before_source: List[str] = FREQUENCIES[language][ + 0:character_rank_in_language + ] + characters_after_source: List[str] = FREQUENCIES[language][ + character_rank_in_language: + ] + characters_before: List[str] = ordered_characters[0:character_rank] + characters_after: List[str] = ordered_characters[character_rank:] + + before_match_count: int = len( + set(characters_before) & set(characters_before_source) + ) + + after_match_count: int = len( + set(characters_after) & set(characters_after_source) + ) + + if len(characters_before_source) == 0 and before_match_count <= 4: + character_approved_count += 1 + continue + + if len(characters_after_source) == 0 and after_match_count <= 4: + character_approved_count += 1 + continue + + if ( + before_match_count / len(characters_before_source) >= 0.4 + or after_match_count / len(characters_after_source) >= 0.4 + ): + character_approved_count += 1 + continue + + return character_approved_count / len(ordered_characters) + + +def alpha_unicode_split(decoded_sequence: str) -> List[str]: + """ + Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. + Ex. 
a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; + One containing the latin letters and the other hebrew. + """ + layers: Dict[str, str] = {} + + for character in decoded_sequence: + if character.isalpha() is False: + continue + + character_range: Optional[str] = unicode_range(character) + + if character_range is None: + continue + + layer_target_range: Optional[str] = None + + for discovered_range in layers: + if ( + is_suspiciously_successive_range(discovered_range, character_range) + is False + ): + layer_target_range = discovered_range + break + + if layer_target_range is None: + layer_target_range = character_range + + if layer_target_range not in layers: + layers[layer_target_range] = character.lower() + continue + + layers[layer_target_range] += character.lower() + + return list(layers.values()) + + +def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: + """ + This function merge results previously given by the function coherence_ratio. + The return type is the same as coherence_ratio. + """ + per_language_ratios: Dict[str, List[float]] = {} + for result in results: + for sub_result in result: + language, ratio = sub_result + if language not in per_language_ratios: + per_language_ratios[language] = [ratio] + continue + per_language_ratios[language].append(ratio) + + merge = [ + ( + language, + round( + sum(per_language_ratios[language]) / len(per_language_ratios[language]), + 4, + ), + ) + for language in per_language_ratios + ] + + return sorted(merge, key=lambda x: x[1], reverse=True) + + +def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: + """ + We shall NOT return "English—" in CoherenceMatches because it is an alternative + of "English". This function only keeps the best match and remove the em-dash in it. + """ + index_results: Dict[str, List[float]] = dict() + + for result in results: + language, ratio = result + no_em_name: str = language.replace("—", "") + + if no_em_name not in index_results: + index_results[no_em_name] = [] + + index_results[no_em_name].append(ratio) + + if any(len(index_results[e]) > 1 for e in index_results): + filtered_results: CoherenceMatches = [] + + for language in index_results: + filtered_results.append((language, max(index_results[language]))) + + return filtered_results + + return results + + +@lru_cache(maxsize=2048) +def coherence_ratio( + decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None +) -> CoherenceMatches: + """ + Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. + A layer = Character extraction by alphabets/ranges. 
+ """ + + results: List[Tuple[str, float]] = [] + ignore_non_latin: bool = False + + sufficient_match_count: int = 0 + + lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] + if "Latin Based" in lg_inclusion_list: + ignore_non_latin = True + lg_inclusion_list.remove("Latin Based") + + for layer in alpha_unicode_split(decoded_sequence): + sequence_frequencies: TypeCounter[str] = Counter(layer) + most_common = sequence_frequencies.most_common() + + character_count: int = sum(o for c, o in most_common) + + if character_count <= TOO_SMALL_SEQUENCE: + continue + + popular_character_ordered: List[str] = [c for c, o in most_common] + + for language in lg_inclusion_list or alphabet_languages( + popular_character_ordered, ignore_non_latin + ): + ratio: float = characters_popularity_compare( + language, popular_character_ordered + ) + + if ratio < threshold: + continue + elif ratio >= 0.8: + sufficient_match_count += 1 + + results.append((language, round(ratio, 4))) + + if sufficient_match_count >= 3: + break + + return sorted( + filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True + ) diff --git a/env-llmeval/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3824a428ffd621958e1f1f22dfd105c58417ffd0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/charset_normalizer/models.py b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/models.py new file mode 100644 index 0000000000000000000000000000000000000000..a760b9c558d953f6907d29fa31844d07d06f9ce1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/charset_normalizer/models.py @@ -0,0 +1,340 @@ +from encodings.aliases import aliases +from hashlib import sha256 +from json import dumps +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union + +from .constant import TOO_BIG_SEQUENCE +from .utils import iana_name, is_multi_byte_encoding, unicode_range + + +class CharsetMatch: + def __init__( + self, + payload: bytes, + guessed_encoding: str, + mean_mess_ratio: float, + has_sig_or_bom: bool, + languages: "CoherenceMatches", + decoded_payload: Optional[str] = None, + ): + self._payload: bytes = payload + + self._encoding: str = guessed_encoding + self._mean_mess_ratio: float = mean_mess_ratio + self._languages: CoherenceMatches = languages + self._has_sig_or_bom: bool = has_sig_or_bom + self._unicode_ranges: Optional[List[str]] = None + + self._leaves: List[CharsetMatch] = [] + self._mean_coherence_ratio: float = 0.0 + + self._output_payload: Optional[bytes] = None + self._output_encoding: Optional[str] = None + + self._string: Optional[str] = decoded_payload + + def __eq__(self, other: object) -> bool: + if not isinstance(other, CharsetMatch): + raise TypeError( + "__eq__ cannot be invoked on {} and {}.".format( + str(other.__class__), str(self.__class__) + ) + ) + return self.encoding == other.encoding and self.fingerprint == other.fingerprint + + def __lt__(self, other: object) -> bool: + """ + Implemented to make sorted available upon CharsetMatches items. 
+ """ + if not isinstance(other, CharsetMatch): + raise ValueError + + chaos_difference: float = abs(self.chaos - other.chaos) + coherence_difference: float = abs(self.coherence - other.coherence) + + # Below 1% difference --> Use Coherence + if chaos_difference < 0.01 and coherence_difference > 0.02: + return self.coherence > other.coherence + elif chaos_difference < 0.01 and coherence_difference <= 0.02: + # When having a difficult decision, use the result that decoded as many multi-byte as possible. + # preserve RAM usage! + if len(self._payload) >= TOO_BIG_SEQUENCE: + return self.chaos < other.chaos + return self.multi_byte_usage > other.multi_byte_usage + + return self.chaos < other.chaos + + @property + def multi_byte_usage(self) -> float: + return 1.0 - (len(str(self)) / len(self.raw)) + + def __str__(self) -> str: + # Lazy Str Loading + if self._string is None: + self._string = str(self._payload, self._encoding, "strict") + return self._string + + def __repr__(self) -> str: + return "".format(self.encoding, self.fingerprint) + + def add_submatch(self, other: "CharsetMatch") -> None: + if not isinstance(other, CharsetMatch) or other == self: + raise ValueError( + "Unable to add instance <{}> as a submatch of a CharsetMatch".format( + other.__class__ + ) + ) + + other._string = None # Unload RAM usage; dirty trick. + self._leaves.append(other) + + @property + def encoding(self) -> str: + return self._encoding + + @property + def encoding_aliases(self) -> List[str]: + """ + Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855. + """ + also_known_as: List[str] = [] + for u, p in aliases.items(): + if self.encoding == u: + also_known_as.append(p) + elif self.encoding == p: + also_known_as.append(u) + return also_known_as + + @property + def bom(self) -> bool: + return self._has_sig_or_bom + + @property + def byte_order_mark(self) -> bool: + return self._has_sig_or_bom + + @property + def languages(self) -> List[str]: + """ + Return the complete list of possible languages found in decoded sequence. + Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'. + """ + return [e[0] for e in self._languages] + + @property + def language(self) -> str: + """ + Most probable language found in decoded sequence. If none were detected or inferred, the property will return + "Unknown". + """ + if not self._languages: + # Trying to infer the language based on the given encoding + # Its either English or we should not pronounce ourselves in certain cases. + if "ascii" in self.could_be_from_charset: + return "English" + + # doing it there to avoid circular import + from charset_normalizer.cd import encoding_languages, mb_encoding_languages + + languages = ( + mb_encoding_languages(self.encoding) + if is_multi_byte_encoding(self.encoding) + else encoding_languages(self.encoding) + ) + + if len(languages) == 0 or "Latin Based" in languages: + return "Unknown" + + return languages[0] + + return self._languages[0][0] + + @property + def chaos(self) -> float: + return self._mean_mess_ratio + + @property + def coherence(self) -> float: + if not self._languages: + return 0.0 + return self._languages[0][1] + + @property + def percent_chaos(self) -> float: + return round(self.chaos * 100, ndigits=3) + + @property + def percent_coherence(self) -> float: + return round(self.coherence * 100, ndigits=3) + + @property + def raw(self) -> bytes: + """ + Original untouched bytes. 
+ """ + return self._payload + + @property + def submatch(self) -> List["CharsetMatch"]: + return self._leaves + + @property + def has_submatch(self) -> bool: + return len(self._leaves) > 0 + + @property + def alphabets(self) -> List[str]: + if self._unicode_ranges is not None: + return self._unicode_ranges + # list detected ranges + detected_ranges: List[Optional[str]] = [ + unicode_range(char) for char in str(self) + ] + # filter and sort + self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) + return self._unicode_ranges + + @property + def could_be_from_charset(self) -> List[str]: + """ + The complete list of encoding that output the exact SAME str result and therefore could be the originating + encoding. + This list does include the encoding available in property 'encoding'. + """ + return [self._encoding] + [m.encoding for m in self._leaves] + + def output(self, encoding: str = "utf_8") -> bytes: + """ + Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. + Any errors will be simply ignored by the encoder NOT replaced. + """ + if self._output_encoding is None or self._output_encoding != encoding: + self._output_encoding = encoding + self._output_payload = str(self).encode(encoding, "replace") + + return self._output_payload # type: ignore + + @property + def fingerprint(self) -> str: + """ + Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. + """ + return sha256(self.output()).hexdigest() + + +class CharsetMatches: + """ + Container with every CharsetMatch items ordered by default from most probable to the less one. + Act like a list(iterable) but does not implements all related methods. + """ + + def __init__(self, results: Optional[List[CharsetMatch]] = None): + self._results: List[CharsetMatch] = sorted(results) if results else [] + + def __iter__(self) -> Iterator[CharsetMatch]: + yield from self._results + + def __getitem__(self, item: Union[int, str]) -> CharsetMatch: + """ + Retrieve a single item either by its position or encoding name (alias may be used here). + Raise KeyError upon invalid index or encoding not present in results. + """ + if isinstance(item, int): + return self._results[item] + if isinstance(item, str): + item = iana_name(item, False) + for result in self._results: + if item in result.could_be_from_charset: + return result + raise KeyError + + def __len__(self) -> int: + return len(self._results) + + def __bool__(self) -> bool: + return len(self._results) > 0 + + def append(self, item: CharsetMatch) -> None: + """ + Insert a single match. Will be inserted accordingly to preserve sort. + Can be inserted as a submatch. + """ + if not isinstance(item, CharsetMatch): + raise ValueError( + "Cannot append instance '{}' to CharsetMatches".format( + str(item.__class__) + ) + ) + # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) + if len(item.raw) <= TOO_BIG_SEQUENCE: + for match in self._results: + if match.fingerprint == item.fingerprint and match.chaos == item.chaos: + match.add_submatch(item) + return + self._results.append(item) + self._results = sorted(self._results) + + def best(self) -> Optional["CharsetMatch"]: + """ + Simply return the first match. Strict equivalent to matches[0]. + """ + if not self._results: + return None + return self._results[0] + + def first(self) -> Optional["CharsetMatch"]: + """ + Redundant method, call the method best(). Kept for BC reasons. 
+ """ + return self.best() + + +CoherenceMatch = Tuple[str, float] +CoherenceMatches = List[CoherenceMatch] + + +class CliDetectionResult: + def __init__( + self, + path: str, + encoding: Optional[str], + encoding_aliases: List[str], + alternative_encodings: List[str], + language: str, + alphabets: List[str], + has_sig_or_bom: bool, + chaos: float, + coherence: float, + unicode_path: Optional[str], + is_preferred: bool, + ): + self.path: str = path + self.unicode_path: Optional[str] = unicode_path + self.encoding: Optional[str] = encoding + self.encoding_aliases: List[str] = encoding_aliases + self.alternative_encodings: List[str] = alternative_encodings + self.language: str = language + self.alphabets: List[str] = alphabets + self.has_sig_or_bom: bool = has_sig_or_bom + self.chaos: float = chaos + self.coherence: float = coherence + self.is_preferred: bool = is_preferred + + @property + def __dict__(self) -> Dict[str, Any]: # type: ignore + return { + "path": self.path, + "encoding": self.encoding, + "encoding_aliases": self.encoding_aliases, + "alternative_encodings": self.alternative_encodings, + "language": self.language, + "alphabets": self.alphabets, + "has_sig_or_bom": self.has_sig_or_bom, + "chaos": self.chaos, + "coherence": self.coherence, + "unicode_path": self.unicode_path, + "is_preferred": self.is_preferred, + } + + def to_json(self) -> str: + return dumps(self.__dict__, ensure_ascii=True, indent=4) diff --git a/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..a1b5c57543005b310ac2efb02243a6b6ab6d8932 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/METADATA @@ -0,0 +1,441 @@ +Metadata-Version: 2.1 +Name: colorama +Version: 0.4.6 +Summary: Cross-platform colored terminal text. +Project-URL: Homepage, https://github.com/tartley/colorama +Author-email: Jonathan Hartley +License-File: LICENSE.txt +Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Terminals +Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7 +Description-Content-Type: text/x-rst + +.. 
image:: https://img.shields.io/pypi/v/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Supported Python versions + +.. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg + :target: https://github.com/tartley/colorama/actions/workflows/test.yml + :alt: Build Status + +Colorama +======== + +Makes ANSI escape character sequences (for producing colored terminal text and +cursor positioning) work under MS Windows. + +.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif + :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama¤cy_code=USD + :alt: Donate with Paypal + +`PyPI for releases `_ | +`Github for source `_ | +`Colorama for enterprise on Tidelift `_ + +If you find Colorama useful, please |donate| to the authors. Thank you! + +Installation +------------ + +Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.8. + +No requirements other than the standard library. + +.. code-block:: bash + + pip install colorama + # or + conda install -c anaconda colorama + +Description +----------- + +ANSI escape character sequences have long been used to produce colored terminal +text and cursor positioning on Unix and Macs. Colorama makes this work on +Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which +would appear as gobbledygook in the output), and converting them into the +appropriate win32 calls to modify the state of the terminal. On other platforms, +Colorama does nothing. + +This has the upshot of providing a simple cross-platform API for printing +colored terminal text from Python, and has the happy side-effect that existing +applications or libraries which use ANSI sequences to produce colored output on +Linux or Macs can now also work on Windows, simply by calling +``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()`` +(all versions, but may have other side-effects – see below). + +An alternative approach is to install ``ansi.sys`` on Windows machines, which +provides the same behaviour for all applications running in terminals. Colorama +is intended for situations where that isn't easy (e.g., maybe your app doesn't +have an installer.) + +Demo scripts in the source code repository print some colored text using +ANSI sequences. Compare their output under Gnome-terminal's built in ANSI +handling, versus on Windows Command-Prompt using Colorama: + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png + :width: 661 + :height: 357 + :alt: ANSI sequences on Ubuntu under gnome-terminal. + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png + :width: 668 + :height: 325 + :alt: Same ANSI sequences on Windows, using Colorama. + +These screenshots show that, on Windows, Colorama does not support ANSI 'dim +text'; it looks the same as 'normal text'. + +Usage +----- + +Initialisation +.............. + +If the only thing you want from Colorama is to get ANSI escapes to work on +Windows, then run: + +.. code-block:: python + + from colorama import just_fix_windows_console + just_fix_windows_console() + +If you're on a recent version of Windows 10 or better, and your stdout/stderr +are pointing to a Windows console, then this will flip the magic configuration +switch to enable Windows' built-in ANSI support. 
+ +If you're on an older version of Windows, and your stdout/stderr are pointing to +a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a +magic file object that intercepts ANSI escape sequences and issues the +appropriate Win32 calls to emulate them. + +In all other circumstances, it does nothing whatsoever. Basically the idea is +that this makes Windows act like Unix with respect to ANSI escape handling. + +It's safe to call this function multiple times. It's safe to call this function +on non-Windows platforms, but it won't do anything. It's safe to call this +function when one or both of your stdout/stderr are redirected to a file – it +won't do anything to those streams. + +Alternatively, you can use the older interface with more features (but also more +potential footguns): + +.. code-block:: python + + from colorama import init + init() + +This does the same thing as ``just_fix_windows_console``, except for the +following differences: + +- It's not safe to call ``init`` multiple times; you can end up with multiple + layers of wrapping and broken ANSI support. + +- Colorama will apply a heuristic to guess whether stdout/stderr support ANSI, + and if it thinks they don't, then it will wrap ``sys.stdout`` and + ``sys.stderr`` in a magic file object that strips out ANSI escape sequences + before printing them. This happens on all platforms, and can be convenient if + you want to write your code to emit ANSI escape sequences unconditionally, and + let Colorama decide whether they should actually be output. But note that + Colorama's heuristic is not particularly clever. + +- ``init`` also accepts explicit keyword args to enable/disable various + functionality – see below. + +To stop using Colorama before your program exits, simply call ``deinit()``. +This will restore ``stdout`` and ``stderr`` to their original values, so that +Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is +cheaper than calling ``init()`` again (but does the same thing). + +Most users should depend on ``colorama >= 0.4.6``, and use +``just_fix_windows_console``. The old ``init`` interface will be supported +indefinitely for backwards compatibility, but we don't plan to fix any issues +with it, also for backwards compatibility. + +Colored Output +.............. + +Cross-platform printing of colored text can then be done using Colorama's +constant shorthand for ANSI escape sequences. These are deliberately +rudimentary, see below. + +.. code-block:: python + + from colorama import Fore, Back, Style + print(Fore.RED + 'some red text') + print(Back.GREEN + 'and with a green background') + print(Style.DIM + 'and in dim text') + print(Style.RESET_ALL) + print('back to normal now') + +...or simply by manually printing ANSI sequences from your own code: + +.. code-block:: python + + print('\033[31m' + 'some red text') + print('\033[39m') # and reset to default color + +...or, Colorama can be used in conjunction with existing ANSI libraries +such as the venerable `Termcolor `_ +the fabulous `Blessings `_, +or the incredible `_Rich `_. + +If you wish Colorama's Fore, Back and Style constants were more capable, +then consider using one of the above highly capable libraries to generate +colors, etc, and use Colorama just for its primary purpose: to convert +those ANSI sequences to also work on Windows: + +SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama. 
+We are only interested in converting ANSI codes to win32 API calls, not +shortcuts like the above to generate ANSI characters. + +.. code-block:: python + + from colorama import just_fix_windows_console + from termcolor import colored + + # use Colorama to make Termcolor work on Windows too + just_fix_windows_console() + + # then use Termcolor for all colored text output + print(colored('Hello, World!', 'green', 'on_red')) + +Available formatting constants are:: + + Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Style: DIM, NORMAL, BRIGHT, RESET_ALL + +``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will +perform this reset automatically on program exit. + +These are fairly well supported, but not part of the standard:: + + Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + +Cursor Positioning +.................. + +ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for +an example of how to generate them. + +Init Keyword Args +................. + +``init()`` accepts some ``**kwargs`` to override default behaviour. + +init(autoreset=False): + If you find yourself repeatedly sending reset sequences to turn off color + changes at the end of every print, then ``init(autoreset=True)`` will + automate that: + + .. code-block:: python + + from colorama import init + init(autoreset=True) + print(Fore.RED + 'some red text') + print('automatically back to default color again') + +init(strip=None): + Pass ``True`` or ``False`` to override whether ANSI codes should be + stripped from the output. The default behaviour is to strip if on Windows + or if output is redirected (not a tty). + +init(convert=None): + Pass ``True`` or ``False`` to override whether to convert ANSI codes in the + output into win32 calls. The default behaviour is to convert if on Windows + and output is to a tty (terminal). + +init(wrap=True): + On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr`` + with proxy objects, which override the ``.write()`` method to do their work. + If this wrapping causes you problems, then this can be disabled by passing + ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or + ``strip`` or ``convert`` are True. + + When wrapping is disabled, colored printing on non-Windows platforms will + continue to work as normal. To do cross-platform colored output, you can + use Colorama's ``AnsiToWin32`` proxy directly: + + .. code-block:: python + + import sys + from colorama import init, AnsiToWin32 + init(wrap=False) + stream = AnsiToWin32(sys.stderr).stream + + # Python 2 + print >>stream, Fore.BLUE + 'blue text on stderr' + + # Python 3 + print(Fore.BLUE + 'blue text on stderr', file=stream) + +Recognised ANSI Sequences +......................... + +ANSI sequences generally take the form:: + + ESC [ ; ... + +Where ```` is an integer, and ```` is a single letter. Zero or +more params are passed to a ````. If no params are passed, it is +generally synonymous with passing a single zero. No spaces exist in the +sequence; they have been inserted here simply to read more easily. 
+ +The only ANSI sequences that Colorama converts into win32 calls are:: + + ESC [ 0 m # reset all (colors and brightness) + ESC [ 1 m # bright + ESC [ 2 m # dim (looks same as normal brightness) + ESC [ 22 m # normal brightness + + # FOREGROUND: + ESC [ 30 m # black + ESC [ 31 m # red + ESC [ 32 m # green + ESC [ 33 m # yellow + ESC [ 34 m # blue + ESC [ 35 m # magenta + ESC [ 36 m # cyan + ESC [ 37 m # white + ESC [ 39 m # reset + + # BACKGROUND + ESC [ 40 m # black + ESC [ 41 m # red + ESC [ 42 m # green + ESC [ 43 m # yellow + ESC [ 44 m # blue + ESC [ 45 m # magenta + ESC [ 46 m # cyan + ESC [ 47 m # white + ESC [ 49 m # reset + + # cursor positioning + ESC [ y;x H # position cursor at x across, y down + ESC [ y;x f # position cursor at x across, y down + ESC [ n A # move cursor n lines up + ESC [ n B # move cursor n lines down + ESC [ n C # move cursor n characters forward + ESC [ n D # move cursor n characters backward + + # clear the screen + ESC [ mode J # clear the screen + + # clear the line + ESC [ mode K # clear the line + +Multiple numeric params to the ``'m'`` command can be combined into a single +sequence:: + + ESC [ 36 ; 45 ; 1 m # bright cyan text on magenta background + +All other ANSI sequences of the form ``ESC [ ; ... `` +are silently stripped from the output on Windows. + +Any other form of ANSI sequence, such as single-character codes or alternative +initial characters, are not recognised or stripped. It would be cool to add +them though. Let me know if it would be useful for you, via the Issues on +GitHub. + +Status & Known Problems +----------------------- + +I've personally only tested it on Windows XP (CMD, Console2), Ubuntu +(gnome-terminal, xterm), and OS X. + +Some valid ANSI sequences aren't recognised. + +If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the +explanation there of why we do not want PRs that allow Colorama to generate new +types of ANSI codes. + +See outstanding issues and wish-list: +https://github.com/tartley/colorama/issues + +If anything doesn't work for you, or doesn't do what you expected or hoped for, +I'd love to hear about it on that issues list, would be delighted by patches, +and would be happy to grant commit access to anyone who submits a working patch +or two. + +.. _README-hacking.md: README-hacking.md + +License +------- + +Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see +LICENSE file. + +Professional support +-------------------- + +.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for colorama is available as part of the + `Tidelift Subscription`_. + Tidelift gives software development teams a single source for purchasing + and maintaining their software, with professional grade assurances from + the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +Thanks +------ + +See the CHANGELOG for more thanks! + +* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5. 
+* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``, + providing a solution to issue #7's setuptools/distutils debate, + and other fixes. +* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``. +* Matthew McCormick for politely pointing out a longstanding crash on non-Win. +* Ben Hoyt, for a magnificent fix under 64-bit Windows. +* Jesse at Empty Square for submitting a fix for examples in the README. +* User 'jamessp', an observant documentation fix for cursor positioning. +* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7 + fix. +* Julien Stuyck, for wisely suggesting Python3 compatible updates to README. +* Daniel Griffith for multiple fabulous patches. +* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty + output. +* Roger Binns, for many suggestions, valuable feedback, & bug reports. +* Tim Golden for thought and much appreciated feedback on the initial idea. +* User 'Zearin' for updates to the README file. +* John Szakmeister for adding support for light colors +* Charles Merriam for adding documentation to demos +* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes +* Florian Bruhin for a fix when stdout or stderr are None +* Thomas Weininger for fixing ValueError on Windows +* Remi Rampin for better Github integration and fixes to the README file +* Simeon Visser for closing a file handle using 'with' and updating classifiers + to include Python 3.3 and 3.4 +* Andy Neff for fixing RESET of LIGHT_EX colors. +* Jonathan Hartley for the initial idea and implementation. diff --git a/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..687d244ec85af3afce9d54e0eb48a5936247153a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/RECORD @@ -0,0 +1,31 @@ +colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158 +colorama-0.4.6.dist-info/RECORD,, +colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105 +colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491 +colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266 +colorama/__pycache__/__init__.cpython-310.pyc,, +colorama/__pycache__/ansi.cpython-310.pyc,, +colorama/__pycache__/ansitowin32.cpython-310.pyc,, +colorama/__pycache__/initialise.cpython-310.pyc,, +colorama/__pycache__/win32.cpython-310.pyc,, +colorama/__pycache__/winterm.cpython-310.pyc,, +colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128 +colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325 +colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75 +colorama/tests/__pycache__/__init__.cpython-310.pyc,, +colorama/tests/__pycache__/ansi_test.cpython-310.pyc,, +colorama/tests/__pycache__/ansitowin32_test.cpython-310.pyc,, +colorama/tests/__pycache__/initialise_test.cpython-310.pyc,, +colorama/tests/__pycache__/isatty_test.cpython-310.pyc,, +colorama/tests/__pycache__/utils.cpython-310.pyc,, +colorama/tests/__pycache__/winterm_test.cpython-310.pyc,, 
+colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839 +colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678 +colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741 +colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866 +colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079 +colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709 +colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181 +colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134 diff --git a/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d79189fda3251187de18c3998f23ae6fec11b20f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.11.1 +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any diff --git a/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..3105888ec149d10cad51c11d332779e94b548661 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2010 Jonathan Hartley +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holders, nor those of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines/__init__.py b/env-llmeval/lib/python3.10/site-packages/jsonlines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4387969bc14e50894318c40d99b2972f8e4b3e46 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines/__init__.py @@ -0,0 +1,20 @@ +""" +Module for the jsonlines data format. 
+""" + +# expose only public api +from .jsonlines import ( + Error, + InvalidLineError, + Reader, + Writer, + open, +) + +__all__ = [ + "Error", + "InvalidLineError", + "Reader", + "Writer", + "open", +] diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..484815594dcc34552ff863bb82abd7ed4365fedb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/jsonlines.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/jsonlines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6995cb10a9ad13b2e109e001a3bc6f46de3b930 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/jsonlines.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines/jsonlines.py b/env-llmeval/lib/python3.10/site-packages/jsonlines/jsonlines.py new file mode 100644 index 0000000000000000000000000000000000000000..e19730f13ab799c9cba665be1e149879eb700d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/jsonlines/jsonlines.py @@ -0,0 +1,665 @@ +""" +jsonlines implementation +""" + +import builtins +import codecs +import enum +import io +import json +import os +import types +import typing +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Literal, + Optional, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) + +import attr + +orjson: Optional[types.ModuleType] +try: + import orjson +except ImportError: + orjson = None + +ujson: Optional[types.ModuleType] +try: + import ujson +except ImportError: + ujson = None + + +VALID_TYPES = { + bool, + dict, + float, + int, + list, + str, +} + +# Characters to skip at the beginning of a line. Note: at most one such +# character is skipped per line. +SKIPPABLE_SINGLE_INITIAL_CHARS = ( + "\x1e", # RFC7464 text sequence + codecs.BOM_UTF8.decode(), +) + + +class DumpsResultConversion(enum.Enum): + LeaveAsIs = enum.auto() + EncodeToBytes = enum.auto() + DecodeToString = enum.auto() + + +# https://docs.python.org/3/library/functions.html#open +Openable = Union[str, bytes, int, os.PathLike] + +LoadsCallable = Callable[[Union[str, bytes]], Any] +DumpsCallable = Callable[[Any], Union[str, bytes]] + +# Currently, JSON structures cannot be typed properly: +# - https://github.com/python/typing/issues/182 +# - https://github.com/python/mypy/issues/731 +JSONCollection = Union[Dict[str, Any], List[Any]] +JSONScalar = Union[bool, float, int, str] +JSONValue = Union[JSONCollection, JSONScalar] +TJSONValue = TypeVar("TJSONValue", bound=JSONValue) + +TRW = TypeVar("TRW", bound="ReaderWriterBase") + +# Default to using the fastest JSON library for reading, falling back to the +# standard library (always available) if none are installed. +if orjson is not None: + default_loads = orjson.loads +elif ujson is not None: + default_loads = ujson.loads +else: + default_loads = json.loads + + +# For writing, use the stdlib. Other packages may be faster but their behaviour +# (supported types etc.) and output (whitespace etc.) are not the same as the +# stdlib json module, so this should be opt-in via the ‘dumps=’ arg. 
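As the preceding comment notes, faster third-party JSON backends are opt-in for writing rather than the default. A minimal sketch of routing such a backend through the public ``loads=`` and ``dumps=`` arguments of ``jsonlines.open()`` (this assumes ``orjson`` is installed; the file names are illustrative only):

    import jsonlines
    import orjson  # optional dependency; the stdlib json module remains the default

    # Reading: orjson.loads accepts str or bytes, matching LoadsCallable.
    with jsonlines.open("data.jsonl", mode="r", loads=orjson.loads) as reader:
        for obj in reader.iter(type=dict, skip_invalid=True):
            print(obj)

    # Writing: orjson.dumps returns bytes; the Writer detects this and decodes
    # to str before writing to the text-mode file opened by jsonlines.open().
    with jsonlines.open("out.jsonl", mode="w", dumps=orjson.dumps) as writer:
        writer.write({"a": 1})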
+def default_dumps(obj: Any) -> str: + """ + Fake ``dumps()`` function to use as a default marker. + """ + raise NotImplementedError # pragma: no cover + + +@attr.s(auto_exc=True, auto_attribs=True) +class Error(Exception): + """ + Base error class. + """ + + message: str + + +@attr.s(auto_exc=True, auto_attribs=True, init=False) +class InvalidLineError(Error, ValueError): + """ + Error raised when an invalid line is encountered. + + This happens when the line does not contain valid JSON, or if a + specific data type has been requested, and the line contained a + different data type. + + The original line itself is stored on the exception instance as the + ``.line`` attribute, and the line number as ``.lineno``. + + This class subclasses both ``jsonlines.Error`` and the built-in + ``ValueError``. + """ + + #: The invalid line + line: Union[str, bytes] + + #: The line number + lineno: int + + def __init__(self, message: str, line: Union[str, bytes], lineno: int) -> None: + self.line = line.rstrip() + self.lineno = lineno + super().__init__(f"{message} (line {lineno})") + + +@attr.s(auto_attribs=True, repr=False) +class ReaderWriterBase: + """ + Base class with shared behaviour for both the reader and writer. + """ + + _fp: Union[typing.IO[str], typing.IO[bytes], None] = attr.ib( + default=None, init=False + ) + _closed: bool = attr.ib(default=False, init=False) + _should_close_fp: bool = attr.ib(default=False, init=False) + + def close(self) -> None: + """ + Close this reader/writer. + + This closes the underlying file if that file has been opened by + this reader/writer. When an already opened file-like object was + provided, the caller is responsible for closing it. + """ + if self._closed: + return + self._closed = True + if self._fp is not None and self._should_close_fp: + self._fp.close() + + def __repr__(self) -> str: + cls_name = type(self).__name__ + wrapped = self._repr_for_wrapped() + return f"" + + def _repr_for_wrapped(self) -> str: + raise NotImplementedError # pragma: no cover + + def __enter__(self: TRW) -> TRW: + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[types.TracebackType], + ) -> None: + self.close() + + +@attr.s(auto_attribs=True, repr=False) +class Reader(ReaderWriterBase): + """ + Reader for the jsonlines format. + + The first argument must be an iterable that yields JSON encoded + strings. Usually this will be a readable file-like object, such as + an open file or an ``io.TextIO`` instance, but it can also be + something else as long as it yields strings when iterated over. + + Instances are iterable and can be used as a context manager. + + The `loads` argument can be used to replace the standard json + decoder. If specified, it must be a callable that accepts a + (unicode) string and returns the decoded object. 
+ + :param file_or_iterable: file-like object or iterable yielding lines as + strings + :param loads: custom json decoder callable + """ + + _file_or_iterable: Union[ + typing.IO[str], typing.IO[bytes], Iterable[Union[str, bytes]] + ] + _line_iter: Iterator[Tuple[int, Union[bytes, str]]] = attr.ib(init=False) + _loads: LoadsCallable = attr.ib(default=default_loads, kw_only=True) + + def __attrs_post_init__(self) -> None: + if isinstance(self._file_or_iterable, io.IOBase): + self._fp = cast( + Union[typing.IO[str], typing.IO[bytes]], + self._file_or_iterable, + ) + + self._line_iter = enumerate(self._file_or_iterable, 1) + + # No type specified, None not allowed + @overload + def read( + self, + *, + type: Literal[None] = ..., + allow_none: Literal[False] = ..., + skip_empty: bool = ..., + ) -> JSONValue: + ... # pragma: no cover + + # No type specified, None allowed + @overload + def read( + self, + *, + type: Literal[None] = ..., + allow_none: Literal[True], + skip_empty: bool = ..., + ) -> Optional[JSONValue]: + ... # pragma: no cover + + # Type specified, None not allowed + @overload + def read( + self, + *, + type: Type[TJSONValue], + allow_none: Literal[False] = ..., + skip_empty: bool = ..., + ) -> TJSONValue: + ... # pragma: no cover + + # Type specified, None allowed + @overload + def read( + self, + *, + type: Type[TJSONValue], + allow_none: Literal[True], + skip_empty: bool = ..., + ) -> Optional[TJSONValue]: + ... # pragma: no cover + + # Generic definition + @overload + def read( + self, + *, + type: Optional[Type[Any]] = ..., + allow_none: bool = ..., + skip_empty: bool = ..., + ) -> Optional[JSONValue]: + ... # pragma: no cover + + def read( + self, + *, + type: Optional[Type[Any]] = None, + allow_none: bool = False, + skip_empty: bool = False, + ) -> Optional[JSONValue]: + """ + Read and decode a line. + + The optional `type` argument specifies the expected data type. + Supported types are ``dict``, ``list``, ``str``, ``int``, + ``float``, and ``bool``. When specified, non-conforming lines + result in :py:exc:`InvalidLineError`. + + By default, input lines containing ``null`` (in JSON) are + considered invalid, and will cause :py:exc:`InvalidLineError`. + The `allow_none` argument can be used to change this behaviour, + in which case ``None`` will be returned instead. + + If `skip_empty` is set to ``True``, empty lines and lines + containing only whitespace are silently skipped. 
+ """ + if self._closed: + raise RuntimeError("reader is closed") + if type is not None and type not in VALID_TYPES: + raise ValueError("invalid type specified") + + try: + lineno, line = next(self._line_iter) + while skip_empty and not line.rstrip(): + lineno, line = next(self._line_iter) + except StopIteration: + raise EOFError from None + + if isinstance(line, bytes): + try: + line = line.decode("utf-8") + except UnicodeDecodeError as orig_exc: + exc = InvalidLineError( + f"line is not valid utf-8: {orig_exc}", line, lineno + ) + raise exc from orig_exc + + if line.startswith(SKIPPABLE_SINGLE_INITIAL_CHARS): + line = line[1:] + + try: + value: JSONValue = self._loads(line) + except ValueError as orig_exc: + exc = InvalidLineError( + f"line contains invalid json: {orig_exc}", line, lineno + ) + raise exc from orig_exc + + if value is None: + if allow_none: + return None + raise InvalidLineError("line contains null value", line, lineno) + + if type is not None: + valid = isinstance(value, type) + if type is int and isinstance(value, bool): + # isinstance() is not sufficient, since bool is an int subclass + valid = False + if not valid: + raise InvalidLineError( + "line does not match requested type", line, lineno + ) + + return value + + # No type specified, None not allowed + @overload + def iter( + self, + *, + type: Literal[None] = ..., + allow_none: Literal[False] = ..., + skip_empty: bool = ..., + skip_invalid: bool = ..., + ) -> Iterator[JSONValue]: + ... # pragma: no cover + + # No type specified, None allowed + @overload + def iter( + self, + *, + type: Literal[None] = ..., + allow_none: Literal[True], + skip_empty: bool = ..., + skip_invalid: bool = ..., + ) -> Iterator[JSONValue]: + ... # pragma: no cover + + # Type specified, None not allowed + @overload + def iter( + self, + *, + type: Type[TJSONValue], + allow_none: Literal[False] = ..., + skip_empty: bool = ..., + skip_invalid: bool = ..., + ) -> Iterator[TJSONValue]: + ... # pragma: no cover + + # Type specified, None allowed + @overload + def iter( + self, + *, + type: Type[TJSONValue], + allow_none: Literal[True], + skip_empty: bool = ..., + skip_invalid: bool = ..., + ) -> Iterator[Optional[TJSONValue]]: + ... # pragma: no cover + + # Generic definition + @overload + def iter( + self, + *, + type: Optional[Type[TJSONValue]] = ..., + allow_none: bool = ..., + skip_empty: bool = ..., + skip_invalid: bool = ..., + ) -> Iterator[Optional[TJSONValue]]: + ... # pragma: no cover + + def iter( + self, + type: Optional[Type[Any]] = None, + allow_none: bool = False, + skip_empty: bool = False, + skip_invalid: bool = False, + ) -> Iterator[Optional[JSONValue]]: + """ + Iterate over all lines. + + This is the iterator equivalent to repeatedly calling + :py:meth:`~Reader.read()`. If no arguments are specified, this + is the same as directly iterating over this :py:class:`Reader` + instance. + + When `skip_invalid` is set to ``True``, invalid lines will be + silently ignored. + + See :py:meth:`~Reader.read()` for a description of the other + arguments. + """ + try: + while True: + try: + yield self.read( + type=type, allow_none=allow_none, skip_empty=skip_empty + ) + except InvalidLineError: + if not skip_invalid: + raise + except EOFError: + pass + + def __iter__(self) -> Iterator[Any]: + """ + See :py:meth:`~Reader.iter()`. 
+ """ + return self.iter() + + def _repr_for_wrapped(self) -> str: + if self._fp is not None: + return repr_for_fp(self._fp) + class_name = type(self._file_or_iterable).__name__ + return f"<{class_name} at 0x{id(self._file_or_iterable):x}>" + + +@attr.s(auto_attribs=True, repr=False) +class Writer(ReaderWriterBase): + """ + Writer for the jsonlines format. + + Instances can be used as a context manager. + + The `fp` argument must be a file-like object with a ``.write()`` + method accepting either text (unicode) or bytes. + + The `compact` argument can be used to to produce smaller output. + + The `sort_keys` argument can be used to sort keys in json objects, + and will produce deterministic output. + + For more control, provide a a custom encoder callable using the + `dumps` argument. The callable must produce (unicode) string output. + If specified, the `compact` and `sort` arguments will be ignored. + + When the `flush` argument is set to ``True``, the writer will call + ``fp.flush()`` after each written line. + + :param fp: writable file-like object + :param compact: whether to use a compact output format + :param sort_keys: whether to sort object keys + :param dumps: custom encoder callable + :param flush: whether to flush the file-like object after writing each line + """ + + _fp: Union[typing.IO[str], typing.IO[bytes]] = attr.ib(default=None) + _fp_is_binary: bool = attr.ib(default=False, init=False) + _compact: bool = attr.ib(default=False, kw_only=True) + _sort_keys: bool = attr.ib(default=False, kw_only=True) + _flush: bool = attr.ib(default=False, kw_only=True) + _dumps: DumpsCallable = attr.ib(default=default_dumps, kw_only=True) + _dumps_result_conversion: DumpsResultConversion = attr.ib( + default=DumpsResultConversion.LeaveAsIs, init=False + ) + + def __attrs_post_init__(self) -> None: + if isinstance(self._fp, io.TextIOBase): + self._fp_is_binary = False + elif isinstance(self._fp, io.IOBase): + self._fp_is_binary = True + else: + try: + self._fp.write("") # type: ignore[call-overload] + except TypeError: + self._fp_is_binary = True + else: + self._fp_is_binary = False + + if self._dumps is default_dumps: + self._dumps = json.JSONEncoder( + ensure_ascii=False, + separators=(",", ":") if self._compact else (", ", ": "), + sort_keys=self._sort_keys, + ).encode + + # Detect if str-to-bytes conversion (or vice versa) is needed for the + # combination of this file-like object and the used dumps() callable. + # This avoids checking this for each .write(). Note that this + # deliberately does not support ‘dynamic’ return types that depend on + # input and dump options, like simplejson on Python 2 in some cases. + sample_dumps_result = self._dumps({}) + if isinstance(sample_dumps_result, str) and self._fp_is_binary: + self._dumps_result_conversion = DumpsResultConversion.EncodeToBytes + elif isinstance(sample_dumps_result, bytes) and not self._fp_is_binary: + self._dumps_result_conversion = DumpsResultConversion.DecodeToString + + def write(self, obj: Any) -> int: + """ + Encode and write a single object. + + :param obj: the object to encode and write + :return: number of characters or bytes written + """ + if self._closed: + raise RuntimeError("writer is closed") + + line = self._dumps(obj) + + # This handles either str or bytes, but the type checker does not know + # that this code always passes the right type of arguments. 
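+        # The conversion direction was determined once in __attrs_post_init__
+        # by probing a sample dumps() result against the detected file mode,
+        # so no per-write type inspection is needed here.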
+ if self._dumps_result_conversion == DumpsResultConversion.EncodeToBytes: + line = line.encode() # type: ignore[union-attr] + elif self._dumps_result_conversion == DumpsResultConversion.DecodeToString: + line = line.decode() # type: ignore[union-attr] + + fp = self._fp + fp.write(line) # type: ignore[arg-type] + fp.write(b"\n" if self._fp_is_binary else "\n") # type: ignore[call-overload] + + if self._flush: + fp.flush() + + return len(line) + 1 # including newline + + def write_all(self, iterable: Iterable[Any]) -> int: + """ + Encode and write multiple objects. + + :param iterable: an iterable of objects + :return: number of characters or bytes written + """ + return sum(self.write(obj) for obj in iterable) + + def _repr_for_wrapped(self) -> str: + return repr_for_fp(self._fp) + + +@overload +def open( + file: Openable, + mode: Literal["r"] = ..., + *, + loads: Optional[LoadsCallable] = ..., +) -> Reader: + ... # pragma: no cover + + +@overload +def open( + file: Openable, + mode: Literal["w", "a", "x"], + *, + dumps: Optional[DumpsCallable] = ..., + compact: Optional[bool] = ..., + sort_keys: Optional[bool] = ..., + flush: Optional[bool] = ..., +) -> Writer: + ... # pragma: no cover + + +@overload +def open( + file: Openable, + mode: str = ..., + *, + loads: Optional[LoadsCallable] = ..., + dumps: Optional[DumpsCallable] = ..., + compact: Optional[bool] = ..., + sort_keys: Optional[bool] = ..., + flush: Optional[bool] = ..., +) -> Union[Reader, Writer]: + ... # pragma: no cover + + +def open( + file: Openable, + mode: str = "r", + *, + loads: Optional[LoadsCallable] = None, + dumps: Optional[DumpsCallable] = None, + compact: Optional[bool] = None, + sort_keys: Optional[bool] = None, + flush: Optional[bool] = None, +) -> Union[Reader, Writer]: + """ + Open a jsonlines file for reading or writing. + + This is a convenience function to open a file and wrap it in either a + :py:class:`Reader` or :py:class:`Writer` instance, depending on the + specified `mode`. + + Additional keyword arguments will be passed on to the reader and writer; + see their documentation for available options. + + The resulting reader or writer must be closed after use by the + caller, which will also close the opened file. This can be done by + calling ``.close()``, but the easiest way to ensure proper resource + finalisation is to use a ``with`` block (context manager), e.g. + + :: + + with jsonlines.open('out.jsonl', mode='w') as writer: + writer.write(...) + + :param file: name or ‘path-like object’ of the file to open + :param mode: whether to open the file for reading (``r``), + writing (``w``), appending (``a``), or exclusive creation (``x``). + """ + if mode not in {"r", "w", "a", "x"}: + raise ValueError("'mode' must be either 'r', 'w', 'a', or 'x'") + + cls = Reader if mode == "r" else Writer + encoding = "utf-8-sig" if mode == "r" else "utf-8" + fp = builtins.open(file, mode=mode + "t", encoding=encoding) + kwargs = dict( + loads=loads, + dumps=dumps, + compact=compact, + sort_keys=sort_keys, + flush=flush, + ) + kwargs = {key: value for key, value in kwargs.items() if value is not None} + instance: Union[Reader, Writer] = cls(fp, **kwargs) + instance._should_close_fp = True + return instance + + +def repr_for_fp(fp: typing.IO[Any]) -> str: + """ + Helper to make a useful repr() for a file-like object. 
+ """ + name = getattr(fp, "name", None) + if name is not None: + return repr(name) + else: + return repr(fp) diff --git a/env-llmeval/lib/python3.10/site-packages/jsonlines/py.typed b/env-llmeval/lib/python3.10/site-packages/jsonlines/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/REQUESTED b/env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__about__.py b/env-llmeval/lib/python3.10/site-packages/portalocker/__about__.py new file mode 100644 index 0000000000000000000000000000000000000000..e45c44327dfe0627c80af76a8f9922cb5c4330b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/__about__.py @@ -0,0 +1,6 @@ +__package_name__ = 'portalocker' +__author__ = 'Rick van Hattem' +__email__ = 'wolph@wol.ph' +__version__ = '2.8.2' +__description__ = '''Wraps the portalocker recipe for easy usage''' +__url__ = 'https://github.com/WoLpH/portalocker' diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__init__.py b/env-llmeval/lib/python3.10/site-packages/portalocker/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9170e33e9c2d297d769b1d4a8d8766434df02bdc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/__init__.py @@ -0,0 +1,76 @@ +from . import __about__, constants, exceptions, portalocker, utils + +try: # pragma: no cover + from .redis import RedisLock +except ImportError: # pragma: no cover + RedisLock = None # type: ignore + + +#: The package name on Pypi +__package_name__ = __about__.__package_name__ +#: Current author and maintainer, view the git history for the previous ones +__author__ = __about__.__author__ +#: Current author's email address +__email__ = __about__.__email__ +#: Version number +__version__ = '2.8.2' +#: Package description for Pypi +__description__ = __about__.__description__ +#: Package homepage +__url__ = __about__.__url__ + + +#: Exception thrown when the file is already locked by someone else +AlreadyLocked = exceptions.AlreadyLocked +#: Exception thrown if an error occurred during locking +LockException = exceptions.LockException + + +#: Lock a file. Note that this is an advisory lock on Linux/Unix systems +lock = portalocker.lock +#: Unlock a file +unlock = portalocker.unlock + +#: Place an exclusive lock. +#: Only one process may hold an exclusive lock for a given file at a given +#: time. +LOCK_EX: constants.LockFlags = constants.LockFlags.EXCLUSIVE + +#: Place a shared lock. +#: More than one process may hold a shared lock for a given file at a given +#: time. +LOCK_SH: constants.LockFlags = constants.LockFlags.SHARED + +#: Acquire the lock in a non-blocking fashion. 
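+#: Typically combined with an exclusive or shared lock, for example
+#: ``portalocker.lock(fh, LOCK_EX | LOCK_NB)``.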
+LOCK_NB: constants.LockFlags = constants.LockFlags.NON_BLOCKING + +#: Remove an existing lock held by this process. +LOCK_UN: constants.LockFlags = constants.LockFlags.UNBLOCK + +#: Locking flags enum +LockFlags = constants.LockFlags + +#: Locking utility class to automatically handle opening with timeouts and +#: context wrappers +Lock = utils.Lock +RLock = utils.RLock +BoundedSemaphore = utils.BoundedSemaphore +TemporaryFileLock = utils.TemporaryFileLock +open_atomic = utils.open_atomic + +__all__ = [ + 'lock', + 'unlock', + 'LOCK_EX', + 'LOCK_SH', + 'LOCK_NB', + 'LOCK_UN', + 'LockFlags', + 'LockException', + 'Lock', + 'RLock', + 'AlreadyLocked', + 'BoundedSemaphore', + 'open_atomic', + 'RedisLock', +] diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__main__.py b/env-llmeval/lib/python3.10/site-packages/portalocker/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..658a3ec300f2ed7db2515168fe146d229b041cd9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/__main__.py @@ -0,0 +1,98 @@ +import argparse +import logging +import os +import pathlib +import re + +base_path = pathlib.Path(__file__).parent.parent +src_path = base_path / 'portalocker' +dist_path = base_path / 'dist' +_default_output_path = base_path / 'dist' / 'portalocker.py' + +_RELATIVE_IMPORT_RE = re.compile(r'^from \. import (?P.+)$') +_USELESS_ASSIGNMENT_RE = re.compile(r'^(?P\w+) = \1\n$') + +_TEXT_TEMPLATE = """''' +{} +''' + +""" + +logger = logging.getLogger(__name__) + + +def main(argv=None): + parser = argparse.ArgumentParser() + + subparsers = parser.add_subparsers(required=True) + combine_parser = subparsers.add_parser( + 'combine', + help='Combine all Python files into a single unified `portalocker.py` ' + 'file for easy distribution', + ) + combine_parser.add_argument( + '--output-file', + '-o', + type=argparse.FileType('w'), + default=str(_default_output_path), + ) + + combine_parser.set_defaults(func=combine) + args = parser.parse_args(argv) + args.func(args) + + +def _read_file(path, seen_files): + if path in seen_files: + return + + names = set() + seen_files.add(path) + for line in path.open(): + if match := _RELATIVE_IMPORT_RE.match(line): + for name in match.group('names').split(','): + name = name.strip() + names.add(name) + yield from _read_file(src_path / f'{name}.py', seen_files) + else: + yield _clean_line(line, names) + + +def _clean_line(line, names): + # Replace `some_import.spam` with `spam` + if names: + joined_names = '|'.join(names) + line = re.sub(fr'\b({joined_names})\.', '', line) + + # Replace useless assignments (e.g. `spam = spam`) + return _USELESS_ASSIGNMENT_RE.sub('', line) + + +def combine(args): + output_file = args.output_file + pathlib.Path(output_file.name).parent.mkdir(parents=True, exist_ok=True) + + output_file.write( + _TEXT_TEMPLATE.format((base_path / 'README.rst').read_text()), + ) + output_file.write( + _TEXT_TEMPLATE.format((base_path / 'LICENSE').read_text()), + ) + + seen_files = set() + for line in _read_file(src_path / '__init__.py', seen_files): + output_file.write(line) + + output_file.flush() + output_file.close() + + logger.info(f'Wrote combined file to {output_file.name}') + # Run black and ruff if available. If not then just run the file. 
+ os.system(f'black {output_file.name}') + os.system(f'ruff --fix {output_file.name}') + os.system(f'python3 {output_file.name}') + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46905331d1c990b57c813dd45015bce47775b3f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d475f7f0cef5a85a184ed2df2b72ba12f3aef2a5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ca3ea077daa692b4f92b8af33649f9358ba379d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7687f302757f7af3910f91bef276c9e68d8c4b7e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cc825ae27665a5222becfa304dbf4b3bc3c0974 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0818e3227fa0349b7cd5b27486a84d2f32eb3eb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e37948f4d2e85a02cfce56c4ed5252590340f74a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..949c76925dd1660117d6a8be580b435dcd4c684c Binary files 
/dev/null and b/env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/constants.py b/env-llmeval/lib/python3.10/site-packages/portalocker/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..72733c8546f95343b152600efa5771a294f80f4c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/constants.py @@ -0,0 +1,58 @@ +''' +Locking constants + +Lock types: + +- `EXCLUSIVE` exclusive lock +- `SHARED` shared lock + +Lock flags: + +- `NON_BLOCKING` non-blocking + +Manually unlock, only needed internally + +- `UNBLOCK` unlock +''' +import enum +import os + +# The actual tests will execute the code anyhow so the following code can +# safely be ignored from the coverage tests +if os.name == 'nt': # pragma: no cover + import msvcrt + + #: exclusive lock + LOCK_EX = 0x1 + #: shared lock + LOCK_SH = 0x2 + #: non-blocking + LOCK_NB = 0x4 + #: unlock + LOCK_UN = msvcrt.LK_UNLCK # type: ignore + +elif os.name == 'posix': # pragma: no cover + import fcntl + + #: exclusive lock + LOCK_EX = fcntl.LOCK_EX + #: shared lock + LOCK_SH = fcntl.LOCK_SH + #: non-blocking + LOCK_NB = fcntl.LOCK_NB + #: unlock + LOCK_UN = fcntl.LOCK_UN + +else: # pragma: no cover + raise RuntimeError('PortaLocker only defined for nt and posix platforms') + + +class LockFlags(enum.IntFlag): + #: exclusive lock + EXCLUSIVE = LOCK_EX + #: shared lock + SHARED = LOCK_SH + #: non-blocking + NON_BLOCKING = LOCK_NB + #: unlock + UNBLOCK = LOCK_UN diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/exceptions.py b/env-llmeval/lib/python3.10/site-packages/portalocker/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..e871d13acbf6deb057c2e09d5e0fbd9404891f76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/exceptions.py @@ -0,0 +1,27 @@ +import typing + + +class BaseLockException(Exception): # noqa: N818 + # Error codes: + LOCK_FAILED = 1 + + def __init__( + self, + *args: typing.Any, + fh: typing.Union[typing.IO, None, int] = None, + **kwargs: typing.Any, + ) -> None: + self.fh = fh + Exception.__init__(self, *args) + + +class LockException(BaseLockException): + pass + + +class AlreadyLocked(LockException): + pass + + +class FileToLarge(LockException): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/portalocker.py b/env-llmeval/lib/python3.10/site-packages/portalocker/portalocker.py new file mode 100644 index 0000000000000000000000000000000000000000..90307b76ea9bfdfeb3d92a833e89d0e500b2e6c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/portalocker.py @@ -0,0 +1,117 @@ +import contextlib +import os +import typing + +from . import constants, exceptions + +# Alias for readability. 
Due to import recursion issues we cannot do: +# from .constants import LockFlags +LockFlags = constants.LockFlags + + +if os.name == 'nt': # pragma: no cover + import msvcrt + + import pywintypes + import win32con + import win32file + import winerror + + __overlapped = pywintypes.OVERLAPPED() + + def lock(file_: typing.Union[typing.IO, int], flags: LockFlags): + # Windows locking does not support locking through `fh.fileno()` so + # we cast it to make mypy and pyright happy + file_ = typing.cast(typing.IO, file_) + + mode = 0 + if flags & LockFlags.NON_BLOCKING: + mode |= win32con.LOCKFILE_FAIL_IMMEDIATELY + + if flags & LockFlags.EXCLUSIVE: + mode |= win32con.LOCKFILE_EXCLUSIVE_LOCK + + # Save the old position so we can go back to that position but + # still lock from the beginning of the file + savepos = file_.tell() + if savepos: + file_.seek(0) + + os_fh = msvcrt.get_osfhandle(file_.fileno()) # type: ignore + try: + win32file.LockFileEx(os_fh, mode, 0, -0x10000, __overlapped) + except pywintypes.error as exc_value: + # error: (33, 'LockFileEx', 'The process cannot access the file + # because another process has locked a portion of the file.') + if exc_value.winerror == winerror.ERROR_LOCK_VIOLATION: + raise exceptions.AlreadyLocked( + exceptions.LockException.LOCK_FAILED, + exc_value.strerror, + fh=file_, + ) from exc_value + else: + # Q: Are there exceptions/codes we should be dealing with + # here? + raise + finally: + if savepos: + file_.seek(savepos) + + def unlock(file_: typing.IO): + try: + savepos = file_.tell() + if savepos: + file_.seek(0) + + os_fh = msvcrt.get_osfhandle(file_.fileno()) # type: ignore + try: + win32file.UnlockFileEx( + os_fh, + 0, + -0x10000, + __overlapped, + ) + except pywintypes.error as exc: + if exc.winerror != winerror.ERROR_NOT_LOCKED: + # Q: Are there exceptions/codes we should be + # dealing with here? 
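+                    # Anything other than ERROR_NOT_LOCKED is unexpected, so
+                    # re-raise it for the caller to handle.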
+ raise + finally: + if savepos: + file_.seek(savepos) + except OSError as exc: + raise exceptions.LockException( + exceptions.LockException.LOCK_FAILED, + exc.strerror, + fh=file_, + ) from exc + +elif os.name == 'posix': # pragma: no cover + import fcntl + + def lock(file_: typing.Union[typing.IO, int], flags: LockFlags): + locking_exceptions = (IOError,) + with contextlib.suppress(NameError): + locking_exceptions += (BlockingIOError,) # type: ignore + # Locking with NON_BLOCKING without EXCLUSIVE or SHARED enabled results + # in an error + if (flags & LockFlags.NON_BLOCKING) and not flags & ( + LockFlags.SHARED | LockFlags.EXCLUSIVE + ): + raise RuntimeError( + 'When locking in non-blocking mode the SHARED ' + 'or EXCLUSIVE flag must be specified as well', + ) + + try: + fcntl.flock(file_, flags) + except locking_exceptions as exc_value: + # The exception code varies on different systems so we'll catch + # every IO error + raise exceptions.LockException(exc_value, fh=file_) from exc_value + + def unlock(file_: typing.IO): + fcntl.flock(file_.fileno(), LockFlags.UNBLOCK) + +else: # pragma: no cover + raise RuntimeError('PortaLocker only defined for nt and posix platforms') diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/py.typed b/env-llmeval/lib/python3.10/site-packages/portalocker/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/redis.py b/env-llmeval/lib/python3.10/site-packages/portalocker/redis.py new file mode 100644 index 0000000000000000000000000000000000000000..59ee5ff1717a0c0806df28cf065bef6b2276ae6e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/redis.py @@ -0,0 +1,236 @@ +import _thread +import json +import logging +import random +import time +import typing + +from redis import client + +from . import exceptions, utils + +logger = logging.getLogger(__name__) + +DEFAULT_UNAVAILABLE_TIMEOUT = 1 +DEFAULT_THREAD_SLEEP_TIME = 0.1 + + +class PubSubWorkerThread(client.PubSubWorkerThread): # type: ignore + def run(self): + try: + super().run() + except Exception: # pragma: no cover + _thread.interrupt_main() + raise + + +class RedisLock(utils.LockBase): + ''' + An extremely reliable Redis lock based on pubsub with a keep-alive thread + + As opposed to most Redis locking systems based on key/value pairs, + this locking method is based on the pubsub system. The big advantage is + that if the connection gets killed due to network issues, crashing + processes or otherwise, it will still immediately unlock instead of + waiting for a lock timeout. + + To make sure both sides of the lock know about the connection state it is + recommended to set the `health_check_interval` when creating the redis + connection.. + + Args: + channel: the redis channel to use as locking key. + connection: an optional redis connection if you already have one + or if you need to specify the redis connection + timeout: timeout when trying to acquire a lock + check_interval: check interval while waiting + fail_when_locked: after the initial lock failed, return an error + or lock the file. This does not wait for the timeout. + thread_sleep_time: sleep time between fetching messages from redis to + prevent a busy/wait loop. In the case of lock conflicts this + increases the time it takes to resolve the conflict. This should + be smaller than the `check_interval` to be useful. 
+ unavailable_timeout: If the conflicting lock is properly connected + this should never exceed twice your redis latency. Note that this + will increase the wait time possibly beyond your `timeout` and is + always executed if a conflict arises. + redis_kwargs: The redis connection arguments if no connection is + given. The `DEFAULT_REDIS_KWARGS` are used as default, if you want + to override these you need to explicitly specify a value (e.g. + `health_check_interval=0`) + + ''' + + redis_kwargs: typing.Dict[str, typing.Any] + thread: typing.Optional[PubSubWorkerThread] + channel: str + timeout: float + connection: typing.Optional[client.Redis] + pubsub: typing.Optional[client.PubSub] = None + close_connection: bool + + DEFAULT_REDIS_KWARGS: typing.ClassVar[typing.Dict[str, typing.Any]] = dict( + health_check_interval=10, + ) + + def __init__( + self, + channel: str, + connection: typing.Optional[client.Redis] = None, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = False, + thread_sleep_time: float = DEFAULT_THREAD_SLEEP_TIME, + unavailable_timeout: float = DEFAULT_UNAVAILABLE_TIMEOUT, + redis_kwargs: typing.Optional[typing.Dict] = None, + ): + # We don't want to close connections given as an argument + self.close_connection = not connection + + self.thread = None + self.channel = channel + self.connection = connection + self.thread_sleep_time = thread_sleep_time + self.unavailable_timeout = unavailable_timeout + self.redis_kwargs = redis_kwargs or dict() + + for key, value in self.DEFAULT_REDIS_KWARGS.items(): + self.redis_kwargs.setdefault(key, value) + + super().__init__( + timeout=timeout, + check_interval=check_interval, + fail_when_locked=fail_when_locked, + ) + + def get_connection(self) -> client.Redis: + if not self.connection: + self.connection = client.Redis(**self.redis_kwargs) + + return self.connection + + def channel_handler(self, message): + if message.get('type') != 'message': # pragma: no cover + return + + try: + data = json.loads(message.get('data')) + except TypeError: # pragma: no cover + logger.debug('TypeError while parsing: %r', message) + return + + assert self.connection is not None + self.connection.publish(data['response_channel'], str(time.time())) + + @property + def client_name(self): + return f'{self.channel}-lock' + + def acquire( + self, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = None, + ): + timeout = utils.coalesce(timeout, self.timeout, 0.0) + check_interval = utils.coalesce( + check_interval, + self.check_interval, + 0.0, + ) + fail_when_locked = utils.coalesce( + fail_when_locked, + self.fail_when_locked, + ) + + assert not self.pubsub, 'This lock is already active' + connection = self.get_connection() + + timeout_generator = self._timeout_generator(timeout, check_interval) + for _ in timeout_generator: # pragma: no branch + subscribers = connection.pubsub_numsub(self.channel)[0][1] + + if subscribers: + logger.debug( + 'Found %d lock subscribers for %s', + subscribers, + self.channel, + ) + + if self.check_or_kill_lock( + connection, + self.unavailable_timeout, + ): # pragma: no branch + continue + else: # pragma: no cover + subscribers = 0 + + # Note: this should not be changed to an elif because the if + # above can still end up here + if not subscribers: + connection.client_setname(self.client_name) + self.pubsub = connection.pubsub() + 
self.pubsub.subscribe(**{self.channel: self.channel_handler}) + self.thread = PubSubWorkerThread( + self.pubsub, + sleep_time=self.thread_sleep_time, + ) + self.thread.start() + + subscribers = connection.pubsub_numsub(self.channel)[0][1] + if subscribers == 1: # pragma: no branch + return self + else: # pragma: no cover + # Race condition, let's try again + self.release() + + if fail_when_locked: # pragma: no cover + raise exceptions.AlreadyLocked(exceptions) + + raise exceptions.AlreadyLocked(exceptions) + + def check_or_kill_lock(self, connection, timeout): + # Random channel name to get messages back from the lock + response_channel = f'{self.channel}-{random.random()}' + + pubsub = connection.pubsub() + pubsub.subscribe(response_channel) + connection.publish( + self.channel, + json.dumps( + dict( + response_channel=response_channel, + message='ping', + ), + ), + ) + + check_interval = min(self.thread_sleep_time, timeout / 10) + for _ in self._timeout_generator( + timeout, + check_interval, + ): # pragma: no branch + if pubsub.get_message(timeout=check_interval): + pubsub.close() + return True + + for client_ in connection.client_list('pubsub'): # pragma: no cover + if client_.get('name') == self.client_name: + logger.warning('Killing unavailable redis client: %r', client_) + connection.client_kill_filter(client_.get('id')) + return None + + def release(self): + if self.thread: # pragma: no branch + self.thread.stop() + self.thread.join() + self.thread = None + time.sleep(0.01) + + if self.pubsub: # pragma: no branch + self.pubsub.unsubscribe(self.channel) + self.pubsub.close() + self.pubsub = None + + def __del__(self): + self.release() diff --git a/env-llmeval/lib/python3.10/site-packages/portalocker/utils.py b/env-llmeval/lib/python3.10/site-packages/portalocker/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3b5682e05518c28b9f207bc069959f17726df1d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/portalocker/utils.py @@ -0,0 +1,563 @@ +import abc +import atexit +import contextlib +import logging +import os +import pathlib +import random +import tempfile +import time +import typing +import warnings + +from . import constants, exceptions, portalocker + +logger = logging.getLogger(__name__) + +DEFAULT_TIMEOUT = 5 +DEFAULT_CHECK_INTERVAL = 0.25 +DEFAULT_FAIL_WHEN_LOCKED = False +LOCK_METHOD = constants.LockFlags.EXCLUSIVE | constants.LockFlags.NON_BLOCKING + +__all__ = [ + 'Lock', + 'open_atomic', +] + +Filename = typing.Union[str, pathlib.Path] + + +def coalesce(*args: typing.Any, test_value: typing.Any = None) -> typing.Any: + '''Simple coalescing function that returns the first value that is not + equal to the `test_value`. Or `None` if no value is valid. Usually this + means that the last given value is the default value. + + Note that the `test_value` is compared using an identity check + (i.e. `value is not test_value`) so changing the `test_value` won't work + for all values. + + >>> coalesce(None, 1) + 1 + >>> coalesce() + + >>> coalesce(0, False, True) + 0 + >>> coalesce(0, False, True, test_value=0) + False + + # This won't work because of the `is not test_value` type testing: + >>> coalesce([], dict(spam='eggs'), test_value=[]) + [] + ''' + return next((arg for arg in args if arg is not test_value), None) + + +@contextlib.contextmanager +def open_atomic( + filename: Filename, + binary: bool = True, +) -> typing.Iterator[typing.IO]: + '''Open a file for atomic writing. 
Instead of locking this method allows + you to write the entire file and move it to the actual location. Note that + this makes the assumption that a rename is atomic on your platform which + is generally the case but not a guarantee. + + http://docs.python.org/library/os.html#os.rename + + >>> filename = 'test_file.txt' + >>> if os.path.exists(filename): + ... os.remove(filename) + + >>> with open_atomic(filename) as fh: + ... written = fh.write(b'test') + >>> assert os.path.exists(filename) + >>> os.remove(filename) + + >>> import pathlib + >>> path_filename = pathlib.Path('test_file.txt') + + >>> with open_atomic(path_filename) as fh: + ... written = fh.write(b'test') + >>> assert path_filename.exists() + >>> path_filename.unlink() + ''' + # `pathlib.Path` cast in case `path` is a `str` + path: pathlib.Path = pathlib.Path(filename) + + assert not path.exists(), '%r exists' % path + + # Create the parent directory if it doesn't exist + path.parent.mkdir(parents=True, exist_ok=True) + + temp_fh = tempfile.NamedTemporaryFile( + mode=binary and 'wb' or 'w', + dir=str(path.parent), + delete=False, + ) + yield temp_fh + temp_fh.flush() + os.fsync(temp_fh.fileno()) + temp_fh.close() + try: + os.rename(temp_fh.name, path) + finally: + with contextlib.suppress(Exception): + os.remove(temp_fh.name) + + +class LockBase(abc.ABC): # pragma: no cover + #: timeout when trying to acquire a lock + timeout: float + #: check interval while waiting for `timeout` + check_interval: float + #: skip the timeout and immediately fail if the initial lock fails + fail_when_locked: bool + + def __init__( + self, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = None, + ): + self.timeout = coalesce(timeout, DEFAULT_TIMEOUT) + self.check_interval = coalesce(check_interval, DEFAULT_CHECK_INTERVAL) + self.fail_when_locked = coalesce( + fail_when_locked, + DEFAULT_FAIL_WHEN_LOCKED, + ) + + @abc.abstractmethod + def acquire( + self, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = None, + ): + return NotImplemented + + def _timeout_generator( + self, + timeout: typing.Optional[float], + check_interval: typing.Optional[float], + ) -> typing.Iterator[int]: + f_timeout = coalesce(timeout, self.timeout, 0.0) + f_check_interval = coalesce(check_interval, self.check_interval, 0.0) + + yield 0 + i = 0 + + start_time = time.perf_counter() + while start_time + f_timeout > time.perf_counter(): + i += 1 + yield i + + # Take low lock checks into account to stay within the interval + since_start_time = time.perf_counter() - start_time + time.sleep(max(0.001, (i * f_check_interval) - since_start_time)) + + @abc.abstractmethod + def release(self): + return NotImplemented + + def __enter__(self): + return self.acquire() + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]], + exc_value: typing.Optional[BaseException], + traceback: typing.Any, # Should be typing.TracebackType + ) -> typing.Optional[bool]: + self.release() + return None + + def __delete__(self, instance): + instance.release() + + +class Lock(LockBase): + '''Lock manager with built-in timeout + + Args: + filename: filename + mode: the open mode, 'a' or 'ab' should be used for writing. When mode + contains `w` the file will be truncated to 0 bytes. 
+ timeout: timeout when trying to acquire a lock + check_interval: check interval while waiting + fail_when_locked: after the initial lock failed, return an error + or lock the file. This does not wait for the timeout. + **file_open_kwargs: The kwargs for the `open(...)` call + + fail_when_locked is useful when multiple threads/processes can race + when creating a file. If set to true than the system will wait till + the lock was acquired and then return an AlreadyLocked exception. + + Note that the file is opened first and locked later. So using 'w' as + mode will result in truncate _BEFORE_ the lock is checked. + ''' + + def __init__( + self, + filename: Filename, + mode: str = 'a', + timeout: typing.Optional[float] = None, + check_interval: float = DEFAULT_CHECK_INTERVAL, + fail_when_locked: bool = DEFAULT_FAIL_WHEN_LOCKED, + flags: constants.LockFlags = LOCK_METHOD, + **file_open_kwargs, + ): + if 'w' in mode: + truncate = True + mode = mode.replace('w', 'a') + else: + truncate = False + + if timeout is None: + timeout = DEFAULT_TIMEOUT + elif not (flags & constants.LockFlags.NON_BLOCKING): + warnings.warn( + 'timeout has no effect in blocking mode', + stacklevel=1, + ) + + self.fh: typing.Optional[typing.IO] = None + self.filename: str = str(filename) + self.mode: str = mode + self.truncate: bool = truncate + self.timeout: float = timeout + self.check_interval: float = check_interval + self.fail_when_locked: bool = fail_when_locked + self.flags: constants.LockFlags = flags + self.file_open_kwargs = file_open_kwargs + + def acquire( + self, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = None, + ) -> typing.IO: + '''Acquire the locked filehandle''' + + fail_when_locked = coalesce(fail_when_locked, self.fail_when_locked) + + if ( + not (self.flags & constants.LockFlags.NON_BLOCKING) + and timeout is not None + ): + warnings.warn( + 'timeout has no effect in blocking mode', + stacklevel=1, + ) + + # If we already have a filehandle, return it + fh: typing.Optional[typing.IO] = self.fh + if fh: + return fh + + # Get a new filehandler + fh = self._get_fh() + + def try_close(): # pragma: no cover + # Silently try to close the handle if possible, ignore all issues + if fh is not None: + with contextlib.suppress(Exception): + fh.close() + + exception = None + # Try till the timeout has passed + for _ in self._timeout_generator(timeout, check_interval): + exception = None + try: + # Try to lock + fh = self._get_lock(fh) + break + except exceptions.LockException as exc: + # Python will automatically remove the variable from memory + # unless you save it in a different location + exception = exc + + # We already tried to the get the lock + # If fail_when_locked is True, stop trying + if fail_when_locked: + try_close() + raise exceptions.AlreadyLocked(exception) from exc + + # Wait a bit + + if exception: + try_close() + # We got a timeout... 
reraising + raise exceptions.LockException(exception) + + # Prepare the filehandle (truncate if needed) + fh = self._prepare_fh(fh) + + self.fh = fh + return fh + + def release(self): + '''Releases the currently locked file handle''' + if self.fh: + portalocker.unlock(self.fh) + self.fh.close() + self.fh = None + + def _get_fh(self) -> typing.IO: + '''Get a new filehandle''' + return open( # noqa: SIM115 + self.filename, + self.mode, + **self.file_open_kwargs, + ) + + def _get_lock(self, fh: typing.IO) -> typing.IO: + ''' + Try to lock the given filehandle + + returns LockException if it fails''' + portalocker.lock(fh, self.flags) + return fh + + def _prepare_fh(self, fh: typing.IO) -> typing.IO: + ''' + Prepare the filehandle for usage + + If truncate is a number, the file will be truncated to that amount of + bytes + ''' + if self.truncate: + fh.seek(0) + fh.truncate(0) + + return fh + + +class RLock(Lock): + ''' + A reentrant lock, functions in a similar way to threading.RLock in that it + can be acquired multiple times. When the corresponding number of release() + calls are made the lock will finally release the underlying file lock. + ''' + + def __init__( + self, + filename, + mode='a', + timeout=DEFAULT_TIMEOUT, + check_interval=DEFAULT_CHECK_INTERVAL, + fail_when_locked=False, + flags=LOCK_METHOD, + ): + super().__init__( + filename, + mode, + timeout, + check_interval, + fail_when_locked, + flags, + ) + self._acquire_count = 0 + + def acquire( + self, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = None, + ) -> typing.IO: + if self._acquire_count >= 1: + fh = self.fh + else: + fh = super().acquire(timeout, check_interval, fail_when_locked) + self._acquire_count += 1 + assert fh + return fh + + def release(self): + if self._acquire_count == 0: + raise exceptions.LockException( + 'Cannot release more times than acquired', + ) + + if self._acquire_count == 1: + super().release() + self._acquire_count -= 1 + + +class TemporaryFileLock(Lock): + def __init__( + self, + filename='.lock', + timeout=DEFAULT_TIMEOUT, + check_interval=DEFAULT_CHECK_INTERVAL, + fail_when_locked=True, + flags=LOCK_METHOD, + ): + Lock.__init__( + self, + filename=filename, + mode='w', + timeout=timeout, + check_interval=check_interval, + fail_when_locked=fail_when_locked, + flags=flags, + ) + atexit.register(self.release) + + def release(self): + Lock.release(self) + if os.path.isfile(self.filename): # pragma: no branch + os.unlink(self.filename) + + +class BoundedSemaphore(LockBase): + ''' + Bounded semaphore to prevent too many parallel processes from running + + This method is deprecated because multiple processes that are completely + unrelated could end up using the same semaphore. To prevent this, + use `NamedBoundedSemaphore` instead. The + `NamedBoundedSemaphore` is a drop-in replacement for this class. 
+ + >>> semaphore = BoundedSemaphore(2, directory='') + >>> str(semaphore.get_filenames()[0]) + 'bounded_semaphore.00.lock' + >>> str(sorted(semaphore.get_random_filenames())[1]) + 'bounded_semaphore.01.lock' + ''' + + lock: typing.Optional[Lock] + + def __init__( + self, + maximum: int, + name: str = 'bounded_semaphore', + filename_pattern: str = '{name}.{number:02d}.lock', + directory: str = tempfile.gettempdir(), + timeout: typing.Optional[float] = DEFAULT_TIMEOUT, + check_interval: typing.Optional[float] = DEFAULT_CHECK_INTERVAL, + fail_when_locked: typing.Optional[bool] = True, + ): + self.maximum = maximum + self.name = name + self.filename_pattern = filename_pattern + self.directory = directory + self.lock: typing.Optional[Lock] = None + super().__init__( + timeout=timeout, + check_interval=check_interval, + fail_when_locked=fail_when_locked, + ) + + if not name or name == 'bounded_semaphore': + warnings.warn( + '`BoundedSemaphore` without an explicit `name` ' + 'argument is deprecated, use NamedBoundedSemaphore', + DeprecationWarning, + stacklevel=1, + ) + + def get_filenames(self) -> typing.Sequence[pathlib.Path]: + return [self.get_filename(n) for n in range(self.maximum)] + + def get_random_filenames(self) -> typing.Sequence[pathlib.Path]: + filenames = list(self.get_filenames()) + random.shuffle(filenames) + return filenames + + def get_filename(self, number) -> pathlib.Path: + return pathlib.Path(self.directory) / self.filename_pattern.format( + name=self.name, + number=number, + ) + + def acquire( + self, + timeout: typing.Optional[float] = None, + check_interval: typing.Optional[float] = None, + fail_when_locked: typing.Optional[bool] = None, + ) -> typing.Optional[Lock]: + assert not self.lock, 'Already locked' + + filenames = self.get_filenames() + + for n in self._timeout_generator(timeout, check_interval): # pragma: + logger.debug('trying lock (attempt %d) %r', n, filenames) + # no branch + if self.try_lock(filenames): # pragma: no branch + return self.lock # pragma: no cover + + if fail_when_locked := coalesce( + fail_when_locked, + self.fail_when_locked, + ): + raise exceptions.AlreadyLocked() + + return None + + def try_lock(self, filenames: typing.Sequence[Filename]) -> bool: + filename: Filename + for filename in filenames: + logger.debug('trying lock for %r', filename) + self.lock = Lock(filename, fail_when_locked=True) + try: + self.lock.acquire() + except exceptions.AlreadyLocked: + self.lock = None + else: + logger.debug('locked %r', filename) + return True + + return False + + def release(self): # pragma: no cover + if self.lock is not None: + self.lock.release() + self.lock = None + + +class NamedBoundedSemaphore(BoundedSemaphore): + ''' + Bounded semaphore to prevent too many parallel processes from running + + It's also possible to specify a timeout when acquiring the lock to wait + for a resource to become available. This is very similar to + `threading.BoundedSemaphore` but works across multiple processes and across + multiple operating systems. + + Because this works across multiple processes it's important to give the + semaphore a name. This name is used to create the lock files. If you + don't specify a name, a random name will be generated. This means that + you can't use the same semaphore in multiple processes unless you pass the + semaphore object to the other processes. 
+ + >>> semaphore = NamedBoundedSemaphore(2, name='test') + >>> str(semaphore.get_filenames()[0]) + '...test.00.lock' + + >>> semaphore = NamedBoundedSemaphore(2) + >>> 'bounded_semaphore' in str(semaphore.get_filenames()[0]) + True + + ''' + + def __init__( + self, + maximum: int, + name: typing.Optional[str] = None, + filename_pattern: str = '{name}.{number:02d}.lock', + directory: str = tempfile.gettempdir(), + timeout: typing.Optional[float] = DEFAULT_TIMEOUT, + check_interval: typing.Optional[float] = DEFAULT_CHECK_INTERVAL, + fail_when_locked: typing.Optional[bool] = True, + ): + if name is None: + name = 'bounded_semaphore.%d' % random.randint(0, 1000000) + super().__init__( + maximum, + name, + filename_pattern, + directory, + timeout, + check_interval, + fail_when_locked, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0c59782cd1b003eaa28cb15057d43a63de7b79b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pyx new file mode 100644 index 0000000000000000000000000000000000000000..1c9b2f75c39f1475163e216c78e87d9f81685cb6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pyx @@ -0,0 +1,529 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Low-level Acero bindings + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * +from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table, + RecordBatchReader) +from pyarrow.lib import frombytes, tobytes +from pyarrow._compute cimport ( + Expression, FunctionOptions, _ensure_field_ref, _true, + unwrap_null_placement, unwrap_sort_order +) + + +cdef class ExecNodeOptions(_Weakrefable): + """ + Base class for the node options. + + Use one of the subclasses to construct an options object. 
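+
+    For example (an illustrative sketch, not a test)::
+
+        import pyarrow as pa
+        from pyarrow.acero import Declaration, TableSourceNodeOptions
+
+        table = pa.table({"a": [1, 2, 3]})
+        decl = Declaration("table_source", TableSourceNodeOptions(table))
+        result = decl.to_table()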
+ """ + __slots__ = () # avoid mistakingly creating attributes + + cdef void init(self, const shared_ptr[CExecNodeOptions]& sp): + self.wrapped = sp + + cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil: + return self.wrapped + + +cdef class _TableSourceNodeOptions(ExecNodeOptions): + + def _set_options(self, Table table): + cdef: + shared_ptr[CTable] c_table + + c_table = pyarrow_unwrap_table(table) + self.wrapped.reset( + new CTableSourceNodeOptions(c_table) + ) + + +class TableSourceNodeOptions(_TableSourceNodeOptions): + """ + A Source node which accepts a table. + + This is the option class for the "table_source" node factory. + + Parameters + ---------- + table : pyarrow.Table + The table which acts as the data source. + """ + + def __init__(self, Table table): + self._set_options(table) + + +cdef class _FilterNodeOptions(ExecNodeOptions): + + def _set_options(self, Expression filter_expression not None): + self.wrapped.reset( + new CFilterNodeOptions(filter_expression.unwrap()) + ) + + +class FilterNodeOptions(_FilterNodeOptions): + """ + Make a node which excludes some rows from batches passed through it. + + This is the option class for the "filter" node factory. + + The "filter" operation provides an option to define data filtering + criteria. It selects rows where the given expression evaluates to true. + Filters can be written using pyarrow.compute.Expression, and the + expression must have a return type of boolean. + + Parameters + ---------- + filter_expression : pyarrow.compute.Expression + """ + + def __init__(self, Expression filter_expression): + self._set_options(filter_expression) + + +cdef class _ProjectNodeOptions(ExecNodeOptions): + + def _set_options(self, expressions, names=None): + cdef: + Expression expr + vector[CExpression] c_expressions + vector[c_string] c_names + + for expr in expressions: + c_expressions.push_back(expr.unwrap()) + + if names is not None: + if len(names) != len(expressions): + raise ValueError( + "The number of names should be equal to the number of expressions" + ) + + for name in names: + c_names.push_back(tobytes(name)) + + self.wrapped.reset( + new CProjectNodeOptions(c_expressions, c_names) + ) + else: + self.wrapped.reset( + new CProjectNodeOptions(c_expressions) + ) + + +class ProjectNodeOptions(_ProjectNodeOptions): + """ + Make a node which executes expressions on input batches, + producing batches of the same length with new columns. + + This is the option class for the "project" node factory. + + The "project" operation rearranges, deletes, transforms, and + creates columns. Each output column is computed by evaluating + an expression against the source record batch. These must be + scalar expressions (expressions consisting of scalar literals, + field references and scalar functions, i.e. elementwise functions + that return one value for each input row independent of the value + of all other rows). + + Parameters + ---------- + expressions : list of pyarrow.compute.Expression + List of expressions to evaluate against the source batch. This must + be scalar expressions. + names : list of str, optional + List of names for each of the output columns (same length as + `expressions`). If `names` is not provided, the string + representations of exprs will be used. 
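+
+    For example (an illustrative sketch)::
+
+        import pyarrow.compute as pc
+
+        opts = ProjectNodeOptions(
+            [pc.field("a"), pc.multiply(pc.field("a"), 2)],
+            names=["a", "a_times_two"],
+        )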
+ """ + + def __init__(self, expressions, names=None): + self._set_options(expressions, names) + + +cdef class _AggregateNodeOptions(ExecNodeOptions): + + def _set_options(self, aggregates, keys=None): + cdef: + CAggregate c_aggr + vector[CAggregate] c_aggregations + vector[CFieldRef] c_keys + + for arg_names, func_name, opts, name in aggregates: + c_aggr.function = tobytes(func_name) + if opts is not None: + c_aggr.options = (opts).wrapped + else: + c_aggr.options = nullptr + if not isinstance(arg_names, (list, tuple)): + arg_names = [arg_names] + for arg in arg_names: + c_aggr.target.push_back(_ensure_field_ref(arg)) + c_aggr.name = tobytes(name) + + c_aggregations.push_back(move(c_aggr)) + + if keys is None: + keys = [] + for name in keys: + c_keys.push_back(_ensure_field_ref(name)) + + self.wrapped.reset( + new CAggregateNodeOptions(c_aggregations, c_keys) + ) + + +class AggregateNodeOptions(_AggregateNodeOptions): + """ + Make a node which aggregates input batches, optionally grouped by keys. + + This is the option class for the "aggregate" node factory. + + Acero supports two types of aggregates: "scalar" aggregates, + and "hash" aggregates. Scalar aggregates reduce an array or scalar + input to a single scalar output (e.g. computing the mean of a column). + Hash aggregates act like GROUP BY in SQL and first partition data + based on one or more key columns, then reduce the data in each partition. + The aggregate node supports both types of computation, and can compute + any number of aggregations at once. + + Parameters + ---------- + aggregates : list of tuples + Aggregations which will be applied to the targeted fields. + Specified as a list of tuples, where each tuple is one aggregation + specification and consists of: aggregation target column(s) followed + by function name, aggregation function options object and the + output field name. + The target column(s) specification can be a single field reference, + an empty list or a list of fields unary, nullary and n-ary aggregation + functions respectively. Each field reference can be a string + column name or expression. + keys : list of field references, optional + Keys by which aggregations will be grouped. Each key can reference + a field using a string name or expression. + """ + + def __init__(self, aggregates, keys=None): + self._set_options(aggregates, keys) + + +cdef class _OrderByNodeOptions(ExecNodeOptions): + + def _set_options(self, sort_keys, null_placement): + cdef: + vector[CSortKey] c_sort_keys + + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + + self.wrapped.reset( + new COrderByNodeOptions( + COrdering(c_sort_keys, unwrap_null_placement(null_placement)) + ) + ) + + +class OrderByNodeOptions(_OrderByNodeOptions): + """ + Make a node which applies a new ordering to the data. + + Currently this node works by accumulating all data, sorting, and then + emitting the new data with an updated batch index. + Larger-than-memory sort is not currently supported. + + This is the option class for the "order_by" node factory. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + Each field reference can be a string column name or expression. 
+ null_placement : str, default "at_end" + Where nulls in input should be sorted, only applying to + columns/fields mentioned in `sort_keys`. + Accepted values are "at_start", "at_end". + """ + + def __init__(self, sort_keys=(), *, null_placement="at_end"): + self._set_options(sort_keys, null_placement) + + +cdef class _HashJoinNodeOptions(ExecNodeOptions): + + def _set_options( + self, join_type, left_keys, right_keys, left_output=None, right_output=None, + output_suffix_for_left="", output_suffix_for_right="", + ): + cdef: + CJoinType c_join_type + vector[CFieldRef] c_left_keys + vector[CFieldRef] c_right_keys + vector[CFieldRef] c_left_output + vector[CFieldRef] c_right_output + + # join type + if join_type == "left semi": + c_join_type = CJoinType_LEFT_SEMI + elif join_type == "right semi": + c_join_type = CJoinType_RIGHT_SEMI + elif join_type == "left anti": + c_join_type = CJoinType_LEFT_ANTI + elif join_type == "right anti": + c_join_type = CJoinType_RIGHT_ANTI + elif join_type == "inner": + c_join_type = CJoinType_INNER + elif join_type == "left outer": + c_join_type = CJoinType_LEFT_OUTER + elif join_type == "right outer": + c_join_type = CJoinType_RIGHT_OUTER + elif join_type == "full outer": + c_join_type = CJoinType_FULL_OUTER + else: + raise ValueError("Unsupported join type") + + # left/right keys + if not isinstance(left_keys, (list, tuple)): + left_keys = [left_keys] + for key in left_keys: + c_left_keys.push_back(_ensure_field_ref(key)) + if not isinstance(right_keys, (list, tuple)): + right_keys = [right_keys] + for key in right_keys: + c_right_keys.push_back(_ensure_field_ref(key)) + + # left/right output fields + if left_output is not None and right_output is not None: + for colname in left_output: + c_left_output.push_back(_ensure_field_ref(colname)) + for colname in right_output: + c_right_output.push_back(_ensure_field_ref(colname)) + + self.wrapped.reset( + new CHashJoinNodeOptions( + c_join_type, c_left_keys, c_right_keys, + c_left_output, c_right_output, + _true, + tobytes(output_suffix_for_left), + tobytes(output_suffix_for_right) + ) + ) + else: + self.wrapped.reset( + new CHashJoinNodeOptions( + c_join_type, c_left_keys, c_right_keys, + _true, + tobytes(output_suffix_for_left), + tobytes(output_suffix_for_right) + ) + ) + + +class HashJoinNodeOptions(_HashJoinNodeOptions): + """ + Make a node which implements join operation using hash join strategy. + + This is the option class for the "hashjoin" node factory. + + Parameters + ---------- + join_type : str + Type of join. One of "left semi", "right semi", "left anti", + "right anti", "inner", "left outer", "right outer", "full outer". + left_keys : str, Expression or list + Key fields from left input. Each key can be a string column name + or a field expression, or a list of such field references. + right_keys : str, Expression or list + Key fields from right input. See `left_keys` for details. + left_output : list, optional + List of output fields passed from left input. If left and right + output fields are not specified, all valid fields from both left and + right input will be output. Each field can be a string column name + or a field expression. + right_output : list, optional + List of output fields passed from right input. If left and right + output fields are not specified, all valid fields from both left and + right input will be output. Each field can be a string column name + or a field expression. 
+ output_suffix_for_left : str + Suffix added to names of output fields coming from left input + (used to distinguish, if necessary, between fields of the same + name in left and right input and can be left empty if there are + no name collisions). + output_suffix_for_right : str + Suffix added to names of output fields coming from right input, + see `output_suffix_for_left` for details. + """ + + def __init__( + self, join_type, left_keys, right_keys, left_output=None, right_output=None, + output_suffix_for_left="", output_suffix_for_right="" + ): + self._set_options( + join_type, left_keys, right_keys, left_output, right_output, + output_suffix_for_left, output_suffix_for_right + ) + + +cdef class Declaration(_Weakrefable): + """ + Helper class for declaring the nodes of an ExecPlan. + + A Declaration represents an unconstructed ExecNode, and potentially + more since its inputs may also be Declarations or when constructed + with ``from_sequence``. + + The possible ExecNodes to use are registered with a name, + the "factory name", and need to be specified using this name, together + with its corresponding ExecNodeOptions subclass. + + Parameters + ---------- + factory_name : str + The ExecNode factory name, such as "table_source", "filter", + "project" etc. See the ExecNodeOptions subclasses for the exact + factory names to use. + options : ExecNodeOptions + Corresponding ExecNodeOptions subclass (matching the factory name). + inputs : list of Declaration, optional + Input nodes for this declaration. Optional if the node is a source + node, or when the declaration gets combined later with + ``from_sequence``. + + Returns + ------- + Declaration + """ + cdef void init(self, const CDeclaration& c_decl): + self.decl = c_decl + + @staticmethod + cdef wrap(const CDeclaration& c_decl): + cdef Declaration self = Declaration.__new__(Declaration) + self.init(c_decl) + return self + + cdef inline CDeclaration unwrap(self) nogil: + return self.decl + + def __init__(self, factory_name, ExecNodeOptions options, inputs=None): + cdef: + c_string c_factory_name + CDeclaration c_decl + vector[CDeclaration.Input] c_inputs + + c_factory_name = tobytes(factory_name) + + if inputs is not None: + for ipt in inputs: + c_inputs.push_back( + CDeclaration.Input((ipt).unwrap()) + ) + + c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap()) + self.init(c_decl) + + @staticmethod + def from_sequence(decls): + """ + Convenience factory for the common case of a simple sequence of nodes. + + Each of the declarations will be appended to the inputs of the + subsequent declaration, and the final modified declaration will + be returned. + + Parameters + ---------- + decls : list of Declaration + + Returns + ------- + Declaration + """ + cdef: + vector[CDeclaration] c_decls + CDeclaration c_decl + + for decl in decls: + c_decls.push_back(( decl).unwrap()) + + c_decl = CDeclaration.Sequence(c_decls) + return Declaration.wrap(c_decl) + + def __str__(self): + return frombytes(GetResultValue(DeclarationToString(self.decl))) + + def __repr__(self): + return "\n{0}".format(str(self)) + + def to_table(self, bint use_threads=True): + """ + Run the declaration and collect the results into a table. + + This method will implicitly add a sink node to the declaration + to collect results into a table. It will then create an ExecPlan + from the declaration, start the exec plan, block until the plan + has finished, and return the created table. 
+ + Parameters + ---------- + use_threads : bool, default True + If set to False, then all CPU work will be done on the calling + thread. I/O tasks will still happen on the I/O executor + and may be multi-threaded (but should not use significant CPU + resources). + + Returns + ------- + pyarrow.Table + """ + cdef: + shared_ptr[CTable] c_table + + with nogil: + c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads)) + return pyarrow_wrap_table(c_table) + + def to_reader(self, bint use_threads=True): + """Run the declaration and return results as a RecordBatchReader. + + For details about the parameters, see `to_table`. + + Returns + ------- + pyarrow.RecordBatchReader + """ + cdef: + RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader.reset( + GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release() + ) + return reader diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pxd new file mode 100644 index 0000000000000000000000000000000000000000..29b37da3ac4ef36106b10a09d7583bdba8d1a260 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pxd @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
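# Editor's note: a minimal, hedged sketch of how the Declaration and
# ExecNodeOptions classes documented in the acero module above might be
# used from Python. The factory names ("table_source", "aggregate",
# "order_by") and option classes come from the docstrings above; the
# sample table, the "hash_sum" aggregate function and the column names
# are illustrative assumptions, not part of this patch.
import pyarrow as pa
import pyarrow.acero as acero

table = pa.table({"animals": ["Flamingo", "Horse", "Flamingo"],
                  "n_legs": [2, 4, 2]})

source = acero.Declaration("table_source",
                           acero.TableSourceNodeOptions(table))

# Grouped ("hash") aggregation: each aggregate is a
# (target, function, options, output name) tuple, grouped by `keys`.
aggregate = acero.Declaration(
    "aggregate",
    acero.AggregateNodeOptions([("n_legs", "hash_sum", None, "n_legs_sum")],
                               keys=["animals"]))

# Order the aggregated output by the summed column.
order_by = acero.Declaration(
    "order_by", acero.OrderByNodeOptions([("n_legs_sum", "descending")]))

# Each declaration becomes the input of the next one.
plan = acero.Declaration.from_sequence([source, aggregate, order_by])
result = plan.to_table()      # or plan.to_reader() for incremental results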
+ +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + +cdef class UdfContext(_Weakrefable): + cdef: + CUdfContext c_context + + cdef void init(self, const CUdfContext& c_context) + + +cdef class FunctionOptions(_Weakrefable): + cdef: + shared_ptr[CFunctionOptions] wrapped + + cdef const CFunctionOptions* get_options(self) except NULL + cdef void init(self, const shared_ptr[CFunctionOptions]& sp) + + cdef inline shared_ptr[CFunctionOptions] unwrap(self) + + +cdef class _SortOptions(FunctionOptions): + pass + + +cdef CExpression _bind(Expression filter, Schema schema) except * + + +cdef class Expression(_Weakrefable): + + cdef: + CExpression expr + + cdef void init(self, const CExpression& sp) + + @staticmethod + cdef wrap(const CExpression& sp) + + cdef inline CExpression unwrap(self) + + @staticmethod + cdef Expression _expr_or_scalar(object expr) + + +cdef CExpression _true + +cdef CFieldRef _ensure_field_ref(value) except * + +cdef CSortOrder unwrap_sort_order(order) except * + +cdef CNullPlacement unwrap_null_placement(null_placement) except * diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..150dbdb1175803e3c40a1bd2469a4df34ea57e4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Custom documentation additions for compute functions. 
+""" + +function_doc_additions = {} + +function_doc_additions["filter"] = """ + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array(["a", "b", "c", None, "e"]) + >>> mask = pa.array([True, False, None, False, True]) + >>> arr.filter(mask) + + [ + "a", + "e" + ] + >>> arr.filter(mask, null_selection_behavior='emit_null') + + [ + "a", + null, + "e" + ] + """ + +function_doc_additions["mode"] = """ + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array([1, 1, 2, 2, 3, 2, 2, 2]) + >>> modes = pc.mode(arr, 2) + >>> modes[0] + + >>> modes[1] + + """ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b9e681b11dec0ecc7e6e1d74e4b819bca075a95f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pyx new file mode 100644 index 0000000000000000000000000000000000000000..508488c0c3b3c3bcd2d2157f57f625b1e5b92c2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pyx @@ -0,0 +1,1542 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from cython.operator cimport dereference as deref + +from collections import namedtuple +from collections.abc import Mapping + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema, + RecordBatchReader, ensure_type, + maybe_unbox_memory_pool, get_input_stream, + get_writer, native_transcoding_input_stream, + pyarrow_unwrap_batch, pyarrow_unwrap_schema, + pyarrow_unwrap_table, pyarrow_wrap_schema, + pyarrow_wrap_table, pyarrow_wrap_data_type, + pyarrow_unwrap_data_type, Table, RecordBatch, + StopToken, _CRecordBatchWriter) +from pyarrow.lib import frombytes, tobytes, SignalStopHandler + + +cdef unsigned char _single_char(s) except 0: + val = ord(s) + if val == 0 or val > 127: + raise ValueError("Expecting an ASCII character") + return val + + +_InvalidRow = namedtuple( + "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"), + module=__name__) + + +class InvalidRow(_InvalidRow): + """ + Description of an invalid row in a CSV file. + + Parameters + ---------- + expected_columns : int + The expected number of columns in the row. + actual_columns : int + The actual number of columns in the row. 
+ number : int or None + The physical row number if known, otherwise None. + text : str + The contents of the row. + """ + __slots__ = () + + +cdef CInvalidRowResult _handle_invalid_row( + handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error: + # A negative row number means undetermined (because of parallel reading) + row_number = c_row.number if c_row.number >= 0 else None + row = InvalidRow(c_row.expected_columns, c_row.actual_columns, + row_number, frombytes( c_row.text)) + result = handler(row) + if result == 'error': + return CInvalidRowResult_Error + elif result == 'skip': + return CInvalidRowResult_Skip + else: + raise ValueError("Invalid return value for invalid row handler: " + f"expected 'error' or 'skip', got {result!r}") + + +cdef class ReadOptions(_Weakrefable): + """ + Options for reading CSV files. + + Parameters + ---------- + use_threads : bool, optional (default True) + Whether to use multiple threads to accelerate reading + block_size : int, optional + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + Minimum valid value for block size is 1 + skip_rows : int, optional (default 0) + The number of rows to skip before the column names (if any) + and the CSV data. + skip_rows_after_names : int, optional (default 0) + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + column_names : list, optional + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. + autogenerate_column_names : bool, optional (default False) + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + encoding : str, optional (default 'utf8') + The character encoding of the CSV data. Columns that cannot + decode using this encoding can still be read as Binary. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04" + >>> print(s) + 1,2,3 + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + + Ignore the first numbered row and substitute it with defined + or autogenerated column names: + + >>> from pyarrow import csv + >>> read_options = csv.ReadOptions( + ... column_names=["animals", "n_legs", "entry"], + ... skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + >>> read_options = csv.ReadOptions(autogenerate_column_names=True, + ... 
skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + f0: string + f1: int64 + f2: date32[day] + ---- + f0: [["Flamingo","Horse","Brittle stars","Centipede"]] + f1: [[2,4,5,100]] + f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + Remove the first 2 rows of the data: + + >>> read_options = csv.ReadOptions(skip_rows_after_names=2) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + 1: string + 2: int64 + 3: date32[day] + ---- + 1: [["Brittle stars","Centipede"]] + 2: [[5,100]] + 3: [[2022-03-03,2022-03-04]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + # __init__() is not called when unpickling, initialize storage here + def __cinit__(self, *argw, **kwargs): + self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults())) + + def __init__(self, *, use_threads=None, block_size=None, skip_rows=None, + skip_rows_after_names=None, column_names=None, + autogenerate_column_names=None, encoding='utf8'): + if use_threads is not None: + self.use_threads = use_threads + if block_size is not None: + self.block_size = block_size + if skip_rows is not None: + self.skip_rows = skip_rows + if skip_rows_after_names is not None: + self.skip_rows_after_names = skip_rows_after_names + if column_names is not None: + self.column_names = column_names + if autogenerate_column_names is not None: + self.autogenerate_column_names= autogenerate_column_names + # Python-specific option + self.encoding = encoding + + @property + def use_threads(self): + """ + Whether to use multiple threads to accelerate reading. + """ + return deref(self.options).use_threads + + @use_threads.setter + def use_threads(self, value): + deref(self.options).use_threads = value + + @property + def block_size(self): + """ + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + """ + return deref(self.options).block_size + + @block_size.setter + def block_size(self, value): + deref(self.options).block_size = value + + @property + def skip_rows(self): + """ + The number of rows to skip before the column names (if any) + and the CSV data. + See `skip_rows_after_names` for interaction description + """ + return deref(self.options).skip_rows + + @skip_rows.setter + def skip_rows(self, value): + deref(self.options).skip_rows = value + + @property + def skip_rows_after_names(self): + """ + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + """ + return deref(self.options).skip_rows_after_names + + @skip_rows_after_names.setter + def skip_rows_after_names(self, value): + deref(self.options).skip_rows_after_names = value + + @property + def column_names(self): + """ + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. 
+ """ + return [frombytes(s) for s in deref(self.options).column_names] + + @column_names.setter + def column_names(self, value): + deref(self.options).column_names.clear() + for item in value: + deref(self.options).column_names.push_back(tobytes(item)) + + @property + def autogenerate_column_names(self): + """ + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + """ + return deref(self.options).autogenerate_column_names + + @autogenerate_column_names.setter + def autogenerate_column_names(self, value): + deref(self.options).autogenerate_column_names = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ReadOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ReadOptions + + Returns + ------- + bool + """ + return ( + self.use_threads == other.use_threads and + self.block_size == other.block_size and + self.skip_rows == other.skip_rows and + self.skip_rows_after_names == other.skip_rows_after_names and + self.column_names == other.column_names and + self.autogenerate_column_names == + other.autogenerate_column_names and + self.encoding == other.encoding + ) + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options): + out = ReadOptions() + out.options.reset(new CCSVReadOptions(move(options))) + out.encoding = 'utf8' # No way to know this + return out + + def __getstate__(self): + return (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) + + def __setstate__(self, state): + (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class ParseOptions(_Weakrefable): + """ + Options for parsing CSV files. + + Parameters + ---------- + delimiter : 1-character string, optional (default ',') + The character delimiting individual cells in the CSV data. + quote_char : 1-character string or False, optional (default '"') + The character used optionally for quoting CSV values + (False if quoting is not allowed). + double_quote : bool, optional (default True) + Whether two quotes in a quoted CSV value denote a single quote + in the data. + escape_char : 1-character string or False, optional (default False) + The character used optionally for escaping special characters + (False if escaping is not allowed). + newlines_in_values : bool, optional (default False) + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. + ignore_empty_lines : bool, optional (default True) + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + invalid_row_handler : callable, optional (default None) + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals;n_legs;entry\\n" + ... "Flamingo;2;2022-03-01\\n" + ... 
"# Comment here:\\n" + ... "Horse;4;2022-03-02\\n" + ... "Brittle stars;5;2022-03-03\\n" + ... "Centipede;100;2022-03-04" + ... ) + >>> print(s) + animals;n_legs;entry + Flamingo;2;2022-03-01 + # Comment here: + Horse;4;2022-03-02 + Brittle stars;5;2022-03-03 + Centipede;100;2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Read the data from a file skipping rows with comments + and defining the delimiter: + + >>> from pyarrow import csv + >>> def skip_comment(row): + ... if row.text.startswith("# "): + ... return 'skip' + ... else: + ... return 'error' + ... + >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment) + >>> csv.read_csv(source, parse_options=parse_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self._invalid_row_handler = None + self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults())) + + def __init__(self, *, delimiter=None, quote_char=None, double_quote=None, + escape_char=None, newlines_in_values=None, + ignore_empty_lines=None, invalid_row_handler=None): + if delimiter is not None: + self.delimiter = delimiter + if quote_char is not None: + self.quote_char = quote_char + if double_quote is not None: + self.double_quote = double_quote + if escape_char is not None: + self.escape_char = escape_char + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if ignore_empty_lines is not None: + self.ignore_empty_lines = ignore_empty_lines + if invalid_row_handler is not None: + self.invalid_row_handler = invalid_row_handler + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quote_char(self): + """ + The character used optionally for quoting CSV values + (False if quoting is not allowed). + """ + if deref(self.options).quoting: + return chr(deref(self.options).quote_char) + else: + return False + + @quote_char.setter + def quote_char(self, value): + if value is False: + deref(self.options).quoting = False + else: + deref(self.options).quote_char = _single_char(value) + deref(self.options).quoting = True + + @property + def double_quote(self): + """ + Whether two quotes in a quoted CSV value denote a single quote + in the data. + """ + return deref(self.options).double_quote + + @double_quote.setter + def double_quote(self, value): + deref(self.options).double_quote = value + + @property + def escape_char(self): + """ + The character used optionally for escaping special characters + (False if escaping is not allowed). + """ + if deref(self.options).escaping: + return chr(deref(self.options).escape_char) + else: + return False + + @escape_char.setter + def escape_char(self, value): + if value is False: + deref(self.options).escaping = False + else: + deref(self.options).escape_char = _single_char(value) + deref(self.options).escaping = True + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. 
+ """ + return deref(self.options).newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + deref(self.options).newlines_in_values = value + + @property + def ignore_empty_lines(self): + """ + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + """ + return deref(self.options).ignore_empty_lines + + @property + def invalid_row_handler(self): + """ + Optional handler for invalid rows. + + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. + """ + return self._invalid_row_handler + + @invalid_row_handler.setter + def invalid_row_handler(self, value): + if value is not None and not callable(value): + raise TypeError("Expected callable or None, " + f"got instance of {type(value)!r}") + self._invalid_row_handler = value + deref(self.options).invalid_row_handler = MakeInvalidRowHandler( + &_handle_invalid_row, value) + + @ignore_empty_lines.setter + def ignore_empty_lines(self, value): + deref(self.options).ignore_empty_lines = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ParseOptions + + Returns + ------- + bool + """ + return ( + self.delimiter == other.delimiter and + self.quote_char == other.quote_char and + self.double_quote == other.double_quote and + self.escape_char == other.escape_char and + self.newlines_in_values == other.newlines_in_values and + self.ignore_empty_lines == other.ignore_empty_lines and + self._invalid_row_handler == other._invalid_row_handler + ) + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options): + out = ParseOptions() + out.options.reset(new CCSVParseOptions(move(options))) + return out + + def __getstate__(self): + return (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) + + def __setstate__(self, state): + (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class _ISO8601(_Weakrefable): + """ + A special object indicating ISO-8601 parsing. + """ + __slots__ = () + + def __str__(self): + return 'ISO8601' + + def __eq__(self, other): + return isinstance(other, _ISO8601) + + +ISO8601 = _ISO8601() + + +cdef class ConvertOptions(_Weakrefable): + """ + Options for converting CSV data. + + Parameters + ---------- + check_utf8 : bool, optional (default True) + Whether to check UTF8 validity of string columns. + column_types : pyarrow.Schema or dict, optional + Explicitly map column names to column types. Passing this argument + disables type inference on the defined columns. + null_values : list, optional + A sequence of strings that denote nulls in the data + (defaults are appropriate in most cases). Note that by default, + string columns are not checked for null values. To enable + null checking for those, specify ``strings_can_be_null=True``. + true_values : list, optional + A sequence of strings that denote true booleans in the data + (defaults are appropriate in most cases). 
+ false_values : list, optional + A sequence of strings that denote false booleans in the data + (defaults are appropriate in most cases). + decimal_point : 1-character string, optional (default '.') + The character used as decimal point in floating-point and decimal + data. + strings_can_be_null : bool, optional (default False) + Whether string / binary columns can have null values. + If true, then strings in null_values are considered null for + string columns. + If false, then all strings are valid string values. + quoted_strings_can_be_null : bool, optional (default True) + Whether quoted values can be null. + If true, then strings in "null_values" are also considered null + when they appear quoted in the CSV file. Otherwise, quoted values + are never considered null. + include_columns : list, optional + The names of columns to include in the Table. + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + include_missing_columns : bool, optional (default False) + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a column of nulls (whose type is selected using + `column_types`, or null by default). + This option is ignored if `include_columns` is empty. + auto_dict_encode : bool, optional (default False) + Whether to try to automatically dict-encode string / binary data. + If true, then when type inference detects a string or binary column, + it it dict-encoded up to `auto_dict_max_cardinality` distinct values + (per chunk), after which it switches to regular encoding. + This setting is ignored for non-inferred columns (those in + `column_types`). + auto_dict_max_cardinality : int, optional + The maximum dictionary cardinality for `auto_dict_encode`. + This value is per chunk. + timestamp_parsers : list, optional + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry,fast\\n" + ... "Flamingo,2,01/03/2022,Yes\\n" + ... "Horse,4,02/03/2022,Yes\\n" + ... "Brittle stars,5,03/03/2022,No\\n" + ... "Centipede,100,04/03/2022,No\\n" + ... ",6,05/03/2022," + ... ) + >>> print(s) + animals,n_legs,entry,fast + Flamingo,2,01/03/2022,Yes + Horse,4,02/03/2022,Yes + Brittle stars,5,03/03/2022,No + Centipede,100,04/03/2022,No + ,6,05/03/2022, + + Change the type of a column: + + >>> import pyarrow as pa + >>> from pyarrow import csv + >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()}) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: double + entry: string + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]] + fast: [["Yes","Yes","No","No",""]] + + Define a date parsing format to get a timestamp type column + (in case dates are not in ISO format and not converted by default): + + >>> convert_options = csv.ConvertOptions( + ... 
timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: timestamp[s] + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [["Yes","Yes","No","No",""]] + + Specify a subset of columns to be read: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals", "n_legs"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + + List additional column to be included as a null typed column: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals", "n_legs", "location"], + ... include_missing_columns=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + location: null + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + location: [5 nulls] + + Define columns as dictionary type (by default only the + string/binary columns are dictionary encoded): + + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"], + ... auto_dict_encode=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: dictionary + n_legs: int64 + entry: timestamp[s] + fast: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Horse","Brittle stars","Centipede",""] -- indices: + [0,1,2,3,4]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [ -- dictionary: + ["Yes","No",""] -- indices: + [0,0,1,1,2]] + + Set upper limit for the number of categories. If the categories + is more than the limit, the conversion to dictionary will not + happen: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals"], + ... auto_dict_encode=True, + ... auto_dict_max_cardinality=2) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + + Set empty strings to missing values: + + >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"], + ... strings_can_be_null=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",null]] + n_legs: [[2,4,5,100,6]] + + Define values to be True and False when converting a column + into a bool type: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["fast"], + ... false_values=["No"], + ... 
true_values=["Yes"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + fast: bool + ---- + fast: [[true,true,false,false,null]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self.options.reset( + new CCSVConvertOptions(CCSVConvertOptions.Defaults())) + + def __init__(self, *, check_utf8=None, column_types=None, null_values=None, + true_values=None, false_values=None, decimal_point=None, + strings_can_be_null=None, quoted_strings_can_be_null=None, + include_columns=None, include_missing_columns=None, + auto_dict_encode=None, auto_dict_max_cardinality=None, + timestamp_parsers=None): + if check_utf8 is not None: + self.check_utf8 = check_utf8 + if column_types is not None: + self.column_types = column_types + if null_values is not None: + self.null_values = null_values + if true_values is not None: + self.true_values = true_values + if false_values is not None: + self.false_values = false_values + if decimal_point is not None: + self.decimal_point = decimal_point + if strings_can_be_null is not None: + self.strings_can_be_null = strings_can_be_null + if quoted_strings_can_be_null is not None: + self.quoted_strings_can_be_null = quoted_strings_can_be_null + if include_columns is not None: + self.include_columns = include_columns + if include_missing_columns is not None: + self.include_missing_columns = include_missing_columns + if auto_dict_encode is not None: + self.auto_dict_encode = auto_dict_encode + if auto_dict_max_cardinality is not None: + self.auto_dict_max_cardinality = auto_dict_max_cardinality + if timestamp_parsers is not None: + self.timestamp_parsers = timestamp_parsers + + @property + def check_utf8(self): + """ + Whether to check UTF8 validity of string columns. + """ + return deref(self.options).check_utf8 + + @check_utf8.setter + def check_utf8(self, value): + deref(self.options).check_utf8 = value + + @property + def strings_can_be_null(self): + """ + Whether string / binary columns can have null values. + """ + return deref(self.options).strings_can_be_null + + @strings_can_be_null.setter + def strings_can_be_null(self, value): + deref(self.options).strings_can_be_null = value + + @property + def quoted_strings_can_be_null(self): + """ + Whether quoted values can be null. + """ + return deref(self.options).quoted_strings_can_be_null + + @quoted_strings_can_be_null.setter + def quoted_strings_can_be_null(self, value): + deref(self.options).quoted_strings_can_be_null = value + + @property + def column_types(self): + """ + Explicitly map column names to column types. + """ + d = {frombytes(item.first): pyarrow_wrap_data_type(item.second) + for item in deref(self.options).column_types} + return d + + @column_types.setter + def column_types(self, value): + cdef: + shared_ptr[CDataType] typ + + if isinstance(value, Mapping): + value = value.items() + + deref(self.options).column_types.clear() + for item in value: + if isinstance(item, Field): + k = item.name + v = item.type + else: + k, v = item + typ = pyarrow_unwrap_data_type(ensure_type(v)) + assert typ != NULL + deref(self.options).column_types[tobytes(k)] = typ + + @property + def null_values(self): + """ + A sequence of strings that denote nulls in the data. 
+ """ + return [frombytes(x) for x in deref(self.options).null_values] + + @null_values.setter + def null_values(self, value): + deref(self.options).null_values = [tobytes(x) for x in value] + + @property + def true_values(self): + """ + A sequence of strings that denote true booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).true_values] + + @true_values.setter + def true_values(self, value): + deref(self.options).true_values = [tobytes(x) for x in value] + + @property + def false_values(self): + """ + A sequence of strings that denote false booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).false_values] + + @false_values.setter + def false_values(self, value): + deref(self.options).false_values = [tobytes(x) for x in value] + + @property + def decimal_point(self): + """ + The character used as decimal point in floating-point and decimal + data. + """ + return chr(deref(self.options).decimal_point) + + @decimal_point.setter + def decimal_point(self, value): + deref(self.options).decimal_point = _single_char(value) + + @property + def auto_dict_encode(self): + """ + Whether to try to automatically dict-encode string / binary data. + """ + return deref(self.options).auto_dict_encode + + @auto_dict_encode.setter + def auto_dict_encode(self, value): + deref(self.options).auto_dict_encode = value + + @property + def auto_dict_max_cardinality(self): + """ + The maximum dictionary cardinality for `auto_dict_encode`. + + This value is per chunk. + """ + return deref(self.options).auto_dict_max_cardinality + + @auto_dict_max_cardinality.setter + def auto_dict_max_cardinality(self, value): + deref(self.options).auto_dict_max_cardinality = value + + @property + def include_columns(self): + """ + The names of columns to include in the Table. + + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + """ + return [frombytes(s) for s in deref(self.options).include_columns] + + @include_columns.setter + def include_columns(self, value): + deref(self.options).include_columns.clear() + for item in value: + deref(self.options).include_columns.push_back(tobytes(item)) + + @property + def include_missing_columns(self): + """ + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a null column (whose type is selected using `column_types`, + or null by default). + This option is ignored if `include_columns` is empty. + """ + return deref(self.options).include_missing_columns + + @include_missing_columns.setter + def include_missing_columns(self, value): + deref(self.options).include_missing_columns = value + + @property + def timestamp_parsers(self): + """ + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. 
+ """ + cdef: + shared_ptr[CTimestampParser] c_parser + c_string kind + + parsers = [] + for c_parser in deref(self.options).timestamp_parsers: + kind = deref(c_parser).kind() + if kind == b'strptime': + parsers.append(frombytes(deref(c_parser).format())) + else: + assert kind == b'iso8601' + parsers.append(ISO8601) + + return parsers + + @timestamp_parsers.setter + def timestamp_parsers(self, value): + cdef: + vector[shared_ptr[CTimestampParser]] c_parsers + + for v in value: + if isinstance(v, str): + c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v))) + elif v == ISO8601: + c_parsers.push_back(CTimestampParser.MakeISO8601()) + else: + raise TypeError("Expected list of str or ISO8601 objects") + + deref(self.options).timestamp_parsers = move(c_parsers) + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options): + out = ConvertOptions() + out.options.reset(new CCSVConvertOptions(move(options))) + return out + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ConvertOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ConvertOptions + + Returns + ------- + bool + """ + return ( + self.check_utf8 == other.check_utf8 and + self.column_types == other.column_types and + self.null_values == other.null_values and + self.true_values == other.true_values and + self.false_values == other.false_values and + self.decimal_point == other.decimal_point and + self.timestamp_parsers == other.timestamp_parsers and + self.strings_can_be_null == other.strings_can_be_null and + self.quoted_strings_can_be_null == + other.quoted_strings_can_be_null and + self.auto_dict_encode == other.auto_dict_encode and + self.auto_dict_max_cardinality == + other.auto_dict_max_cardinality and + self.include_columns == other.include_columns and + self.include_missing_columns == other.include_missing_columns + ) + + def __getstate__(self): + return (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) + + def __setstate__(self, state): + (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef _get_reader(input_file, ReadOptions read_options, + shared_ptr[CInputStream]* out): + use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + if read_options is not None: + out[0] = native_transcoding_input_stream(out[0], + read_options.encoding, + 'utf8') + + +cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out): + if read_options is None: + out[0] = CCSVReadOptions.Defaults() + else: + out[0] = deref(read_options.options) + + +cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out): + if parse_options is None: + out[0] = CCSVParseOptions.Defaults() + else: + out[0] = deref(parse_options.options) + + +cdef _get_convert_options(ConvertOptions convert_options, + CCSVConvertOptions* out): + if convert_options is None: + out[0] = CCSVConvertOptions.Defaults() + else: + out[0] = 
deref(convert_options.options) + + +cdef class CSVStreamingReader(RecordBatchReader): + """An object that reads record batches incrementally from a CSV file. + + Should not be instantiated directly by user code. + """ + cdef readonly: + Schema schema + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.csv.open_csv() instead." + .format(self.__class__.__name__)) + + # Note about cancellation: we cannot create a SignalStopHandler + # by default here, as several CSVStreamingReader instances may be + # created (including by the same thread). Handling cancellation + # would require having the user pass the SignalStopHandler. + # (in addition to solving ARROW-11853) + + cdef _open(self, shared_ptr[CInputStream] stream, + CCSVReadOptions c_read_options, + CCSVParseOptions c_parse_options, + CCSVConvertOptions c_convert_options, + MemoryPool memory_pool): + cdef: + shared_ptr[CSchema] c_schema + CIOContext io_context + + io_context = CIOContext(maybe_unbox_memory_pool(memory_pool)) + + with nogil: + self.reader = GetResultValue( + CCSVStreamingReader.Make( + io_context, stream, + move(c_read_options), move(c_parse_options), + move(c_convert_options))) + c_schema = self.reader.get().schema() + + self.schema = pyarrow_wrap_schema(c_schema) + + +def read_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Read a Table from a stream of CSV data. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. + read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.Table` + Contents of the CSV file as a in-memory table. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry\\n" + ... "Flamingo,2,2022-03-01\\n" + ... "Horse,4,2022-03-02\\n" + ... "Brittle stars,5,2022-03-03\\n" + ... "Centipede,100,2022-03-04" + ... 
) + >>> print(s) + animals,n_legs,entry + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Reading from the file + + >>> from pyarrow import csv + >>> csv.read_csv(source) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CIOContext io_context + SharedPtrNoGIL[CCSVReader] reader + shared_ptr[CTable] table + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + with SignalStopHandler() as stop_handler: + io_context = CIOContext( + maybe_unbox_memory_pool(memory_pool), + ( stop_handler.stop_token).stop_token) + reader = GetResultValue(CCSVReader.Make( + io_context, stream, + c_read_options, c_parse_options, c_convert_options)) + + with nogil: + table = GetResultValue(reader.get().Read()) + + return pyarrow_wrap_table(table) + + +def open_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Open a streaming reader of CSV data. + + Reading using this function is always single-threaded. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. 
+ read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.csv.CSVStreamingReader` + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CSVStreamingReader reader + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + reader = CSVStreamingReader.__new__(CSVStreamingReader) + reader._open(stream, move(c_read_options), move(c_parse_options), + move(c_convert_options), memory_pool) + return reader + + +def _raise_invalid_function_option(value, description, *, + exception_class=ValueError): + raise exception_class(f"\"{value}\" is not a valid {description}") + + +cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *: + if quoting_style == "needed": + return CQuotingStyle_Needed + elif quoting_style == "all_valid": + return CQuotingStyle_AllValid + elif quoting_style == "none": + return CQuotingStyle_None + _raise_invalid_function_option(quoting_style, "quoting style") + + +cdef wrap_quoting_style(quoting_style): + if quoting_style == CQuotingStyle_Needed: + return 'needed' + elif quoting_style == CQuotingStyle_AllValid: + return 'all_valid' + elif quoting_style == CQuotingStyle_None: + return 'none' + + +cdef class WriteOptions(_Weakrefable): + """ + Options for writing CSV files. + + Parameters + ---------- + include_header : bool, optional (default True) + Whether to write an initial header line with column names + batch_size : int, optional (default 1024) + How many rows to process together when converting and writing + CSV data + delimiter : 1-character string, optional (default ",") + The character delimiting individual cells in the CSV data. + quoting_style : str, optional (default "needed") + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, include_header=None, batch_size=None, + delimiter=None, quoting_style=None): + self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults())) + if include_header is not None: + self.include_header = include_header + if batch_size is not None: + self.batch_size = batch_size + if delimiter is not None: + self.delimiter = delimiter + if quoting_style is not None: + self.quoting_style = quoting_style + + @property + def include_header(self): + """ + Whether to write an initial header line with column names. 
+ """ + return deref(self.options).include_header + + @include_header.setter + def include_header(self, value): + deref(self.options).include_header = value + + @property + def batch_size(self): + """ + How many rows to process together when converting and writing + CSV data. + """ + return deref(self.options).batch_size + + @batch_size.setter + def batch_size(self, value): + deref(self.options).batch_size = value + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quoting_style(self): + """ + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + return wrap_quoting_style(deref(self.options).quoting_style) + + @quoting_style.setter + def quoting_style(self, value): + deref(self.options).quoting_style = unwrap_quoting_style(value) + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options): + out = WriteOptions() + out.options.reset(new CCSVWriteOptions(move(options))) + return out + + def validate(self): + check_status(self.options.get().Validate()) + + +cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out): + if write_options is None: + out[0] = CCSVWriteOptions.Defaults() + else: + out[0] = deref(write_options.options) + + +def write_csv(data, output_file, write_options=None, + MemoryPool memory_pool=None): + """ + Write record batch or table to a CSV file. + + Parameters + ---------- + data : pyarrow.RecordBatch or pyarrow.Table + The data to write. + output_file : string, path, pyarrow.NativeFile, or file-like object + The location where to write the CSV data. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + + Examples + -------- + + >>> import pyarrow as pa + >>> from pyarrow import csv + + >>> legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> entry_date = pa.array(["01/03/2022", "02/03/2022", + ... "03/03/2022", "04/03/2022"]) + >>> table = pa.table([animals, legs, entry_date], + ... 
names=["animals", "n_legs", "entry"]) + + >>> csv.write_csv(table, "animals.csv") + + >>> write_options = csv.WriteOptions(include_header=False) + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + + >>> write_options = csv.WriteOptions(delimiter=";") + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + """ + cdef: + shared_ptr[COutputStream] stream + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool + CRecordBatch* batch + CTable* table + _get_write_options(write_options, &c_write_options) + + get_writer(output_file, &stream) + c_memory_pool = maybe_unbox_memory_pool(memory_pool) + c_write_options.io_context = CIOContext(c_memory_pool) + if isinstance(data, RecordBatch): + batch = pyarrow_unwrap_batch(data).get() + with nogil: + check_status(WriteCSV(deref(batch), c_write_options, stream.get())) + elif isinstance(data, Table): + table = pyarrow_unwrap_table(data).get() + with nogil: + check_status(WriteCSV(deref(table), c_write_options, stream.get())) + else: + raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'") + + +cdef class CSVWriter(_CRecordBatchWriter): + """ + Writer to create a CSV file. + + Parameters + ---------- + sink : str, path, pyarrow.OutputStream or file-like object + The location where to write the CSV data. + schema : pyarrow.Schema + The schema of the data to be written. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + """ + + def __init__(self, sink, Schema schema, *, + WriteOptions write_options=None, MemoryPool memory_pool=None): + cdef: + shared_ptr[COutputStream] c_stream + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool) + _get_write_options(write_options, &c_write_options) + c_write_options.io_context = CIOContext(c_memory_pool) + get_writer(sink, &c_stream) + with nogil: + self.writer = GetResultValue(MakeCSVWriter( + c_stream, c_schema, c_write_options)) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6acb8826d1789ab2c9e5213f16f2851c9e3dc22b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pxd @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
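# Editor's note: a hedged sketch combining the CSVWriter and open_csv APIs
# described above: write a table to CSV one batch at a time, then read it
# back as a stream of record batches. The file name and sample data are
# illustrative only; both objects are used as context managers, which the
# underlying record-batch writer/reader classes support.
import pyarrow as pa
from pyarrow import csv

table = pa.table({"animals": ["Flamingo", "Horse"], "n_legs": [2, 4]})

# Incremental writing with CSVWriter.
with csv.CSVWriter("animals.csv", table.schema,
                   write_options=csv.WriteOptions(delimiter=";")) as writer:
    for batch in table.to_batches():
        writer.write_batch(batch)

# Streaming (single-threaded) reading with open_csv; the delimiter must
# match the one used when writing.
parse_options = csv.ParseOptions(delimiter=";")
with csv.open_csv("animals.csv", parse_options=parse_options) as reader:
    for batch in reader:
        print(batch.num_rows, batch.schema.names)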
+ +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_cuda cimport * + + +cdef class Context(_Weakrefable): + cdef: + shared_ptr[CCudaContext] context + int device_number + + cdef void init(self, const shared_ptr[CCudaContext]& ctx) + + +cdef class IpcMemHandle(_Weakrefable): + cdef: + shared_ptr[CCudaIpcMemHandle] handle + + cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h) + + +cdef class CudaBuffer(Buffer): + cdef: + shared_ptr[CCudaBuffer] cuda_buffer + object base + + cdef void init_cuda(self, + const shared_ptr[CCudaBuffer]& buffer, + object base) + + +cdef class HostBuffer(Buffer): + cdef: + shared_ptr[CCudaHostBuffer] host_buffer + + cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer) + + +cdef class BufferReader(NativeFile): + cdef: + CCudaBufferReader* reader + CudaBuffer buffer + + +cdef class BufferWriter(NativeFile): + cdef: + CCudaBufferWriter* writer + CudaBuffer buffer diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..370faf869700b61345281afeb1ae552c03564315 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dlpack.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dlpack.pxi new file mode 100644 index 0000000000000000000000000000000000000000..c2f4cff64069195ad70f2ea271a842dfd166058c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dlpack.pxi @@ -0,0 +1,46 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
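The Cython declarations above back the pyarrow.cuda Python API. A minimal hedged sketch of typical use follows; it assumes a CUDA-enabled pyarrow build and a visible device 0, and the payload bytes are illustrative.

from pyarrow import cuda

ctx = cuda.Context(0)                       # wraps a CCudaContext for device 0
cbuf = ctx.buffer_from_data(b"some bytes")  # host -> device copy, returns a CudaBuffer
host = cbuf.copy_to_host()                  # device -> host, returns a regular Buffer
assert host.to_pybytes() == b"some bytes"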
+ +cimport cpython +from cpython.pycapsule cimport PyCapsule_New + + +cdef void dlpack_pycapsule_deleter(object dltensor) noexcept: + cdef DLManagedTensor* dlm_tensor + cdef PyObject* err_type + cdef PyObject* err_value + cdef PyObject* err_traceback + + # Do nothing if the capsule has been consumed + if cpython.PyCapsule_IsValid(dltensor, "used_dltensor"): + return + + # An exception may be in-flight, we must save it in case + # we create another one + cpython.PyErr_Fetch(&err_type, &err_value, &err_traceback) + + dlm_tensor = cpython.PyCapsule_GetPointer(dltensor, 'dltensor') + if dlm_tensor == NULL: + cpython.PyErr_WriteUnraisable(dltensor) + # The deleter can be NULL if there is no way for the caller + # to provide a reasonable destructor + elif dlm_tensor.deleter: + dlm_tensor.deleter(dlm_tensor) + assert (not cpython.PyErr_Occurred()) + + # Set the error indicator from err_type, err_value, err_traceback + cpython.PyErr_Restore(err_type, err_value, err_traceback) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..003a7fce0fb133b3f4075dab9485d625d4cadfb7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7b9035e44938f15d5bd27eba681612c5124b216a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fb057e70eaf4beb9405e7d2f16376420456bfe1f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.pyx new file mode 100644 index 0000000000000000000000000000000000000000..cbcc5d28ca918c41067ef7869ba6047306dc38b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.pyx @@ -0,0 +1,478 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
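The capsule deleter above implements the producer side of the DLPack handshake: pyarrow hands out a capsule named "dltensor", and a consumer renames it to "used_dltensor" once it has taken ownership of the memory. A hedged consumer-side sketch, assuming a pyarrow/NumPy pair recent enough to expose __dlpack__/from_dlpack and a null-free primitive array:

import numpy as np
import pyarrow as pa

arr = pa.array([1, 2, 3], type=pa.int64())  # primitive type, no nulls
np_view = np.from_dlpack(arr)               # consumes the "dltensor" capsule, zero-copy
print(np_view)                              # -> [1 2 3]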
+ +# ---------------------------------------------------------------------- +# HDFS IO implementation + +# cython: language_level = 3 + +import re + +from pyarrow.lib cimport check_status, _Weakrefable, NativeFile +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow.lib import frombytes, tobytes, ArrowIOError + + +_HDFS_PATH_RE = re.compile(r'hdfs://(.*):(\d+)(.*)') + + +def have_libhdfs(): + try: + with nogil: + check_status(HaveLibHdfs()) + return True + except Exception: + return False + + +def strip_hdfs_abspath(path): + m = _HDFS_PATH_RE.match(path) + if m: + return m.group(3) + else: + return path + + +cdef class HadoopFileSystem(_Weakrefable): + cdef: + shared_ptr[CIOHadoopFileSystem] client + + cdef readonly: + bint is_open + object host + object user + object kerb_ticket + int port + dict extra_conf + + def _connect(self, host, port, user, kerb_ticket, extra_conf): + cdef HdfsConnectionConfig conf + + if host is not None: + conf.host = tobytes(host) + self.host = host + + conf.port = port + self.port = port + + if user is not None: + conf.user = tobytes(user) + self.user = user + + if kerb_ticket is not None: + conf.kerb_ticket = tobytes(kerb_ticket) + self.kerb_ticket = kerb_ticket + + with nogil: + check_status(HaveLibHdfs()) + + if extra_conf is not None and isinstance(extra_conf, dict): + conf.extra_conf = {tobytes(k): tobytes(v) + for k, v in extra_conf.items()} + self.extra_conf = extra_conf + + with nogil: + check_status(CIOHadoopFileSystem.Connect(&conf, &self.client)) + self.is_open = True + + @classmethod + def connect(cls, *args, **kwargs): + return cls(*args, **kwargs) + + def __dealloc__(self): + if self.is_open: + self.close() + + def close(self): + """ + Disconnect from the HDFS cluster + """ + self._ensure_client() + with nogil: + check_status(self.client.get().Disconnect()) + self.is_open = False + + cdef _ensure_client(self): + if self.client.get() == NULL: + raise IOError('HDFS client improperly initialized') + elif not self.is_open: + raise IOError('HDFS client is closed') + + def exists(self, path): + """ + Returns True if the path is known to the cluster, False if it does not + (or there is an RPC error) + """ + self._ensure_client() + + cdef c_string c_path = tobytes(path) + cdef c_bool result + with nogil: + result = self.client.get().Exists(c_path) + return result + + def isdir(self, path): + cdef HdfsPathInfo info + try: + self._path_info(path, &info) + except ArrowIOError: + return False + return info.kind == ObjectType_DIRECTORY + + def isfile(self, path): + cdef HdfsPathInfo info + try: + self._path_info(path, &info) + except ArrowIOError: + return False + return info.kind == ObjectType_FILE + + def get_capacity(self): + """ + Get reported total capacity of file system + + Returns + ------- + capacity : int + """ + cdef int64_t capacity = 0 + with nogil: + check_status(self.client.get().GetCapacity(&capacity)) + return capacity + + def get_space_used(self): + """ + Get space used on file system + + Returns + ------- + space_used : int + """ + cdef int64_t space_used = 0 + with nogil: + check_status(self.client.get().GetUsed(&space_used)) + return space_used + + def df(self): + """ + Return free space on disk, like the UNIX df command + + Returns + ------- + space : int + """ + return self.get_capacity() - self.get_space_used() + + def rename(self, path, new_path): + cdef c_string c_path = tobytes(path) + cdef c_string c_new_path = tobytes(new_path) + with nogil: + 
check_status(self.client.get().Rename(c_path, c_new_path)) + + def info(self, path): + """ + Return detailed HDFS information for path + + Parameters + ---------- + path : string + Path to file or directory + + Returns + ------- + path_info : dict + """ + cdef HdfsPathInfo info + self._path_info(path, &info) + return { + 'path': frombytes(info.name), + 'owner': frombytes(info.owner), + 'group': frombytes(info.group), + 'size': info.size, + 'block_size': info.block_size, + 'last_modified': info.last_modified_time, + 'last_accessed': info.last_access_time, + 'replication': info.replication, + 'permissions': info.permissions, + 'kind': ('directory' if info.kind == ObjectType_DIRECTORY + else 'file') + } + + def stat(self, path): + """ + Return basic file system statistics about path + + Parameters + ---------- + path : string + Path to file or directory + + Returns + ------- + stat : dict + """ + cdef FileStatistics info + cdef c_string c_path = tobytes(path) + with nogil: + check_status(self.client.get() + .Stat(c_path, &info)) + return { + 'size': info.size, + 'kind': ('directory' if info.kind == ObjectType_DIRECTORY + else 'file') + } + + cdef _path_info(self, path, HdfsPathInfo* info): + cdef c_string c_path = tobytes(path) + + with nogil: + check_status(self.client.get() + .GetPathInfo(c_path, info)) + + def ls(self, path, bint full_info): + cdef: + c_string c_path = tobytes(path) + vector[HdfsPathInfo] listing + list results = [] + int i + + self._ensure_client() + + with nogil: + check_status(self.client.get() + .ListDirectory(c_path, &listing)) + + cdef const HdfsPathInfo* info + for i in range( listing.size()): + info = &listing[i] + + # Try to trim off the hdfs://HOST:PORT piece + name = strip_hdfs_abspath(frombytes(info.name)) + + if full_info: + kind = ('file' if info.kind == ObjectType_FILE + else 'directory') + + results.append({ + 'kind': kind, + 'name': name, + 'owner': frombytes(info.owner), + 'group': frombytes(info.group), + 'last_modified_time': info.last_modified_time, + 'last_access_time': info.last_access_time, + 'size': info.size, + 'replication': info.replication, + 'block_size': info.block_size, + 'permissions': info.permissions + }) + else: + results.append(name) + + return results + + def chmod(self, path, mode): + """ + Change file permissions + + Parameters + ---------- + path : string + absolute path to file or directory + mode : int + POSIX-like bitmask + """ + self._ensure_client() + cdef c_string c_path = tobytes(path) + cdef int c_mode = mode + with nogil: + check_status(self.client.get() + .Chmod(c_path, c_mode)) + + def chown(self, path, owner=None, group=None): + """ + Change file permissions + + Parameters + ---------- + path : string + absolute path to file or directory + owner : string, default None + New owner, None for no change + group : string, default None + New group, None for no change + """ + cdef: + c_string c_path + c_string c_owner + c_string c_group + const char* c_owner_ptr = NULL + const char* c_group_ptr = NULL + + self._ensure_client() + + c_path = tobytes(path) + if owner is not None: + c_owner = tobytes(owner) + c_owner_ptr = c_owner.c_str() + + if group is not None: + c_group = tobytes(group) + c_group_ptr = c_group.c_str() + + with nogil: + check_status(self.client.get() + .Chown(c_path, c_owner_ptr, c_group_ptr)) + + def mkdir(self, path): + """ + Create indicated directory and any necessary parent directories + """ + self._ensure_client() + cdef c_string c_path = tobytes(path) + with nogil: + check_status(self.client.get() + 
.MakeDirectory(c_path)) + + def delete(self, path, bint recursive=False): + """ + Delete the indicated file or directory + + Parameters + ---------- + path : string + recursive : boolean, default False + If True, also delete child paths for directories + """ + self._ensure_client() + + cdef c_string c_path = tobytes(path) + with nogil: + check_status(self.client.get() + .Delete(c_path, recursive == 1)) + + def open(self, path, mode='rb', buffer_size=None, replication=None, + default_block_size=None): + """ + Open HDFS file for reading or writing + + Parameters + ---------- + mode : string + Must be one of 'rb', 'wb', 'ab' + + Returns + ------- + handle : HdfsFile + """ + self._ensure_client() + + cdef HdfsFile out = HdfsFile() + + if mode not in ('rb', 'wb', 'ab'): + raise Exception("Mode must be 'rb' (read), " + "'wb' (write, new file), or 'ab' (append)") + + cdef c_string c_path = tobytes(path) + cdef c_bool append = False + + # 0 in libhdfs means "use the default" + cdef int32_t c_buffer_size = buffer_size or 0 + cdef int16_t c_replication = replication or 0 + cdef int64_t c_default_block_size = default_block_size or 0 + + cdef shared_ptr[HdfsOutputStream] wr_handle + cdef shared_ptr[HdfsReadableFile] rd_handle + + if mode in ('wb', 'ab'): + if mode == 'ab': + append = True + + with nogil: + check_status( + self.client.get() + .OpenWritable(c_path, append, c_buffer_size, + c_replication, c_default_block_size, + &wr_handle)) + + out.set_output_stream( wr_handle) + out.is_writable = True + else: + with nogil: + check_status(self.client.get() + .OpenReadable(c_path, &rd_handle)) + + out.set_random_access_file( + rd_handle) + out.is_readable = True + + assert not out.closed + + if c_buffer_size == 0: + c_buffer_size = 2 ** 16 + + out.mode = mode + out.buffer_size = c_buffer_size + out.parent = _HdfsFileNanny(self, out) + out.own_file = True + + return out + + def download(self, path, stream, buffer_size=None): + with self.open(path, 'rb') as f: + f.download(stream, buffer_size=buffer_size) + + def upload(self, path, stream, buffer_size=None): + """ + Upload file-like object to HDFS path + """ + with self.open(path, 'wb') as f: + f.upload(stream, buffer_size=buffer_size) + + +# ARROW-404: Helper class to ensure that files are closed before the +# client. During deallocation of the extension class, the attributes are +# decref'd which can cause the client to get closed first if the file has the +# last remaining reference +cdef class _HdfsFileNanny(_Weakrefable): + cdef: + object client + object file_handle_ref + + def __cinit__(self, client, file_handle): + import weakref + self.client = client + self.file_handle_ref = weakref.ref(file_handle) + + def __dealloc__(self): + fh = self.file_handle_ref() + if fh: + fh.close() + # avoid cyclic GC + self.file_handle_ref = None + self.client = None + + +cdef class HdfsFile(NativeFile): + cdef readonly: + int32_t buffer_size + object mode + object parent + + def __dealloc__(self): + self.parent = None diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pxd new file mode 100644 index 0000000000000000000000000000000000000000..42a0a678a9b6a543c657c905f3eb4fa4490b6edf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pxd @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable + + +cdef class ParseOptions(_Weakrefable): + cdef: + CJSONParseOptions options + + @staticmethod + cdef ParseOptions wrap(CJSONParseOptions options) + +cdef class ReadOptions(_Weakrefable): + cdef: + CJSONReadOptions options + + @staticmethod + cdef ReadOptions wrap(CJSONReadOptions options) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..dfc40b8e93cde0014e1b0cf751d700eeee1f3755 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d52669501a4044838e576d3dac8f8a422874eaa6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
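The ParseOptions/ReadOptions declarations above are the Cython side of pyarrow.json. A short usage sketch; the in-memory payload, block size, and schema are illustrative.

import io
import pyarrow as pa
from pyarrow import json

data = b'{"animal": "Flamingo", "n_legs": 2}\n{"animal": "Horse", "n_legs": 4}\n'

read_opts = json.ReadOptions(use_threads=True, block_size=1 << 20)
parse_opts = json.ParseOptions(
    explicit_schema=pa.schema([("animal", pa.string()), ("n_legs", pa.int64())]),
    unexpected_field_behavior="ignore")

table = json.read_json(io.BytesIO(data),
                       read_options=read_opts,
                       parse_options=parse_opts)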
+ +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libparquet_encryption cimport * +from pyarrow._parquet cimport (ParquetCipher, + CFileEncryptionProperties, + CFileDecryptionProperties, + FileEncryptionProperties, + FileDecryptionProperties, + ParquetCipher_AES_GCM_V1, + ParquetCipher_AES_GCM_CTR_V1) +from pyarrow.lib cimport _Weakrefable + +cdef class CryptoFactory(_Weakrefable): + cdef shared_ptr[CPyCryptoFactory] factory + cdef init(self, callable_client_factory) + cdef inline shared_ptr[CPyCryptoFactory] unwrap(self) + +cdef class EncryptionConfiguration(_Weakrefable): + cdef shared_ptr[CEncryptionConfiguration] configuration + cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil + +cdef class DecryptionConfiguration(_Weakrefable): + cdef shared_ptr[CDecryptionConfiguration] configuration + cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil + +cdef class KmsConnectionConfig(_Weakrefable): + cdef shared_ptr[CKmsConnectionConfig] configuration + cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil + + @staticmethod + cdef wrap(const CKmsConnectionConfig& config) + + +cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except * +cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except * +cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except * +cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except * diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd new file mode 100644 index 0000000000000000000000000000000000000000..91c0220d7310870a7803ecceb2c32b8b32f8c11d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
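The declarations above (CryptoFactory, KmsConnectionConfig, Encryption/DecryptionConfiguration) surface in pyarrow.parquet.encryption. A hedged sketch follows, using a deliberately toy in-memory KMS client; the key material, key identifiers, and column names are all illustrative.

import base64
import pyarrow.parquet.encryption as pe

class InMemoryKmsClient(pe.KmsClient):
    """Toy KMS client: 'wraps' data keys by prepending the master key (demo only)."""

    def __init__(self, kms_connection_config):
        super().__init__()
        self.master_keys = kms_connection_config.custom_kms_conf

    def wrap_key(self, key_bytes, master_key_identifier):
        master = self.master_keys[master_key_identifier].encode("utf-8")
        return base64.b64encode(master + key_bytes)

    def unwrap_key(self, wrapped_key, master_key_identifier):
        master = self.master_keys[master_key_identifier].encode("utf-8")
        return base64.b64decode(wrapped_key)[len(master):]

kms_config = pe.KmsConnectionConfig(
    custom_kms_conf={"footer_key": "0123456789012345",
                     "column_key": "1234567890123450"})
crypto_factory = pe.CryptoFactory(lambda config: InMemoryKmsClient(config))
encryption_config = pe.EncryptionConfiguration(
    footer_key="footer_key",
    column_keys={"column_key": ["secret_column"]})

# The resulting properties are then passed to Parquet writing
# (e.g. ParquetWriter(..., encryption_properties=...)).
encryption_properties = crypto_factory.file_encryption_properties(
    kms_config, encryption_config)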
+ +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport CStatus + + +ctypedef CStatus cb_test_func() + +cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil: + + cdef cppclass CTestCase "arrow::py::testing::TestCase": + c_string name + cb_test_func func + + vector[CTestCase] GetCppTestCases() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/acero.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/acero.py new file mode 100644 index 0000000000000000000000000000000000000000..a5583c9e657d23d7b3da0908fd498b3f664eef82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/acero.py @@ -0,0 +1,308 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Implement Internal ExecPlan bindings + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.lib import Table +from pyarrow.compute import Expression, field + +try: + from pyarrow._acero import ( # noqa + Declaration, + ExecNodeOptions, + TableSourceNodeOptions, + FilterNodeOptions, + ProjectNodeOptions, + AggregateNodeOptions, + OrderByNodeOptions, + HashJoinNodeOptions, + ) +except ImportError as exc: + raise ImportError( + f"The pyarrow installation is not built with support for 'acero' ({str(exc)})" + ) from None + + +try: + import pyarrow.dataset as ds + from pyarrow._dataset import ScanNodeOptions +except ImportError: + class DatasetModuleStub: + class Dataset: + pass + + class InMemoryDataset: + pass + ds = DatasetModuleStub + + +def _dataset_to_decl(dataset, use_threads=True): + decl = Declaration("scan", ScanNodeOptions(dataset, use_threads=use_threads)) + + # Get rid of special dataset columns + # "__fragment_index", "__batch_index", "__last_in_fragment", "__filename" + projections = [field(f) for f in dataset.schema.names] + decl = Declaration.from_sequence( + [decl, Declaration("project", ProjectNodeOptions(projections))] + ) + + filter_expr = dataset._scan_options.get("filter") + if filter_expr is not None: + # Filters applied in CScanNodeOptions are "best effort" for the scan node itself + # so we always need to inject an additional Filter node to apply them for real. + decl = Declaration.from_sequence( + [decl, Declaration("filter", FilterNodeOptions(filter_expr))] + ) + + return decl + + +def _perform_join(join_type, left_operand, left_keys, + right_operand, right_keys, + left_suffix=None, right_suffix=None, + use_threads=True, coalesce_keys=False, + output_type=Table): + """ + Perform join of two tables or datasets. 
+ + The result will be an output table with the result of the join operation + + Parameters + ---------- + join_type : str + One of supported join types. + left_operand : Table or Dataset + The left operand for the join operation. + left_keys : str or list[str] + The left key (or keys) on which the join operation should be performed. + right_operand : Table or Dataset + The right operand for the join operation. + right_keys : str or list[str] + The right key (or keys) on which the join operation should be performed. + left_suffix : str, default None + Which suffix to add to left column names. This prevents confusion + when the columns in left and right operands have colliding names. + right_suffix : str, default None + Which suffix to add to the right column names. This prevents confusion + when the columns in left and right operands have colliding names. + use_threads : bool, default True + Whether to use multithreading or not. + coalesce_keys : bool, default False + If the duplicated keys should be omitted from one of the sides + in the join result. + output_type: Table or InMemoryDataset + The output type for the exec plan result. + + Returns + ------- + result_table : Table or InMemoryDataset + """ + if not isinstance(left_operand, (Table, ds.Dataset)): + raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}") + if not isinstance(right_operand, (Table, ds.Dataset)): + raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}") + + # Prepare left and right tables Keys to send them to the C++ function + left_keys_order = {} + if not isinstance(left_keys, (tuple, list)): + left_keys = [left_keys] + for idx, key in enumerate(left_keys): + left_keys_order[key] = idx + + right_keys_order = {} + if not isinstance(right_keys, (list, tuple)): + right_keys = [right_keys] + for idx, key in enumerate(right_keys): + right_keys_order[key] = idx + + # By default expose all columns on both left and right table + left_columns = left_operand.schema.names + right_columns = right_operand.schema.names + + # Pick the join type + if join_type == "left semi" or join_type == "left anti": + right_columns = [] + elif join_type == "right semi" or join_type == "right anti": + left_columns = [] + elif join_type == "inner" or join_type == "left outer": + right_columns = [ + col for col in right_columns if col not in right_keys_order + ] + elif join_type == "right outer": + left_columns = [ + col for col in left_columns if col not in left_keys_order + ] + + # Turn the columns to vectors of FieldRefs + # and set aside indices of keys. 
+ left_column_keys_indices = {} + for idx, colname in enumerate(left_columns): + if colname in left_keys: + left_column_keys_indices[colname] = idx + right_column_keys_indices = {} + for idx, colname in enumerate(right_columns): + if colname in right_keys: + right_column_keys_indices[colname] = idx + + # Add the join node to the execplan + if isinstance(left_operand, ds.Dataset): + left_source = _dataset_to_decl(left_operand, use_threads=use_threads) + else: + left_source = Declaration("table_source", TableSourceNodeOptions(left_operand)) + if isinstance(right_operand, ds.Dataset): + right_source = _dataset_to_decl(right_operand, use_threads=use_threads) + else: + right_source = Declaration( + "table_source", TableSourceNodeOptions(right_operand) + ) + + if coalesce_keys: + join_opts = HashJoinNodeOptions( + join_type, left_keys, right_keys, left_columns, right_columns, + output_suffix_for_left=left_suffix or "", + output_suffix_for_right=right_suffix or "", + ) + else: + join_opts = HashJoinNodeOptions( + join_type, left_keys, right_keys, + output_suffix_for_left=left_suffix or "", + output_suffix_for_right=right_suffix or "", + ) + decl = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source] + ) + + if coalesce_keys and join_type == "full outer": + # In case of full outer joins, the join operation will output all columns + # so that we can coalesce the keys and exclude duplicates in a subsequent + # projection. + left_columns_set = set(left_columns) + right_columns_set = set(right_columns) + # Where the right table columns start. + right_operand_index = len(left_columns) + projected_col_names = [] + projections = [] + for idx, col in enumerate(left_columns + right_columns): + if idx < len(left_columns) and col in left_column_keys_indices: + # Include keys only once and coalesce left+right table keys. + projected_col_names.append(col) + # Get the index of the right key that is being paired + # with this left key. We do so by retrieving the name + # of the right key that is in the same position in the provided keys + # and then looking up the index for that name in the right table. + right_key_index = right_column_keys_indices[ + right_keys[left_keys_order[col]]] + projections.append( + Expression._call("coalesce", [ + Expression._field(idx), Expression._field( + right_operand_index+right_key_index) + ]) + ) + elif idx >= right_operand_index and col in right_column_keys_indices: + # Do not include right table keys. As they would lead to duplicated keys + continue + else: + # For all the other columns include them as they are. + # Just recompute the suffixes that the join produced as the projection + # would lose them otherwise. + if ( + left_suffix and idx < right_operand_index + and col in right_columns_set + ): + col += left_suffix + if ( + right_suffix and idx >= right_operand_index + and col in left_columns_set + ): + col += right_suffix + projected_col_names.append(col) + projections.append( + Expression._field(idx) + ) + projection = Declaration( + "project", ProjectNodeOptions(projections, projected_col_names) + ) + decl = Declaration.from_sequence([decl, projection]) + + result_table = decl.to_table(use_threads=use_threads) + + if output_type == Table: + return result_table + elif output_type == ds.InMemoryDataset: + return ds.InMemoryDataset(result_table) + else: + raise TypeError("Unsupported output type") + + +def _filter_table(table, expression): + """Filter rows of a table based on the provided expression. 
+ + The result will be an output table with only the rows matching + the provided expression. + + Parameters + ---------- + table : Table or Dataset + Table or Dataset that should be filtered. + expression : Expression + The expression on which rows should be filtered. + + Returns + ------- + Table + """ + decl = Declaration.from_sequence([ + Declaration("table_source", options=TableSourceNodeOptions(table)), + Declaration("filter", options=FilterNodeOptions(expression)) + ]) + return decl.to_table(use_threads=True) + + +def _sort_source(table_or_dataset, sort_keys, output_type=Table, **kwargs): + + if isinstance(table_or_dataset, ds.Dataset): + data_source = _dataset_to_decl(table_or_dataset, use_threads=True) + else: + data_source = Declaration( + "table_source", TableSourceNodeOptions(table_or_dataset) + ) + + order_by = Declaration("order_by", OrderByNodeOptions(sort_keys, **kwargs)) + + decl = Declaration.from_sequence([data_source, order_by]) + result_table = decl.to_table(use_threads=True) + + if output_type == Table: + return result_table + elif output_type == ds.InMemoryDataset: + return ds.InMemoryDataset(result_table) + else: + raise TypeError("Unsupported output type") + + +def _group_by(table, aggregates, keys, use_threads=True): + + decl = Declaration.from_sequence([ + Declaration("table_source", TableSourceNodeOptions(table)), + Declaration("aggregate", AggregateNodeOptions(aggregates, keys=keys)) + ]) + return decl.to_table(use_threads=use_threads) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..25ee1141f08d1f4ac19ab7ade92eafbf786d685a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.py @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + + +from pyarrow.lib import benchmark_PandasObjectIsNull diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/conftest.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..6f6807e907d62f7e04dc6e92d017d5b20ef3ee95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/conftest.py @@ -0,0 +1,336 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest +import pyarrow as pa +from pyarrow import Codec +from pyarrow import fs + +import numpy as np + +groups = [ + 'acero', + 'brotli', + 'bz2', + 'cython', + 'dataset', + 'hypothesis', + 'fastparquet', + 'gandiva', + 'gcs', + 'gdb', + 'gzip', + 'hdfs', + 'large_memory', + 'lz4', + 'memory_leak', + 'nopandas', + 'orc', + 'pandas', + 'parquet', + 'parquet_encryption', + 's3', + 'snappy', + 'substrait', + 'flight', + 'slow', + 'requires_testing_data', + 'zstd', +] + +defaults = { + 'acero': False, + 'brotli': Codec.is_available('brotli'), + 'bz2': Codec.is_available('bz2'), + 'cython': False, + 'dataset': False, + 'fastparquet': False, + 'flight': False, + 'gandiva': False, + 'gcs': False, + 'gdb': True, + 'gzip': Codec.is_available('gzip'), + 'hdfs': False, + 'hypothesis': False, + 'large_memory': False, + 'lz4': Codec.is_available('lz4'), + 'memory_leak': False, + 'nopandas': False, + 'orc': False, + 'pandas': False, + 'parquet': False, + 'parquet_encryption': False, + 'requires_testing_data': True, + 's3': False, + 'slow': False, + 'snappy': Codec.is_available('snappy'), + 'substrait': False, + 'zstd': Codec.is_available('zstd'), +} + +try: + import cython # noqa + defaults['cython'] = True +except ImportError: + pass + +try: + import fastparquet # noqa + defaults['fastparquet'] = True +except ImportError: + pass + +try: + import pyarrow.gandiva # noqa + defaults['gandiva'] = True +except ImportError: + pass + +try: + import pyarrow.acero # noqa + defaults['acero'] = True +except ImportError: + pass + +try: + import pyarrow.dataset # noqa + defaults['dataset'] = True +except ImportError: + pass + +try: + import pyarrow.orc # noqa + defaults['orc'] = True +except ImportError: + pass + +try: + import pandas # noqa + defaults['pandas'] = True +except ImportError: + defaults['nopandas'] = True + +try: + import pyarrow.parquet # noqa + defaults['parquet'] = True +except ImportError: + pass + +try: + import pyarrow.parquet.encryption # noqa + defaults['parquet_encryption'] = True +except ImportError: + pass + +try: + import pyarrow.flight # noqa + defaults['flight'] = True +except ImportError: + pass + +try: + from pyarrow.fs import GcsFileSystem # noqa + defaults['gcs'] = True +except ImportError: + pass + + +try: + from pyarrow.fs import S3FileSystem # noqa + defaults['s3'] = True +except ImportError: + pass + +try: + from pyarrow.fs import HadoopFileSystem # noqa + defaults['hdfs'] = True +except ImportError: + pass + +try: + import pyarrow.substrait # noqa + defaults['substrait'] = True +except ImportError: + pass + + +# Doctest should ignore files for the modules that are not built +def pytest_ignore_collect(path, config): + if config.option.doctestmodules: + # don't try to run doctests on the /tests directory + if "/pyarrow/tests/" in str(path): + return True + + doctest_groups = [ + 'dataset', + 'orc', + 'parquet', + 'flight', + 'substrait', + ] + + # handle cuda, flight, etc + for group in doctest_groups: + if 'pyarrow/{}'.format(group) in str(path): + if not defaults[group]: + return True + + if 'pyarrow/parquet/encryption' in str(path): + if not 
defaults['parquet_encryption']: + return True + + if 'pyarrow/cuda' in str(path): + try: + import pyarrow.cuda # noqa + return False + except ImportError: + return True + + if 'pyarrow/fs' in str(path): + try: + from pyarrow.fs import S3FileSystem # noqa + return False + except ImportError: + return True + + if getattr(config.option, "doctest_cython", False): + if "/pyarrow/tests/" in str(path): + return True + if "/pyarrow/_parquet_encryption" in str(path): + return True + + return False + + +# Save output files from doctest examples into temp dir +@pytest.fixture(autouse=True) +def _docdir(request): + + # Trigger ONLY for the doctests + doctest_m = request.config.option.doctestmodules + doctest_c = getattr(request.config.option, "doctest_cython", False) + + if doctest_m or doctest_c: + + # Get the fixture dynamically by its name. + tmpdir = request.getfixturevalue('tmpdir') + + # Chdir only for the duration of the test. + with tmpdir.as_cwd(): + yield + + else: + yield + + +# Define doctest_namespace for fs module docstring import +@pytest.fixture(autouse=True) +def add_fs(doctest_namespace, request, tmp_path): + + # Trigger ONLY for the doctests + doctest_m = request.config.option.doctestmodules + doctest_c = getattr(request.config.option, "doctest_cython", False) + + if doctest_m or doctest_c: + # fs import + doctest_namespace["fs"] = fs + + # Creation of an object and file with data + local = fs.LocalFileSystem() + path = tmp_path / 'pyarrow-fs-example.dat' + with local.open_output_stream(str(path)) as stream: + stream.write(b'data') + doctest_namespace["local"] = local + doctest_namespace["local_path"] = str(tmp_path) + doctest_namespace["path"] = str(path) + yield + + +# Define udf fixture for test_udf.py and test_substrait.py +@pytest.fixture(scope="session") +def unary_func_fixture(): + """ + Register a unary scalar function. 
+ """ + from pyarrow import compute as pc + + def unary_function(ctx, x): + return pc.call_function("add", [x, 1], + memory_pool=ctx.memory_pool) + func_name = "y=x+1" + unary_doc = {"summary": "add function", + "description": "test add function"} + pc.register_scalar_function(unary_function, + func_name, + unary_doc, + {"array": pa.int64()}, + pa.int64()) + return unary_function, func_name + + +@pytest.fixture(scope="session") +def unary_agg_func_fixture(): + """ + Register a unary aggregate function (mean) + """ + from pyarrow import compute as pc + + def func(ctx, x): + return pa.scalar(np.nanmean(x)) + + func_name = "mean_udf" + func_doc = {"summary": "y=avg(x)", + "description": "find mean of x"} + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.float64(), + }, + pa.float64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def varargs_agg_func_fixture(): + """ + Register a unary aggregate function + """ + from pyarrow import compute as pc + + def func(ctx, *args): + sum = 0.0 + for arg in args: + sum += np.nanmean(arg) + return pa.scalar(sum) + + func_name = "sum_mean" + func_doc = {"summary": "Varargs aggregate", + "description": "Varargs aggregate"} + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + "y": pa.float64() + }, + pa.float64() + ) + return func, func_name diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/error.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/error.pxi new file mode 100644 index 0000000000000000000000000000000000000000..4357cde32c31db36763225643ac7976217aed0e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/error.pxi @@ -0,0 +1,271 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from cpython.exc cimport PyErr_CheckSignals, PyErr_SetInterrupt + +from pyarrow.includes.libarrow cimport CStatus +from pyarrow.includes.libarrow_python cimport IsPyError, RestorePyError +from pyarrow.includes.common cimport c_string + +from contextlib import contextmanager +import os +import signal +import threading + +from pyarrow.util import _break_traceback_cycle_from_frame + + +class ArrowException(Exception): + pass + + +class ArrowInvalid(ValueError, ArrowException): + pass + + +class ArrowMemoryError(MemoryError, ArrowException): + pass + + +class ArrowKeyError(KeyError, ArrowException): + def __str__(self): + # Override KeyError.__str__, as it uses the repr() of the key + return ArrowException.__str__(self) + + +class ArrowTypeError(TypeError, ArrowException): + pass + + +class ArrowNotImplementedError(NotImplementedError, ArrowException): + pass + + +class ArrowCapacityError(ArrowException): + pass + + +class ArrowIndexError(IndexError, ArrowException): + pass + + +class ArrowSerializationError(ArrowException): + pass + + +class ArrowCancelled(ArrowException): + def __init__(self, message, signum=None): + super().__init__(message) + self.signum = signum + + +# Compatibility alias +ArrowIOError = IOError + + +# check_status() and convert_status() could be written directly in C++ +# if we didn't define Arrow-specific subclasses (ArrowInvalid etc.) +cdef int check_status(const CStatus& status) except -1 nogil: + if status.ok(): + return 0 + + with gil: + if IsPyError(status): + RestorePyError(status) + return -1 + + raise convert_status(status) + + +cdef object convert_status(const CStatus& status): + if IsPyError(status): + try: + RestorePyError(status) + except BaseException as e: + return e + + # We don't use Status::ToString() as it would redundantly include + # the C++ class name. + message = frombytes(status.message(), safe=True) + detail = status.detail() + if detail != nullptr: + message += ". 
Detail: " + frombytes(detail.get().ToString(), + safe=True) + + if status.IsInvalid(): + return ArrowInvalid(message) + elif status.IsIOError(): + # Note: OSError constructor is + # OSError(message) + # or + # OSError(errno, message, filename=None) + # or (on Windows) + # OSError(errno, message, filename, winerror) + errno = ErrnoFromStatus(status) + winerror = WinErrorFromStatus(status) + if winerror != 0: + return IOError(errno, message, None, winerror) + elif errno != 0: + return IOError(errno, message) + else: + return IOError(message) + elif status.IsOutOfMemory(): + return ArrowMemoryError(message) + elif status.IsKeyError(): + return ArrowKeyError(message) + elif status.IsNotImplemented(): + return ArrowNotImplementedError(message) + elif status.IsTypeError(): + return ArrowTypeError(message) + elif status.IsCapacityError(): + return ArrowCapacityError(message) + elif status.IsIndexError(): + return ArrowIndexError(message) + elif status.IsSerializationError(): + return ArrowSerializationError(message) + elif status.IsCancelled(): + signum = SignalFromStatus(status) + if signum > 0: + return ArrowCancelled(message, signum) + else: + return ArrowCancelled(message) + else: + message = frombytes(status.ToString(), safe=True) + return ArrowException(message) + + +# These are API functions for C++ PyArrow +cdef api int pyarrow_internal_check_status(const CStatus& status) \ + except -1 nogil: + return check_status(status) + +cdef api object pyarrow_internal_convert_status(const CStatus& status): + return convert_status(status) + + +cdef class StopToken: + cdef void init(self, CStopToken stop_token): + self.stop_token = move(stop_token) + + +cdef c_bool signal_handlers_enabled = True + + +def enable_signal_handlers(c_bool enable): + """ + Enable or disable interruption of long-running operations. + + By default, certain long running operations will detect user + interruptions, such as by pressing Ctrl-C. This detection relies + on setting a signal handler for the duration of the long-running + operation, and may therefore interfere with other frameworks or + libraries (such as an event loop). + + Parameters + ---------- + enable : bool + Whether to enable user interruption by setting a temporary + signal handler. + """ + global signal_handlers_enabled + signal_handlers_enabled = enable + + +# For internal use + +# Whether we need a workaround for https://bugs.python.org/issue42248 +have_signal_refcycle = (sys.version_info < (3, 8, 10) or + (3, 9) <= sys.version_info < (3, 9, 5) or + sys.version_info[:2] == (3, 10)) + +cdef class SignalStopHandler: + cdef: + StopToken _stop_token + vector[int] _signals + c_bool _enabled + + def __cinit__(self): + self._enabled = False + + self._init_signals() + if have_signal_refcycle: + _break_traceback_cycle_from_frame(sys._getframe(0)) + + self._stop_token = StopToken() + + if not self._signals.empty(): + maybe_source = SetSignalStopSource() + if not maybe_source.ok(): + # See ARROW-11841 / ARROW-17173: in complex interaction + # scenarios (such as R calling into Python), SetSignalStopSource() + # may have already activated a signal-receiving StopSource. + # Just warn instead of erroring out. 
+ maybe_source.status().Warn() + else: + self._stop_token.init(deref(maybe_source).token()) + self._enabled = True + + def _init_signals(self): + if (signal_handlers_enabled and + threading.current_thread() is threading.main_thread()): + self._signals = [ + sig for sig in (signal.SIGINT, signal.SIGTERM) + if signal.getsignal(sig) not in (signal.SIG_DFL, + signal.SIG_IGN, None)] + + def __enter__(self): + if self._enabled: + check_status(RegisterCancellingSignalHandler(self._signals)) + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + if self._enabled: + UnregisterCancellingSignalHandler() + if exc_value is None: + # Make sure we didn't lose a signal + try: + check_status(self._stop_token.stop_token.Poll()) + except ArrowCancelled as e: + exc_value = e + if isinstance(exc_value, ArrowCancelled): + if exc_value.signum: + # Re-emit the exact same signal. We restored the Python signal + # handler above, so it should receive it. + if os.name == 'nt': + SendSignal(exc_value.signum) + else: + SendSignalToThread(exc_value.signum, + threading.main_thread().ident) + else: + # Simulate Python receiving a SIGINT + # (see https://bugs.python.org/issue43356 for why we can't + # simulate the exact signal number) + PyErr_SetInterrupt() + # Maximize chances of the Python signal handler being executed now. + # Otherwise a potential KeyboardInterrupt might be missed by an + # immediately enclosing try/except block. + PyErr_CheckSignals() + # ArrowCancelled will be re-raised if PyErr_CheckSignals() + # returned successfully. + + def __dealloc__(self): + if self._enabled: + ResetSignalStopSource() + + @property + def stop_token(self): + return self._stop_token diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/filesystem.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..c1e70a1ee699f627af42d5fddf27ce877ea01230 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/filesystem.py @@ -0,0 +1,511 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import os +import posixpath +import sys +import urllib.parse +import warnings + +from os.path import join as pjoin + +import pyarrow as pa +from pyarrow.util import doc, _stringify_path, _is_path_like, _DEPR_MSG + + +_FS_DEPR_MSG = _DEPR_MSG.format( + "filesystem.LocalFileSystem", "2.0.0", "fs.LocalFileSystem" +) + + +class FileSystem: + """ + Abstract filesystem interface. + """ + + def cat(self, path): + """ + Return contents of file as a bytes object. + + Parameters + ---------- + path : str + File path to read content from. + + Returns + ------- + contents : bytes + """ + with self.open(path, 'rb') as f: + return f.read() + + def ls(self, path): + """ + Return list of file paths. 
+ + Parameters + ---------- + path : str + Directory to list contents from. + """ + raise NotImplementedError + + def delete(self, path, recursive=False): + """ + Delete the indicated file or directory. + + Parameters + ---------- + path : str + Path to delete. + recursive : bool, default False + If True, also delete child paths for directories. + """ + raise NotImplementedError + + def disk_usage(self, path): + """ + Compute bytes used by all contents under indicated path in file tree. + + Parameters + ---------- + path : str + Can be a file path or directory. + + Returns + ------- + usage : int + """ + path = _stringify_path(path) + path_info = self.stat(path) + if path_info['kind'] == 'file': + return path_info['size'] + + total = 0 + for root, directories, files in self.walk(path): + for child_path in files: + abspath = self._path_join(root, child_path) + total += self.stat(abspath)['size'] + + return total + + def _path_join(self, *args): + return self.pathsep.join(args) + + def stat(self, path): + """ + Information about a filesystem entry. + + Returns + ------- + stat : dict + """ + raise NotImplementedError('FileSystem.stat') + + def rm(self, path, recursive=False): + """ + Alias for FileSystem.delete. + """ + return self.delete(path, recursive=recursive) + + def mv(self, path, new_path): + """ + Alias for FileSystem.rename. + """ + return self.rename(path, new_path) + + def rename(self, path, new_path): + """ + Rename file, like UNIX mv command. + + Parameters + ---------- + path : str + Path to alter. + new_path : str + Path to move to. + """ + raise NotImplementedError('FileSystem.rename') + + def mkdir(self, path, create_parents=True): + """ + Create a directory. + + Parameters + ---------- + path : str + Path to the directory. + create_parents : bool, default True + If the parent directories don't exists create them as well. + """ + raise NotImplementedError + + def exists(self, path): + """ + Return True if path exists. + + Parameters + ---------- + path : str + Path to check. + """ + raise NotImplementedError + + def isdir(self, path): + """ + Return True if path is a directory. + + Parameters + ---------- + path : str + Path to check. + """ + raise NotImplementedError + + def isfile(self, path): + """ + Return True if path is a file. + + Parameters + ---------- + path : str + Path to check. + """ + raise NotImplementedError + + def _isfilestore(self): + """ + Returns True if this FileSystem is a unix-style file store with + directories. + """ + raise NotImplementedError + + def read_parquet(self, path, columns=None, metadata=None, schema=None, + use_threads=True, use_pandas_metadata=False): + """ + Read Parquet data from path in file system. Can read from a single file + or a directory of files. + + Parameters + ---------- + path : str + Single file path or directory + columns : List[str], optional + Subset of columns to read. + metadata : pyarrow.parquet.FileMetaData + Known metadata to validate files against. + schema : pyarrow.parquet.Schema + Known schema to validate files against. Alternative to metadata + argument. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. 
+ + Returns + ------- + table : pyarrow.Table + """ + from pyarrow.parquet import ParquetDataset + dataset = ParquetDataset(path, schema=schema, metadata=metadata, + filesystem=self) + return dataset.read(columns=columns, use_threads=use_threads, + use_pandas_metadata=use_pandas_metadata) + + def open(self, path, mode='rb'): + """ + Open file for reading or writing. + """ + raise NotImplementedError + + @property + def pathsep(self): + return '/' + + +class LocalFileSystem(FileSystem): + + _instance = None + + def __init__(self): + warnings.warn(_FS_DEPR_MSG, FutureWarning, stacklevel=2) + super().__init__() + + @classmethod + def _get_instance(cls): + if cls._instance is None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + cls._instance = LocalFileSystem() + return cls._instance + + @classmethod + def get_instance(cls): + warnings.warn(_FS_DEPR_MSG, FutureWarning, stacklevel=2) + return cls._get_instance() + + @doc(FileSystem.ls) + def ls(self, path): + path = _stringify_path(path) + return sorted(pjoin(path, x) for x in os.listdir(path)) + + @doc(FileSystem.mkdir) + def mkdir(self, path, create_parents=True): + path = _stringify_path(path) + if create_parents: + os.makedirs(path) + else: + os.mkdir(path) + + @doc(FileSystem.isdir) + def isdir(self, path): + path = _stringify_path(path) + return os.path.isdir(path) + + @doc(FileSystem.isfile) + def isfile(self, path): + path = _stringify_path(path) + return os.path.isfile(path) + + @doc(FileSystem._isfilestore) + def _isfilestore(self): + return True + + @doc(FileSystem.exists) + def exists(self, path): + path = _stringify_path(path) + return os.path.exists(path) + + @doc(FileSystem.open) + def open(self, path, mode='rb'): + """ + Open file for reading or writing. + """ + path = _stringify_path(path) + return open(path, mode=mode) + + @property + def pathsep(self): + return os.path.sep + + def walk(self, path): + """ + Directory tree generator, see os.walk. + """ + path = _stringify_path(path) + return os.walk(path) + + +class DaskFileSystem(FileSystem): + """ + Wraps s3fs Dask filesystem implementation like s3fs, gcsfs, etc. + """ + + def __init__(self, fs): + warnings.warn( + "The pyarrow.filesystem.DaskFileSystem/S3FSWrapper are deprecated " + "as of pyarrow 3.0.0, and will be removed in a future version.", + FutureWarning, stacklevel=2) + self.fs = fs + + @doc(FileSystem.isdir) + def isdir(self, path): + raise NotImplementedError("Unsupported file system API") + + @doc(FileSystem.isfile) + def isfile(self, path): + raise NotImplementedError("Unsupported file system API") + + @doc(FileSystem._isfilestore) + def _isfilestore(self): + """ + Object Stores like S3 and GCSFS are based on key lookups, not true + file-paths. + """ + return False + + @doc(FileSystem.delete) + def delete(self, path, recursive=False): + path = _stringify_path(path) + return self.fs.rm(path, recursive=recursive) + + @doc(FileSystem.exists) + def exists(self, path): + path = _stringify_path(path) + return self.fs.exists(path) + + @doc(FileSystem.mkdir) + def mkdir(self, path, create_parents=True): + path = _stringify_path(path) + if create_parents: + return self.fs.mkdirs(path) + else: + return self.fs.mkdir(path) + + @doc(FileSystem.open) + def open(self, path, mode='rb'): + """ + Open file for reading or writing. 
+ """ + path = _stringify_path(path) + return self.fs.open(path, mode=mode) + + def ls(self, path, detail=False): + path = _stringify_path(path) + return self.fs.ls(path, detail=detail) + + def walk(self, path): + """ + Directory tree generator, like os.walk. + """ + path = _stringify_path(path) + return self.fs.walk(path) + + +class S3FSWrapper(DaskFileSystem): + + @doc(FileSystem.isdir) + def isdir(self, path): + path = _sanitize_s3(_stringify_path(path)) + try: + contents = self.fs.ls(path) + if len(contents) == 1 and contents[0] == path: + return False + else: + return True + except OSError: + return False + + @doc(FileSystem.isfile) + def isfile(self, path): + path = _sanitize_s3(_stringify_path(path)) + try: + contents = self.fs.ls(path) + return len(contents) == 1 and contents[0] == path + except OSError: + return False + + def walk(self, path, refresh=False): + """ + Directory tree generator, like os.walk. + + Generator version of what is in s3fs, which yields a flattened list of + files. + """ + path = _sanitize_s3(_stringify_path(path)) + directories = set() + files = set() + + for key in list(self.fs._ls(path, refresh=refresh)): + path = key['Key'] + if key['StorageClass'] == 'DIRECTORY': + directories.add(path) + elif key['StorageClass'] == 'BUCKET': + pass + else: + files.add(path) + + # s3fs creates duplicate 'DIRECTORY' entries + files = sorted([posixpath.split(f)[1] for f in files + if f not in directories]) + directories = sorted([posixpath.split(x)[1] + for x in directories]) + + yield path, directories, files + + for directory in directories: + yield from self.walk(directory, refresh=refresh) + + +def _sanitize_s3(path): + if path.startswith('s3://'): + return path.replace('s3://', '') + else: + return path + + +def _ensure_filesystem(fs): + fs_type = type(fs) + + # If the arrow filesystem was subclassed, assume it supports the full + # interface and return it + if not issubclass(fs_type, FileSystem): + if "fsspec" in sys.modules: + fsspec = sys.modules["fsspec"] + if isinstance(fs, fsspec.AbstractFileSystem): + # for recent fsspec versions that stop inheriting from + # pyarrow.filesystem.FileSystem, still allow fsspec + # filesystems (which should be compatible with our legacy fs) + return fs + + raise OSError('Unrecognized filesystem: {}'.format(fs_type)) + else: + return fs + + +def resolve_filesystem_and_path(where, filesystem=None): + """ + Return filesystem from path which could be an HDFS URI, a local URI, + or a plain filesystem path. 
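+
+    A minimal sketch of the intended behaviour (illustrative only; the exact
+    filesystem object returned depends on the local Arrow build)::
+
+        fs, path = resolve_filesystem_and_path('hdfs://namenode:8020/data.parquet')
+        # -> (legacy HadoopFileSystem connected to namenode:8020, '/data.parquet')
+
+        fs, path = resolve_filesystem_and_path('/tmp/data.parquet')
+        # -> (LocalFileSystem instance, '/tmp/data.parquet')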
+ """ + if not _is_path_like(where): + if filesystem is not None: + raise ValueError("filesystem passed but where is file-like, so" + " there is nothing to open with filesystem.") + return filesystem, where + + if filesystem is not None: + filesystem = _ensure_filesystem(filesystem) + if isinstance(filesystem, LocalFileSystem): + path = _stringify_path(where) + elif not isinstance(where, str): + raise TypeError( + "Expected string path; path-like objects are only allowed " + "with a local filesystem" + ) + else: + path = where + return filesystem, path + + path = _stringify_path(where) + + parsed_uri = urllib.parse.urlparse(path) + if parsed_uri.scheme == 'hdfs' or parsed_uri.scheme == 'viewfs': + # Input is hdfs URI such as hdfs://host:port/myfile.parquet + netloc_split = parsed_uri.netloc.split(':') + host = netloc_split[0] + if host == '': + host = 'default' + else: + host = parsed_uri.scheme + "://" + host + port = 0 + if len(netloc_split) == 2 and netloc_split[1].isnumeric(): + port = int(netloc_split[1]) + fs = pa.hdfs._connect(host=host, port=port) + fs_path = parsed_uri.path + elif parsed_uri.scheme == 'file': + # Input is local URI such as file:///home/user/myfile.parquet + fs = LocalFileSystem._get_instance() + fs_path = parsed_uri.path + else: + # Input is local path such as /home/user/myfile.parquet + fs = LocalFileSystem._get_instance() + fs_path = path + + return fs, fs_path diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/ipc.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/ipc.pxi new file mode 100644 index 0000000000000000000000000000000000000000..da9636dfc86e18fb8dc55cd295845e50a57471b9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/ipc.pxi @@ -0,0 +1,1371 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
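+
+# The declarations below back the public helpers exposed through
+# ``pyarrow.ipc``.  A minimal usage sketch (illustrative only, assuming a
+# standard pyarrow installation):
+#
+#   import pyarrow as pa
+#   batch = pa.record_batch([pa.array([1, 2, 3])], names=["x"])
+#   sink = pa.BufferOutputStream()
+#   with pa.ipc.new_stream(sink, batch.schema) as writer:
+#       writer.write_batch(batch)
+#   with pa.ipc.open_stream(sink.getvalue()) as reader:
+#       table = reader.read_all()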
+ +from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New + +from collections import namedtuple +import warnings +from cython import sizeof + +cpdef enum MetadataVersion: + V1 = CMetadataVersion_V1 + V2 = CMetadataVersion_V2 + V3 = CMetadataVersion_V3 + V4 = CMetadataVersion_V4 + V5 = CMetadataVersion_V5 + + +cdef object _wrap_metadata_version(CMetadataVersion version): + return MetadataVersion( version) + + +cdef CMetadataVersion _unwrap_metadata_version( + MetadataVersion version) except *: + if version == MetadataVersion.V1: + return CMetadataVersion_V1 + elif version == MetadataVersion.V2: + return CMetadataVersion_V2 + elif version == MetadataVersion.V3: + return CMetadataVersion_V3 + elif version == MetadataVersion.V4: + return CMetadataVersion_V4 + elif version == MetadataVersion.V5: + return CMetadataVersion_V5 + raise ValueError("Not a metadata version: " + repr(version)) + + +_WriteStats = namedtuple( + 'WriteStats', + ('num_messages', 'num_record_batches', 'num_dictionary_batches', + 'num_dictionary_deltas', 'num_replaced_dictionaries')) + + +class WriteStats(_WriteStats): + """IPC write statistics + + Parameters + ---------- + num_messages : int + Number of messages. + num_record_batches : int + Number of record batches. + num_dictionary_batches : int + Number of dictionary batches. + num_dictionary_deltas : int + Delta of dictionaries. + num_replaced_dictionaries : int + Number of replaced dictionaries. + """ + __slots__ = () + + +@staticmethod +cdef _wrap_write_stats(CIpcWriteStats c): + return WriteStats(c.num_messages, c.num_record_batches, + c.num_dictionary_batches, c.num_dictionary_deltas, + c.num_replaced_dictionaries) + + +_ReadStats = namedtuple( + 'ReadStats', + ('num_messages', 'num_record_batches', 'num_dictionary_batches', + 'num_dictionary_deltas', 'num_replaced_dictionaries')) + + +class ReadStats(_ReadStats): + """IPC read statistics + + Parameters + ---------- + num_messages : int + Number of messages. + num_record_batches : int + Number of record batches. + num_dictionary_batches : int + Number of dictionary batches. + num_dictionary_deltas : int + Delta of dictionaries. + num_replaced_dictionaries : int + Number of replaced dictionaries. + """ + __slots__ = () + + +@staticmethod +cdef _wrap_read_stats(CIpcReadStats c): + return ReadStats(c.num_messages, c.num_record_batches, + c.num_dictionary_batches, c.num_dictionary_deltas, + c.num_replaced_dictionaries) + + +cdef class IpcReadOptions(_Weakrefable): + """ + Serialization options for reading IPC format. + + Parameters + ---------- + ensure_native_endian : bool, default True + Whether to convert incoming data to platform-native endianness. + use_threads : bool + Whether to use the global CPU thread pool to parallelize any + computational tasks like decompression + included_fields : list + If empty (the default), return all deserialized fields. 
+ If non-empty, the values are the indices of fields to read on + the top-level schema + """ + __slots__ = () + + # cdef block is in lib.pxd + + def __init__(self, *, bint ensure_native_endian=True, + bint use_threads=True, list included_fields=None): + self.c_options = CIpcReadOptions.Defaults() + self.ensure_native_endian = ensure_native_endian + self.use_threads = use_threads + if included_fields is not None: + self.included_fields = included_fields + + @property + def ensure_native_endian(self): + return self.c_options.ensure_native_endian + + @ensure_native_endian.setter + def ensure_native_endian(self, bint value): + self.c_options.ensure_native_endian = value + + @property + def use_threads(self): + return self.c_options.use_threads + + @use_threads.setter + def use_threads(self, bint value): + self.c_options.use_threads = value + + @property + def included_fields(self): + return self.c_options.included_fields + + @included_fields.setter + def included_fields(self, list value not None): + self.c_options.included_fields = value + + +cdef class IpcWriteOptions(_Weakrefable): + """ + Serialization options for the IPC format. + + Parameters + ---------- + metadata_version : MetadataVersion, default MetadataVersion.V5 + The metadata version to write. V5 is the current and latest, + V4 is the pre-1.0 metadata version (with incompatible Union layout). + allow_64bit : bool, default False + If true, allow field lengths that don't fit in a signed 32-bit int. + use_legacy_format : bool, default False + Whether to use the pre-Arrow 0.15 IPC format. + compression : str, Codec, or None + compression codec to use for record batch buffers. + If None then batch buffers will be uncompressed. + Must be "lz4", "zstd" or None. + To specify a compression_level use `pyarrow.Codec` + use_threads : bool + Whether to use the global CPU thread pool to parallelize any + computational tasks like compression. + emit_dictionary_deltas : bool + Whether to emit dictionary deltas. Default is false for maximum + stream compatibility. + unify_dictionaries : bool + If true then calls to write_table will attempt to unify dictionaries + across all batches in the table. This can help avoid the need for + replacement dictionaries (which the file format does not support) + but requires computing the unified dictionary and then remapping + the indices arrays. + + This parameter is ignored when writing to the IPC stream format as + the IPC stream format can support replacement dictionaries. 
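+
+    Examples
+    --------
+    A minimal sketch (illustrative; assumes the zstd codec is available in
+    the local Arrow build):
+
+    >>> import pyarrow as pa
+    >>> options = pa.ipc.IpcWriteOptions(compression="zstd",
+    ...                                  emit_dictionary_deltas=True)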
+ """ + __slots__ = () + + # cdef block is in lib.pxd + + def __init__(self, *, metadata_version=MetadataVersion.V5, + bint allow_64bit=False, use_legacy_format=False, + compression=None, bint use_threads=True, + bint emit_dictionary_deltas=False, + bint unify_dictionaries=False): + self.c_options = CIpcWriteOptions.Defaults() + self.allow_64bit = allow_64bit + self.use_legacy_format = use_legacy_format + self.metadata_version = metadata_version + if compression is not None: + self.compression = compression + self.use_threads = use_threads + self.emit_dictionary_deltas = emit_dictionary_deltas + self.unify_dictionaries = unify_dictionaries + + @property + def allow_64bit(self): + return self.c_options.allow_64bit + + @allow_64bit.setter + def allow_64bit(self, bint value): + self.c_options.allow_64bit = value + + @property + def use_legacy_format(self): + return self.c_options.write_legacy_ipc_format + + @use_legacy_format.setter + def use_legacy_format(self, bint value): + self.c_options.write_legacy_ipc_format = value + + @property + def metadata_version(self): + return _wrap_metadata_version(self.c_options.metadata_version) + + @metadata_version.setter + def metadata_version(self, value): + self.c_options.metadata_version = _unwrap_metadata_version(value) + + @property + def compression(self): + if self.c_options.codec == nullptr: + return None + else: + return frombytes(self.c_options.codec.get().name()) + + @compression.setter + def compression(self, value): + if value is None: + self.c_options.codec.reset() + elif isinstance(value, str): + codec_type = _ensure_compression(value) + if codec_type != CCompressionType_ZSTD and codec_type != CCompressionType_LZ4_FRAME: + raise ValueError("Compression type must be lz4, zstd or None") + self.c_options.codec = shared_ptr[CCodec](GetResultValue( + CCodec.Create(codec_type)).release()) + elif isinstance(value, Codec): + if value.name != "lz4" and value.name != "zstd": + raise ValueError("Compression type must be lz4, zstd or None") + self.c_options.codec = (value).wrapped + else: + raise TypeError( + "Property `compression` must be None, str, or pyarrow.Codec") + + @property + def use_threads(self): + return self.c_options.use_threads + + @use_threads.setter + def use_threads(self, bint value): + self.c_options.use_threads = value + + @property + def emit_dictionary_deltas(self): + return self.c_options.emit_dictionary_deltas + + @emit_dictionary_deltas.setter + def emit_dictionary_deltas(self, bint value): + self.c_options.emit_dictionary_deltas = value + + @property + def unify_dictionaries(self): + return self.c_options.unify_dictionaries + + @unify_dictionaries.setter + def unify_dictionaries(self, bint value): + self.c_options.unify_dictionaries = value + + +cdef class Message(_Weakrefable): + """ + Container for an Arrow IPC message with metadata and optional body + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.ipc.read_message` function instead." 
+ .format(self.__class__.__name__)) + + @property + def type(self): + return frombytes(FormatMessageType(self.message.get().type())) + + @property + def metadata(self): + return pyarrow_wrap_buffer(self.message.get().metadata()) + + @property + def metadata_version(self): + return _wrap_metadata_version(self.message.get().metadata_version()) + + @property + def body(self): + cdef shared_ptr[CBuffer] body = self.message.get().body() + if body.get() == NULL: + return None + else: + return pyarrow_wrap_buffer(body) + + def equals(self, Message other): + """ + Returns True if the message contents (metadata and body) are identical + + Parameters + ---------- + other : Message + + Returns + ------- + are_equal : bool + """ + cdef c_bool result + with nogil: + result = self.message.get().Equals(deref(other.message.get())) + return result + + def serialize_to(self, NativeFile sink, alignment=8, memory_pool=None): + """ + Write message to generic OutputStream + + Parameters + ---------- + sink : NativeFile + alignment : int, default 8 + Byte alignment for metadata and body + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + """ + cdef: + int64_t output_length = 0 + COutputStream* out + CIpcWriteOptions options + + options.alignment = alignment + out = sink.get_output_stream().get() + with nogil: + check_status(self.message.get() + .SerializeTo(out, options, &output_length)) + + def serialize(self, alignment=8, memory_pool=None): + """ + Write message as encapsulated IPC message + + Parameters + ---------- + alignment : int, default 8 + Byte alignment for metadata and body + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + + Returns + ------- + serialized : Buffer + """ + stream = BufferOutputStream(memory_pool) + self.serialize_to(stream, alignment=alignment, memory_pool=memory_pool) + return stream.getvalue() + + def __repr__(self): + if self.message == nullptr: + return """pyarrow.Message(uninitialized)""" + + metadata_len = self.metadata.size + body = self.body + body_len = 0 if body is None else body.size + + return """pyarrow.Message +type: {0} +metadata length: {1} +body length: {2}""".format(self.type, metadata_len, body_len) + + +cdef class MessageReader(_Weakrefable): + """ + Interface for reading Message objects from some source (like an + InputStream) + """ + cdef: + unique_ptr[CMessageReader] reader + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.ipc.MessageReader.open_stream` function " + "instead.".format(self.__class__.__name__)) + + @staticmethod + def open_stream(source): + """ + Open stream from source, if you want to use memory map use + MemoryMappedFile as source. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + A readable source, like an InputStream + """ + cdef: + MessageReader result = MessageReader.__new__(MessageReader) + shared_ptr[CInputStream] in_stream + unique_ptr[CMessageReader] reader + + _get_input_stream(source, &in_stream) + with nogil: + reader = CMessageReader.Open(in_stream) + result.reader.reset(reader.release()) + + return result + + def __iter__(self): + return self + + def __next__(self): + return self.read_next_message() + + def read_next_message(self): + """ + Read next Message from the stream. 
+ + Raises + ------ + StopIteration + At end of stream + """ + cdef Message result = Message.__new__(Message) + + with nogil: + result.message = move(GetResultValue(self.reader.get() + .ReadNextMessage())) + + if result.message.get() == NULL: + raise StopIteration + + return result + +# ---------------------------------------------------------------------- +# File and stream readers and writers + +cdef class _CRecordBatchWriter(_Weakrefable): + """The base RecordBatchWriter wrapper. + + Provides common implementations of convenience methods. Should not + be instantiated directly by user code. + """ + + # cdef block is in lib.pxd + + def write(self, table_or_batch): + """ + Write RecordBatch or Table to stream. + + Parameters + ---------- + table_or_batch : {RecordBatch, Table} + """ + if isinstance(table_or_batch, RecordBatch): + self.write_batch(table_or_batch) + elif isinstance(table_or_batch, Table): + self.write_table(table_or_batch) + else: + raise ValueError(type(table_or_batch)) + + def write_batch(self, RecordBatch batch, custom_metadata=None): + """ + Write RecordBatch to stream. + + Parameters + ---------- + batch : RecordBatch + custom_metadata : mapping or KeyValueMetadata + Keys and values must be string-like / coercible to bytes + """ + metadata = ensure_metadata(custom_metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + + with nogil: + check_status(self.writer.get() + .WriteRecordBatch(deref(batch.batch), c_meta)) + + def write_table(self, Table table, max_chunksize=None): + """ + Write Table to stream in (contiguous) RecordBatch objects. + + Parameters + ---------- + table : Table + max_chunksize : int, default None + Maximum size for RecordBatch chunks. Individual chunks may be + smaller depending on the chunk layout of individual columns. + """ + cdef: + # max_chunksize must be > 0 to have any impact + int64_t c_max_chunksize = -1 + + if max_chunksize is not None: + c_max_chunksize = max_chunksize + + with nogil: + check_status(self.writer.get().WriteTable(table.table[0], + c_max_chunksize)) + + def close(self): + """ + Close stream and write end-of-stream 0 marker. + """ + with nogil: + check_status(self.writer.get().Close()) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + @property + def stats(self): + """ + Current IPC write statistics. 
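+
+        A minimal sketch (illustrative; ``sink`` is any output stream and
+        ``batch`` a RecordBatch)::
+
+            with pa.ipc.new_stream(sink, batch.schema) as writer:
+                writer.write_batch(batch)
+                writer.stats.num_record_batches   # -> 1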
+ """ + if not self.writer: + raise ValueError("Operation on closed writer") + return _wrap_write_stats(self.writer.get().stats()) + + +cdef class _RecordBatchStreamWriter(_CRecordBatchWriter): + cdef: + CIpcWriteOptions options + bint closed + + def __cinit__(self): + pass + + def __dealloc__(self): + pass + + @property + def _use_legacy_format(self): + # For testing (see test_ipc.py) + return self.options.write_legacy_ipc_format + + @property + def _metadata_version(self): + # For testing (see test_ipc.py) + return _wrap_metadata_version(self.options.metadata_version) + + def _open(self, sink, Schema schema not None, + IpcWriteOptions options=IpcWriteOptions()): + cdef: + shared_ptr[COutputStream] c_sink + + self.options = options.c_options + get_writer(sink, &c_sink) + with nogil: + self.writer = GetResultValue( + MakeStreamWriter(c_sink, schema.sp_schema, + self.options)) + + +cdef _get_input_stream(object source, shared_ptr[CInputStream]* out): + try: + source = as_buffer(source) + except TypeError: + # Non-buffer-like + pass + + get_input_stream(source, True, out) + + +class _ReadPandasMixin: + + def read_pandas(self, **options): + """ + Read contents of stream to a pandas.DataFrame. + + Read all record batches as a pyarrow.Table then convert it to a + pandas.DataFrame using Table.to_pandas. + + Parameters + ---------- + **options + Arguments to forward to :meth:`Table.to_pandas`. + + Returns + ------- + df : pandas.DataFrame + """ + table = self.read_all() + return table.to_pandas(**options) + + +cdef class RecordBatchReader(_Weakrefable): + """Base class for reading stream of record batches. + + Record batch readers function as iterators of record batches that also + provide the schema (without the need to get any batches). + + Warnings + -------- + Do not call this class's constructor directly, use one of the + ``RecordBatchReader.from_*`` functions instead. + + Notes + ----- + To import and export using the Arrow C stream interface, use the + ``_import_from_c`` and ``_export_to_c`` methods. However, keep in mind this + interface is intended for expert users. + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([('x', pa.int64())]) + >>> def iter_record_batches(): + ... for i in range(2): + ... yield pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], schema=schema) + >>> reader = pa.RecordBatchReader.from_batches(schema, iter_record_batches()) + >>> print(reader.schema) + x: int64 + >>> for batch in reader: + ... print(batch) + pyarrow.RecordBatch + x: int64 + ---- + x: [1,2,3] + pyarrow.RecordBatch + x: int64 + ---- + x: [1,2,3] + """ + + # cdef block is in lib.pxd + + def __iter__(self): + return self + + def __next__(self): + return self.read_next_batch() + + @property + def schema(self): + """ + Shared schema of the record batches in the stream. + + Returns + ------- + Schema + """ + cdef shared_ptr[CSchema] c_schema + + with nogil: + c_schema = self.reader.get().schema() + + return pyarrow_wrap_schema(c_schema) + + def read_next_batch(self): + """ + Read next RecordBatch from the stream. + + Raises + ------ + StopIteration: + At end of stream. + + Returns + ------- + RecordBatch + """ + cdef shared_ptr[CRecordBatch] batch + + with nogil: + check_status(self.reader.get().ReadNext(&batch)) + + if batch.get() == NULL: + raise StopIteration + + return pyarrow_wrap_batch(batch) + + def read_next_batch_with_custom_metadata(self): + """ + Read next RecordBatch from the stream along with its custom metadata. 
+ + Raises + ------ + StopIteration: + At end of stream. + + Returns + ------- + batch : RecordBatch + custom_metadata : KeyValueMetadata + """ + cdef: + CRecordBatchWithMetadata batch_with_metadata + + with nogil: + batch_with_metadata = GetResultValue(self.reader.get().ReadNext()) + + if batch_with_metadata.batch.get() == NULL: + raise StopIteration + + return _wrap_record_batch_with_metadata(batch_with_metadata) + + def iter_batches_with_custom_metadata(self): + """ + Iterate over record batches from the stream along with their custom + metadata. + + Yields + ------ + RecordBatchWithMetadata + """ + while True: + try: + yield self.read_next_batch_with_custom_metadata() + except StopIteration: + return + + def read_all(self): + """ + Read all record batches as a pyarrow.Table. + + Returns + ------- + Table + """ + cdef shared_ptr[CTable] table + with nogil: + check_status(self.reader.get().ToTable().Value(&table)) + return pyarrow_wrap_table(table) + + read_pandas = _ReadPandasMixin.read_pandas + + def close(self): + """ + Release any resources associated with the reader. + """ + with nogil: + check_status(self.reader.get().Close()) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowArrayStream struct, given its pointer. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowArrayStream struct. + + Be careful: if you don't pass the ArrowArrayStream struct to a + consumer, array memory will leak. This is a low-level function + intended for expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + with nogil: + check_status(ExportRecordBatchReader( + self.reader, c_ptr)) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import RecordBatchReader from a C ArrowArrayStream struct, + given its pointer. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowArrayStream struct. + + This is a low-level function intended for expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(in_ptr) + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader self + + with nogil: + c_reader = GetResultValue(ImportRecordBatchReader( + c_ptr)) + + self = RecordBatchReader.__new__(RecordBatchReader) + self.reader = c_reader + return self + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export to a C ArrowArrayStream PyCapsule. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the stream should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + Currently, this is not supported and will raise a + NotImplementedError if the schema doesn't match the current schema. + + Returns + ------- + PyCapsule + A capsule containing a C ArrowArrayStream struct. + """ + cdef: + ArrowArrayStream* c_stream + + if requested_schema is not None: + out_schema = Schema._import_from_c_capsule(requested_schema) + # TODO: figure out a way to check if one schema is castable to + # another. Once we have that, we can perform validation here and + # if successful creating a wrapping reader that casts each batch. 
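+            # (Illustrative aside, not part of the original logic: a caller
+            # that really needs the cast can materialize and re-wrap the
+            # stream itself, e.g.
+            #   pa.Table.from_batches(list(reader)).cast(target_schema).to_reader()
+            # where ``reader`` and ``target_schema`` are placeholders, at the
+            # cost of losing the streaming behaviour.)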
+ if self.schema != out_schema: + raise NotImplementedError("Casting to requested_schema") + + stream_capsule = alloc_c_stream(&c_stream) + + with nogil: + check_status(ExportRecordBatchReader(self.reader, c_stream)) + + return stream_capsule + + @staticmethod + def _import_from_c_capsule(stream): + """ + Import RecordBatchReader from a C ArrowArrayStream PyCapsule. + + Parameters + ---------- + stream: PyCapsule + A capsule containing a C ArrowArrayStream PyCapsule. + + Returns + ------- + RecordBatchReader + """ + cdef: + ArrowArrayStream* c_stream + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader self + + c_stream = PyCapsule_GetPointer( + stream, 'arrow_array_stream' + ) + + with nogil: + c_reader = GetResultValue(ImportRecordBatchReader(c_stream)) + + self = RecordBatchReader.__new__(RecordBatchReader) + self.reader = c_reader + return self + + @staticmethod + def from_stream(data, schema=None): + """ + Create RecordBatchReader from a Arrow-compatible stream object. + + This accepts objects implementing the Arrow PyCapsule Protocol for + streams, i.e. objects that have a ``__arrow_c_stream__`` method. + + Parameters + ---------- + data : Arrow-compatible stream object + Any object that implements the Arrow PyCapsule Protocol for + streams. + schema : Schema, default None + The schema to which the stream should be casted, if supported + by the stream object. + + Returns + ------- + RecordBatchReader + """ + + if not hasattr(data, "__arrow_c_stream__"): + raise TypeError( + "Expected an object implementing the Arrow PyCapsule Protocol for " + "streams (i.e. having a `__arrow_c_stream__` method), " + f"got {type(data)!r}." + ) + + if schema is not None: + if not hasattr(schema, "__arrow_c_schema__"): + raise TypeError( + "Expected an object implementing the Arrow PyCapsule Protocol for " + "schema (i.e. having a `__arrow_c_schema__` method), " + f"got {type(schema)!r}." + ) + requested = schema.__arrow_c_schema__() + else: + requested = None + + capsule = data.__arrow_c_stream__(requested) + return RecordBatchReader._import_from_c_capsule(capsule) + + @staticmethod + def from_batches(Schema schema not None, batches): + """ + Create RecordBatchReader from an iterable of batches. + + Parameters + ---------- + schema : Schema + The shared schema of the record batches + batches : Iterable[RecordBatch] + The batches that this reader will return. + + Returns + ------- + reader : RecordBatchReader + """ + cdef: + shared_ptr[CSchema] c_schema + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader self + + c_schema = pyarrow_unwrap_schema(schema) + c_reader = GetResultValue(CPyRecordBatchReader.Make( + c_schema, batches)) + + self = RecordBatchReader.__new__(RecordBatchReader) + self.reader = c_reader + return self + + +cdef class _RecordBatchStreamReader(RecordBatchReader): + cdef: + shared_ptr[CInputStream] in_stream + CIpcReadOptions options + CRecordBatchStreamReader* stream_reader + + def __cinit__(self): + pass + + def _open(self, source, IpcReadOptions options=IpcReadOptions(), + MemoryPool memory_pool=None): + self.options = options.c_options + self.options.memory_pool = maybe_unbox_memory_pool(memory_pool) + _get_input_stream(source, &self.in_stream) + with nogil: + self.reader = GetResultValue(CRecordBatchStreamReader.Open( + self.in_stream, self.options)) + self.stream_reader = self.reader.get() + + @property + def stats(self): + """ + Current IPC read statistics. 
+ """ + if not self.reader: + raise ValueError("Operation on closed reader") + return _wrap_read_stats(self.stream_reader.stats()) + + +cdef class _RecordBatchFileWriter(_RecordBatchStreamWriter): + + def _open(self, sink, Schema schema not None, + IpcWriteOptions options=IpcWriteOptions()): + cdef: + shared_ptr[COutputStream] c_sink + + self.options = options.c_options + get_writer(sink, &c_sink) + with nogil: + self.writer = GetResultValue( + MakeFileWriter(c_sink, schema.sp_schema, self.options)) + +_RecordBatchWithMetadata = namedtuple( + 'RecordBatchWithMetadata', + ('batch', 'custom_metadata')) + + +class RecordBatchWithMetadata(_RecordBatchWithMetadata): + """RecordBatch with its custom metadata + + Parameters + ---------- + batch : RecordBatch + custom_metadata : KeyValueMetadata + """ + __slots__ = () + + +@staticmethod +cdef _wrap_record_batch_with_metadata(CRecordBatchWithMetadata c): + return RecordBatchWithMetadata(pyarrow_wrap_batch(c.batch), + pyarrow_wrap_metadata(c.custom_metadata)) + + +cdef class _RecordBatchFileReader(_Weakrefable): + cdef: + SharedPtrNoGIL[CRecordBatchFileReader] reader + shared_ptr[CRandomAccessFile] file + CIpcReadOptions options + + cdef readonly: + Schema schema + + def __cinit__(self): + pass + + def _open(self, source, footer_offset=None, + IpcReadOptions options=IpcReadOptions(), + MemoryPool memory_pool=None): + self.options = options.c_options + self.options.memory_pool = maybe_unbox_memory_pool(memory_pool) + try: + source = as_buffer(source) + except TypeError: + pass + + get_reader(source, False, &self.file) + + cdef int64_t offset = 0 + if footer_offset is not None: + offset = footer_offset + + with nogil: + if offset != 0: + self.reader = GetResultValue( + CRecordBatchFileReader.Open2(self.file.get(), offset, + self.options)) + + else: + self.reader = GetResultValue( + CRecordBatchFileReader.Open(self.file.get(), + self.options)) + + self.schema = pyarrow_wrap_schema(self.reader.get().schema()) + + @property + def num_record_batches(self): + """ + The number of record batches in the IPC file. + """ + return self.reader.get().num_record_batches() + + def get_batch(self, int i): + """ + Read the record batch with the given index. + + Parameters + ---------- + i : int + The index of the record batch in the IPC file. + + Returns + ------- + batch : RecordBatch + """ + cdef shared_ptr[CRecordBatch] batch + + if i < 0 or i >= self.num_record_batches: + raise ValueError('Batch number {0} out of range'.format(i)) + + with nogil: + batch = GetResultValue(self.reader.get().ReadRecordBatch(i)) + + return pyarrow_wrap_batch(batch) + + # TODO(wesm): ARROW-503: Function was renamed. Remove after a period of + # time has passed + get_record_batch = get_batch + + def get_batch_with_custom_metadata(self, int i): + """ + Read the record batch with the given index along with + its custom metadata + + Parameters + ---------- + i : int + The index of the record batch in the IPC file. 
+ + Returns + ------- + batch : RecordBatch + custom_metadata : KeyValueMetadata + """ + cdef: + CRecordBatchWithMetadata batch_with_metadata + + if i < 0 or i >= self.num_record_batches: + raise ValueError('Batch number {0} out of range'.format(i)) + + with nogil: + batch_with_metadata = GetResultValue( + self.reader.get().ReadRecordBatchWithCustomMetadata(i)) + + return _wrap_record_batch_with_metadata(batch_with_metadata) + + def read_all(self): + """ + Read all record batches as a pyarrow.Table + """ + cdef: + vector[shared_ptr[CRecordBatch]] batches + shared_ptr[CTable] table + int i, nbatches + + nbatches = self.num_record_batches + + batches.resize(nbatches) + with nogil: + for i in range(nbatches): + batches[i] = GetResultValue(self.reader.get() + .ReadRecordBatch(i)) + table = GetResultValue( + CTable.FromRecordBatches(self.schema.sp_schema, move(batches))) + + return pyarrow_wrap_table(table) + + read_pandas = _ReadPandasMixin.read_pandas + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + @property + def stats(self): + """ + Current IPC read statistics. + """ + if not self.reader: + raise ValueError("Operation on closed reader") + return _wrap_read_stats(self.reader.get().stats()) + + +def get_tensor_size(Tensor tensor): + """ + Return total size of serialized Tensor including metadata and padding. + + Parameters + ---------- + tensor : Tensor + The tensor for which we want to known the size. + """ + cdef int64_t size + with nogil: + check_status(GetTensorSize(deref(tensor.tp), &size)) + return size + + +def get_record_batch_size(RecordBatch batch): + """ + Return total size of serialized RecordBatch including metadata and padding. + + Parameters + ---------- + batch : RecordBatch + The recordbatch for which we want to know the size. + """ + cdef int64_t size + with nogil: + check_status(GetRecordBatchSize(deref(batch.batch), &size)) + return size + + +def write_tensor(Tensor tensor, NativeFile dest): + """ + Write pyarrow.Tensor to pyarrow.NativeFile object its current position. + + Parameters + ---------- + tensor : pyarrow.Tensor + dest : pyarrow.NativeFile + + Returns + ------- + bytes_written : int + Total number of bytes written to the file + """ + cdef: + int32_t metadata_length + int64_t body_length + + handle = dest.get_output_stream() + + with nogil: + check_status( + WriteTensor(deref(tensor.tp), handle.get(), + &metadata_length, &body_length)) + + return metadata_length + body_length + + +cdef NativeFile as_native_file(source): + if not isinstance(source, NativeFile): + if hasattr(source, 'read'): + source = PythonFile(source) + else: + source = BufferReader(source) + + if not isinstance(source, NativeFile): + raise ValueError('Unable to read message from object with type: {0}' + .format(type(source))) + return source + + +def read_tensor(source): + """Read pyarrow.Tensor from pyarrow.NativeFile object from current + position. If the file source supports zero copy (e.g. a memory map), then + this operation does not allocate any memory. 
This function not assume that + the stream is aligned + + Parameters + ---------- + source : pyarrow.NativeFile + + Returns + ------- + tensor : Tensor + + """ + cdef: + shared_ptr[CTensor] sp_tensor + CInputStream* c_stream + NativeFile nf = as_native_file(source) + + c_stream = nf.get_input_stream().get() + with nogil: + sp_tensor = GetResultValue(ReadTensor(c_stream)) + return pyarrow_wrap_tensor(sp_tensor) + + +def read_message(source): + """ + Read length-prefixed message from file or buffer-like object + + Parameters + ---------- + source : pyarrow.NativeFile, file-like object, or buffer-like object + + Returns + ------- + message : Message + """ + cdef: + Message result = Message.__new__(Message) + CInputStream* c_stream + + cdef NativeFile nf = as_native_file(source) + c_stream = nf.get_input_stream().get() + + with nogil: + result.message = move( + GetResultValue(ReadMessage(c_stream, c_default_memory_pool()))) + + if result.message == nullptr: + raise EOFError("End of Arrow stream") + + return result + + +def read_schema(obj, DictionaryMemo dictionary_memo=None): + """ + Read Schema from message or buffer + + Parameters + ---------- + obj : buffer or Message + dictionary_memo : DictionaryMemo, optional + Needed to be able to reconstruct dictionary-encoded fields + with read_record_batch + + Returns + ------- + schema : Schema + """ + cdef: + shared_ptr[CSchema] result + shared_ptr[CRandomAccessFile] cpp_file + Message message + CDictionaryMemo temp_memo + CDictionaryMemo* arg_dict_memo + + if dictionary_memo is not None: + arg_dict_memo = dictionary_memo.memo + else: + arg_dict_memo = &temp_memo + + if isinstance(obj, Message): + message = obj + with nogil: + result = GetResultValue(ReadSchema( + deref(message.message.get()), arg_dict_memo)) + else: + get_reader(obj, False, &cpp_file) + with nogil: + result = GetResultValue(ReadSchema(cpp_file.get(), arg_dict_memo)) + + return pyarrow_wrap_schema(result) + + +def read_record_batch(obj, Schema schema, + DictionaryMemo dictionary_memo=None): + """ + Read RecordBatch from message, given a known schema. If reading data from a + complete IPC stream, use ipc.open_stream instead + + Parameters + ---------- + obj : Message or Buffer-like + schema : Schema + dictionary_memo : DictionaryMemo, optional + If message contains dictionaries, must pass a populated + DictionaryMemo + + Returns + ------- + batch : RecordBatch + """ + cdef: + shared_ptr[CRecordBatch] result + Message message + CDictionaryMemo temp_memo + CDictionaryMemo* arg_dict_memo + + if isinstance(obj, Message): + message = obj + else: + message = read_message(obj) + + if dictionary_memo is not None: + arg_dict_memo = dictionary_memo.memo + else: + arg_dict_memo = &temp_memo + + with nogil: + result = GetResultValue( + ReadRecordBatch(deref(message.message.get()), + schema.sp_schema, + arg_dict_memo, + CIpcReadOptions.Defaults())) + + return pyarrow_wrap_batch(result) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/jvm.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/jvm.py new file mode 100644 index 0000000000000000000000000000000000000000..161c5ff4d6d74512dfcd76ddac5a4c4781ad63c3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/jvm.py @@ -0,0 +1,335 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Functions to interact with Arrow memory allocated by Arrow Java. + +These functions convert the objects holding the metadata, the actual +data is not copied at all. + +This will only work with a JVM running in the same process such as provided +through jpype. Modules that talk to a remote JVM like py4j will not work as the +memory addresses reported by them are not reachable in the python process. +""" + +import pyarrow as pa + + +class _JvmBufferNanny: + """ + An object that keeps a org.apache.arrow.memory.ArrowBuf's underlying + memory alive. + """ + ref_manager = None + + def __init__(self, jvm_buf): + ref_manager = jvm_buf.getReferenceManager() + # Will raise a java.lang.IllegalArgumentException if the buffer + # is already freed. It seems that exception cannot easily be + # caught... + ref_manager.retain() + self.ref_manager = ref_manager + + def __del__(self): + if self.ref_manager is not None: + self.ref_manager.release() + + +def jvm_buffer(jvm_buf): + """ + Construct an Arrow buffer from org.apache.arrow.memory.ArrowBuf + + Parameters + ---------- + + jvm_buf: org.apache.arrow.memory.ArrowBuf + Arrow Buffer representation on the JVM. + + Returns + ------- + pyarrow.Buffer + Python Buffer that references the JVM memory. + """ + nanny = _JvmBufferNanny(jvm_buf) + address = jvm_buf.memoryAddress() + size = jvm_buf.capacity() + return pa.foreign_buffer(address, size, base=nanny) + + +def _from_jvm_int_type(jvm_type): + """ + Convert a JVM int type to its Python equivalent. + + Parameters + ---------- + jvm_type : org.apache.arrow.vector.types.pojo.ArrowType$Int + + Returns + ------- + typ : pyarrow.DataType + """ + + bit_width = jvm_type.getBitWidth() + if jvm_type.getIsSigned(): + if bit_width == 8: + return pa.int8() + elif bit_width == 16: + return pa.int16() + elif bit_width == 32: + return pa.int32() + elif bit_width == 64: + return pa.int64() + else: + if bit_width == 8: + return pa.uint8() + elif bit_width == 16: + return pa.uint16() + elif bit_width == 32: + return pa.uint32() + elif bit_width == 64: + return pa.uint64() + + +def _from_jvm_float_type(jvm_type): + """ + Convert a JVM float type to its Python equivalent. + + Parameters + ---------- + jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$FloatingPoint + + Returns + ------- + typ: pyarrow.DataType + """ + precision = jvm_type.getPrecision().toString() + if precision == 'HALF': + return pa.float16() + elif precision == 'SINGLE': + return pa.float32() + elif precision == 'DOUBLE': + return pa.float64() + + +def _from_jvm_time_type(jvm_type): + """ + Convert a JVM time type to its Python equivalent. 
+ + Parameters + ---------- + jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Time + + Returns + ------- + typ: pyarrow.DataType + """ + time_unit = jvm_type.getUnit().toString() + if time_unit == 'SECOND': + assert jvm_type.getBitWidth() == 32 + return pa.time32('s') + elif time_unit == 'MILLISECOND': + assert jvm_type.getBitWidth() == 32 + return pa.time32('ms') + elif time_unit == 'MICROSECOND': + assert jvm_type.getBitWidth() == 64 + return pa.time64('us') + elif time_unit == 'NANOSECOND': + assert jvm_type.getBitWidth() == 64 + return pa.time64('ns') + + +def _from_jvm_timestamp_type(jvm_type): + """ + Convert a JVM timestamp type to its Python equivalent. + + Parameters + ---------- + jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Timestamp + + Returns + ------- + typ: pyarrow.DataType + """ + time_unit = jvm_type.getUnit().toString() + timezone = jvm_type.getTimezone() + if timezone is not None: + timezone = str(timezone) + if time_unit == 'SECOND': + return pa.timestamp('s', tz=timezone) + elif time_unit == 'MILLISECOND': + return pa.timestamp('ms', tz=timezone) + elif time_unit == 'MICROSECOND': + return pa.timestamp('us', tz=timezone) + elif time_unit == 'NANOSECOND': + return pa.timestamp('ns', tz=timezone) + + +def _from_jvm_date_type(jvm_type): + """ + Convert a JVM date type to its Python equivalent + + Parameters + ---------- + jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Date + + Returns + ------- + typ: pyarrow.DataType + """ + day_unit = jvm_type.getUnit().toString() + if day_unit == 'DAY': + return pa.date32() + elif day_unit == 'MILLISECOND': + return pa.date64() + + +def field(jvm_field): + """ + Construct a Field from a org.apache.arrow.vector.types.pojo.Field + instance. + + Parameters + ---------- + jvm_field: org.apache.arrow.vector.types.pojo.Field + + Returns + ------- + pyarrow.Field + """ + name = str(jvm_field.getName()) + jvm_type = jvm_field.getType() + + typ = None + if not jvm_type.isComplex(): + type_str = jvm_type.getTypeID().toString() + if type_str == 'Null': + typ = pa.null() + elif type_str == 'Int': + typ = _from_jvm_int_type(jvm_type) + elif type_str == 'FloatingPoint': + typ = _from_jvm_float_type(jvm_type) + elif type_str == 'Utf8': + typ = pa.string() + elif type_str == 'Binary': + typ = pa.binary() + elif type_str == 'FixedSizeBinary': + typ = pa.binary(jvm_type.getByteWidth()) + elif type_str == 'Bool': + typ = pa.bool_() + elif type_str == 'Time': + typ = _from_jvm_time_type(jvm_type) + elif type_str == 'Timestamp': + typ = _from_jvm_timestamp_type(jvm_type) + elif type_str == 'Date': + typ = _from_jvm_date_type(jvm_type) + elif type_str == 'Decimal': + typ = pa.decimal128(jvm_type.getPrecision(), jvm_type.getScale()) + else: + raise NotImplementedError( + "Unsupported JVM type: {}".format(type_str)) + else: + # TODO: The following JVM types are not implemented: + # Struct, List, FixedSizeList, Union, Dictionary + raise NotImplementedError( + "JVM field conversion only implemented for primitive types.") + + nullable = jvm_field.isNullable() + jvm_metadata = jvm_field.getMetadata() + if jvm_metadata.isEmpty(): + metadata = None + else: + metadata = {str(entry.getKey()): str(entry.getValue()) + for entry in jvm_metadata.entrySet()} + return pa.field(name, typ, nullable, metadata) + + +def schema(jvm_schema): + """ + Construct a Schema from a org.apache.arrow.vector.types.pojo.Schema + instance. 
+ + Parameters + ---------- + jvm_schema: org.apache.arrow.vector.types.pojo.Schema + + Returns + ------- + pyarrow.Schema + """ + fields = jvm_schema.getFields() + fields = [field(f) for f in fields] + jvm_metadata = jvm_schema.getCustomMetadata() + if jvm_metadata.isEmpty(): + metadata = None + else: + metadata = {str(entry.getKey()): str(entry.getValue()) + for entry in jvm_metadata.entrySet()} + return pa.schema(fields, metadata) + + +def array(jvm_array): + """ + Construct an (Python) Array from its JVM equivalent. + + Parameters + ---------- + jvm_array : org.apache.arrow.vector.ValueVector + + Returns + ------- + array : Array + """ + if jvm_array.getField().getType().isComplex(): + minor_type_str = jvm_array.getMinorType().toString() + raise NotImplementedError( + "Cannot convert JVM Arrow array of type {}," + " complex types not yet implemented.".format(minor_type_str)) + dtype = field(jvm_array.getField()).type + buffers = [jvm_buffer(buf) + for buf in list(jvm_array.getBuffers(False))] + + # If JVM has an empty Vector, buffer list will be empty so create manually + if len(buffers) == 0: + return pa.array([], type=dtype) + + length = jvm_array.getValueCount() + null_count = jvm_array.getNullCount() + return pa.Array.from_buffers(dtype, length, buffers, null_count) + + +def record_batch(jvm_vector_schema_root): + """ + Construct a (Python) RecordBatch from a JVM VectorSchemaRoot + + Parameters + ---------- + jvm_vector_schema_root : org.apache.arrow.vector.VectorSchemaRoot + + Returns + ------- + record_batch: pyarrow.RecordBatch + """ + pa_schema = schema(jvm_vector_schema_root.getSchema()) + + arrays = [] + for name in pa_schema.names: + arrays.append(array(jvm_vector_schema_root.getVector(name))) + + return pa.RecordBatch.from_arrays( + arrays, + pa_schema.names, + metadata=pa_schema.metadata + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/lib_api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/lib_api.h new file mode 100644 index 0000000000000000000000000000000000000000..91bd80f90d19d5e309f0d8f7fb870734e4835fd6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/lib_api.h @@ -0,0 +1,201 @@ +/* Generated by Cython 3.0.9 */ + +#ifndef __PYX_HAVE_API__pyarrow__lib +#define __PYX_HAVE_API__pyarrow__lib +#ifdef __MINGW64__ +#define MS_WIN64 +#endif +#include "Python.h" +#include "lib.h" + +static PyObject *(*__pyx_api_f_7pyarrow_3lib_box_memory_pool)( arrow::MemoryPool *) = 0; +#define box_memory_pool __pyx_api_f_7pyarrow_3lib_box_memory_pool +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer)(std::shared_ptr< arrow::Buffer> const &) = 0; +#define pyarrow_wrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer)(std::shared_ptr< arrow::ResizableBuffer> const &) = 0; +#define pyarrow_wrap_resizable_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type)(std::shared_ptr< arrow::DataType> const &) = 0; +#define pyarrow_wrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field)(std::shared_ptr< arrow::Field> const &) = 0; +#define pyarrow_wrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema)(std::shared_ptr< arrow::Schema> const &) = 0; +#define pyarrow_wrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema +static PyObject 
*(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar)(std::shared_ptr< arrow::Scalar> const &) = 0; +#define pyarrow_wrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array)(std::shared_ptr< arrow::Array> const &) = 0; +#define pyarrow_wrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array)(std::shared_ptr< arrow::ChunkedArray> const &) = 0; +#define pyarrow_wrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor)(std::shared_ptr< arrow::SparseCOOTensor> const &) = 0; +#define pyarrow_wrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix)(std::shared_ptr< arrow::SparseCSCMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor)(std::shared_ptr< arrow::SparseCSFTensor> const &) = 0; +#define pyarrow_wrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix)(std::shared_ptr< arrow::SparseCSRMatrix> const &) = 0; +#define pyarrow_wrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor)(std::shared_ptr< arrow::Tensor> const &) = 0; +#define pyarrow_wrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch)(std::shared_ptr< arrow::RecordBatch> const &) = 0; +#define pyarrow_wrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table)(std::shared_ptr< arrow::Table> const &) = 0; +#define pyarrow_wrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table +static std::shared_ptr< arrow::Buffer> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer)(PyObject *) = 0; +#define pyarrow_unwrap_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer +static std::shared_ptr< arrow::DataType> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type)(PyObject *) = 0; +#define pyarrow_unwrap_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type +static std::shared_ptr< arrow::Field> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field)(PyObject *) = 0; +#define pyarrow_unwrap_field __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field +static std::shared_ptr< arrow::Schema> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema)(PyObject *) = 0; +#define pyarrow_unwrap_schema __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema +static std::shared_ptr< arrow::Scalar> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar)(PyObject *) = 0; +#define pyarrow_unwrap_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar +static std::shared_ptr< arrow::Array> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array)(PyObject *) = 0; +#define pyarrow_unwrap_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array +static std::shared_ptr< arrow::ChunkedArray> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array)(PyObject *) = 0; +#define pyarrow_unwrap_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array +static std::shared_ptr< arrow::SparseCOOTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor)(PyObject *) = 0; +#define 
pyarrow_unwrap_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor +static std::shared_ptr< arrow::SparseCSCMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix +static std::shared_ptr< arrow::SparseCSFTensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor +static std::shared_ptr< arrow::SparseCSRMatrix> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_unwrap_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix +static std::shared_ptr< arrow::Tensor> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor)(PyObject *) = 0; +#define pyarrow_unwrap_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor +static std::shared_ptr< arrow::RecordBatch> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch)(PyObject *) = 0; +#define pyarrow_unwrap_batch __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch +static std::shared_ptr< arrow::Table> (*__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table)(PyObject *) = 0; +#define pyarrow_unwrap_table __pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status)(arrow::Status const &) = 0; +#define pyarrow_internal_check_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status +static PyObject *(*__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status)(arrow::Status const &) = 0; +#define pyarrow_internal_convert_status __pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer)(PyObject *) = 0; +#define pyarrow_is_buffer __pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type)(PyObject *) = 0; +#define pyarrow_is_data_type __pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata)(PyObject *) = 0; +#define pyarrow_is_metadata __pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_field)(PyObject *) = 0; +#define pyarrow_is_field __pyx_api_f_7pyarrow_3lib_pyarrow_is_field +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema)(PyObject *) = 0; +#define pyarrow_is_schema __pyx_api_f_7pyarrow_3lib_pyarrow_is_schema +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_array)(PyObject *) = 0; +#define pyarrow_is_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array)(PyObject *) = 0; +#define pyarrow_is_chunked_array __pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar)(PyObject *) = 0; +#define pyarrow_is_scalar __pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor)(PyObject *) = 0; +#define pyarrow_is_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_coo_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix)(PyObject *) = 0; +#define pyarrow_is_sparse_csr_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix)(PyObject *) = 0; +#define 
pyarrow_is_sparse_csc_matrix __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor)(PyObject *) = 0; +#define pyarrow_is_sparse_csf_tensor __pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_table)(PyObject *) = 0; +#define pyarrow_is_table __pyx_api_f_7pyarrow_3lib_pyarrow_is_table +static int (*__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch)(PyObject *) = 0; +#define pyarrow_is_batch __pyx_api_f_7pyarrow_3lib_pyarrow_is_batch +#ifndef __PYX_HAVE_RT_ImportFunction_3_0_9 +#define __PYX_HAVE_RT_ImportFunction_3_0_9 +static int __Pyx_ImportFunction_3_0_9(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + + +static int import_pyarrow__lib(void) { + PyObject *module = 0; + module = PyImport_ImportModule("pyarrow.lib"); + if (!module) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "box_memory_pool", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_box_memory_pool, "PyObject *( arrow::MemoryPool *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_buffer, "PyObject *(std::shared_ptr< arrow::Buffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_resizable_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_resizable_buffer, "PyObject *(std::shared_ptr< arrow::ResizableBuffer> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_data_type, "PyObject *(std::shared_ptr< arrow::DataType> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_field, "PyObject *(std::shared_ptr< arrow::Field> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_schema, "PyObject *(std::shared_ptr< arrow::Schema> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_scalar, "PyObject *(std::shared_ptr< arrow::Scalar> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_array, "PyObject *(std::shared_ptr< arrow::Array> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_chunked_array, "PyObject *(std::shared_ptr< arrow::ChunkedArray> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, 
"pyarrow_wrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_coo_tensor, "PyObject *(std::shared_ptr< arrow::SparseCOOTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csc_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSCMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csf_tensor, "PyObject *(std::shared_ptr< arrow::SparseCSFTensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_sparse_csr_matrix, "PyObject *(std::shared_ptr< arrow::SparseCSRMatrix> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_tensor, "PyObject *(std::shared_ptr< arrow::Tensor> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_batch, "PyObject *(std::shared_ptr< arrow::RecordBatch> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_wrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_wrap_table, "PyObject *(std::shared_ptr< arrow::Table> const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_buffer, "std::shared_ptr< arrow::Buffer> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_data_type, "std::shared_ptr< arrow::DataType> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_field, "std::shared_ptr< arrow::Field> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_schema, "std::shared_ptr< arrow::Schema> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_scalar, "std::shared_ptr< arrow::Scalar> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_array, "std::shared_ptr< arrow::Array> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_chunked_array, "std::shared_ptr< arrow::ChunkedArray> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_coo_tensor, "std::shared_ptr< arrow::SparseCOOTensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csc_matrix, "std::shared_ptr< arrow::SparseCSCMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csf_tensor, "std::shared_ptr< arrow::SparseCSFTensor> (PyObject *)") < 0) goto bad; + 
if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_sparse_csr_matrix, "std::shared_ptr< arrow::SparseCSRMatrix> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_tensor, "std::shared_ptr< arrow::Tensor> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_batch, "std::shared_ptr< arrow::RecordBatch> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_unwrap_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_unwrap_table, "std::shared_ptr< arrow::Table> (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_internal_check_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_check_status, "int (arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_internal_convert_status", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_internal_convert_status, "PyObject *(arrow::Status const &)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_buffer", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_buffer, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_data_type", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_data_type, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_metadata", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_metadata, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_field", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_field, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_schema", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_schema, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_chunked_array", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_chunked_array, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_scalar", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_scalar, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_coo_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_coo_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_csr_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csr_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_csc_matrix", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csc_matrix, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_sparse_csf_tensor", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_sparse_csf_tensor, "int (PyObject *)") < 0) goto bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_table", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_table, "int (PyObject *)") < 0) goto 
bad; + if (__Pyx_ImportFunction_3_0_9(module, "pyarrow_is_batch", (void (**)(void))&__pyx_api_f_7pyarrow_3lib_pyarrow_is_batch, "int (PyObject *)") < 0) goto bad; + Py_DECREF(module); module = 0; + return 0; + bad: + Py_XDECREF(module); + return -1; +} + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/pandas_compat.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/pandas_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..39dee85492400a472aa51cc974a42ffe3567dfc7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/pandas_compat.py @@ -0,0 +1,1229 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import ast +from collections.abc import Sequence +from concurrent import futures +# import threading submodule upfront to avoid partially initialized +# module bug (ARROW-11983) +import concurrent.futures.thread # noqa +from copy import deepcopy +from itertools import zip_longest +import json +import operator +import re +import warnings + +import numpy as np + +import pyarrow as pa +from pyarrow.lib import _pandas_api, frombytes # noqa + + +_logical_type_map = {} + + +def get_logical_type_map(): + global _logical_type_map + + if not _logical_type_map: + _logical_type_map.update({ + pa.lib.Type_NA: 'empty', + pa.lib.Type_BOOL: 'bool', + pa.lib.Type_INT8: 'int8', + pa.lib.Type_INT16: 'int16', + pa.lib.Type_INT32: 'int32', + pa.lib.Type_INT64: 'int64', + pa.lib.Type_UINT8: 'uint8', + pa.lib.Type_UINT16: 'uint16', + pa.lib.Type_UINT32: 'uint32', + pa.lib.Type_UINT64: 'uint64', + pa.lib.Type_HALF_FLOAT: 'float16', + pa.lib.Type_FLOAT: 'float32', + pa.lib.Type_DOUBLE: 'float64', + pa.lib.Type_DATE32: 'date', + pa.lib.Type_DATE64: 'date', + pa.lib.Type_TIME32: 'time', + pa.lib.Type_TIME64: 'time', + pa.lib.Type_BINARY: 'bytes', + pa.lib.Type_FIXED_SIZE_BINARY: 'bytes', + pa.lib.Type_STRING: 'unicode', + }) + return _logical_type_map + + +def get_logical_type(arrow_type): + logical_type_map = get_logical_type_map() + + try: + return logical_type_map[arrow_type.id] + except KeyError: + if isinstance(arrow_type, pa.lib.DictionaryType): + return 'categorical' + elif isinstance(arrow_type, pa.lib.ListType): + return 'list[{}]'.format(get_logical_type(arrow_type.value_type)) + elif isinstance(arrow_type, pa.lib.TimestampType): + return 'datetimetz' if arrow_type.tz is not None else 'datetime' + elif isinstance(arrow_type, pa.lib.Decimal128Type): + return 'decimal' + return 'object' + + +_numpy_logical_type_map = { + np.bool_: 'bool', + np.int8: 'int8', + np.int16: 'int16', + np.int32: 'int32', + np.int64: 'int64', + np.uint8: 'uint8', + np.uint16: 'uint16', + np.uint32: 'uint32', + np.uint64: 'uint64', + np.float32: 'float32', + np.float64: 'float64', + 'datetime64[D]': 
'date', + np.str_: 'string', + np.bytes_: 'bytes', +} + + +def get_logical_type_from_numpy(pandas_collection): + try: + return _numpy_logical_type_map[pandas_collection.dtype.type] + except KeyError: + if hasattr(pandas_collection.dtype, 'tz'): + return 'datetimetz' + # See https://github.com/pandas-dev/pandas/issues/24739 + if str(pandas_collection.dtype) == 'datetime64[ns]': + return 'datetime64[ns]' + result = _pandas_api.infer_dtype(pandas_collection) + if result == 'string': + return 'unicode' + return result + + +def get_extension_dtype_info(column): + dtype = column.dtype + if str(dtype) == 'category': + cats = getattr(column, 'cat', column) + assert cats is not None + metadata = { + 'num_categories': len(cats.categories), + 'ordered': cats.ordered, + } + physical_dtype = str(cats.codes.dtype) + elif hasattr(dtype, 'tz'): + metadata = {'timezone': pa.lib.tzinfo_to_string(dtype.tz)} + physical_dtype = 'datetime64[ns]' + else: + metadata = None + physical_dtype = str(dtype) + return physical_dtype, metadata + + +def get_column_metadata(column, name, arrow_type, field_name): + """Construct the metadata for a given column + + Parameters + ---------- + column : pandas.Series or pandas.Index + name : str + arrow_type : pyarrow.DataType + field_name : str + Equivalent to `name` when `column` is a `Series`, otherwise if `column` + is a pandas Index then `field_name` will not be the same as `name`. + This is the name of the field in the arrow Table's schema. + + Returns + ------- + dict + """ + logical_type = get_logical_type(arrow_type) + + string_dtype, extra_metadata = get_extension_dtype_info(column) + if logical_type == 'decimal': + extra_metadata = { + 'precision': arrow_type.precision, + 'scale': arrow_type.scale, + } + string_dtype = 'object' + + if name is not None and not isinstance(name, str): + raise TypeError( + 'Column name must be a string. Got column {} of type {}'.format( + name, type(name).__name__ + ) + ) + + assert field_name is None or isinstance(field_name, str), \ + str(type(field_name)) + return { + 'name': name, + 'field_name': 'None' if field_name is None else field_name, + 'pandas_type': logical_type, + 'numpy_type': string_dtype, + 'metadata': extra_metadata, + } + + +def construct_metadata(columns_to_convert, df, column_names, index_levels, + index_descriptors, preserve_index, types): + """Returns a dictionary containing enough metadata to reconstruct a pandas + DataFrame as an Arrow Table, including index columns. + + Parameters + ---------- + columns_to_convert : list[pd.Series] + df : pandas.DataFrame + index_levels : List[pd.Index] + index_descriptors : List[Dict] + preserve_index : bool + types : List[pyarrow.DataType] + + Returns + ------- + dict + """ + num_serialized_index_levels = len([descr for descr in index_descriptors + if not isinstance(descr, dict)]) + # Use ntypes instead of Python shorthand notation [:-len(x)] as [:-0] + # behaves differently to what we want. 
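+    # For example, when no index levels are serialized, types[:ntypes - 0]
+    # keeps every entry, whereas the shorthand types[:-0] evaluates to
+    # types[:0] == [] and would silently drop all of the column types.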
+ ntypes = len(types) + df_types = types[:ntypes - num_serialized_index_levels] + index_types = types[ntypes - num_serialized_index_levels:] + + column_metadata = [] + for col, sanitized_name, arrow_type in zip(columns_to_convert, + column_names, df_types): + metadata = get_column_metadata(col, name=sanitized_name, + arrow_type=arrow_type, + field_name=sanitized_name) + column_metadata.append(metadata) + + index_column_metadata = [] + if preserve_index is not False: + non_str_index_names = [] + for level, arrow_type, descriptor in zip(index_levels, index_types, + index_descriptors): + if isinstance(descriptor, dict): + # The index is represented in a non-serialized fashion, + # e.g. RangeIndex + continue + + if level.name is not None and not isinstance(level.name, str): + non_str_index_names.append(level.name) + + metadata = get_column_metadata( + level, + name=_column_name_to_strings(level.name), + arrow_type=arrow_type, + field_name=descriptor, + ) + index_column_metadata.append(metadata) + + if len(non_str_index_names) > 0: + warnings.warn( + f"The DataFrame has non-str index name `{non_str_index_names}`" + " which will be converted to string" + " and not roundtrip correctly.", + UserWarning, stacklevel=4) + + column_indexes = [] + + levels = getattr(df.columns, 'levels', [df.columns]) + names = getattr(df.columns, 'names', [df.columns.name]) + for level, name in zip(levels, names): + metadata = _get_simple_index_descriptor(level, name) + column_indexes.append(metadata) + else: + index_descriptors = index_column_metadata = column_indexes = [] + + return { + b'pandas': json.dumps({ + 'index_columns': index_descriptors, + 'column_indexes': column_indexes, + 'columns': column_metadata + index_column_metadata, + 'creator': { + 'library': 'pyarrow', + 'version': pa.__version__ + }, + 'pandas_version': _pandas_api.version + }).encode('utf8') + } + + +def _get_simple_index_descriptor(level, name): + string_dtype, extra_metadata = get_extension_dtype_info(level) + pandas_type = get_logical_type_from_numpy(level) + if 'mixed' in pandas_type: + warnings.warn( + "The DataFrame has column names of mixed type. They will be " + "converted to strings and not roundtrip correctly.", + UserWarning, stacklevel=4) + if pandas_type == 'unicode': + assert not extra_metadata + extra_metadata = {'encoding': 'UTF-8'} + return { + 'name': name, + 'field_name': name, + 'pandas_type': pandas_type, + 'numpy_type': string_dtype, + 'metadata': extra_metadata, + } + + +def _column_name_to_strings(name): + """Convert a column name (or level) to either a string or a recursive + collection of strings. + + Parameters + ---------- + name : str or tuple + + Returns + ------- + value : str or tuple + + Examples + -------- + >>> name = 'foo' + >>> _column_name_to_strings(name) + 'foo' + >>> name = ('foo', 'bar') + >>> _column_name_to_strings(name) + "('foo', 'bar')" + >>> import pandas as pd + >>> name = (1, pd.Timestamp('2017-02-01 00:00:00')) + >>> _column_name_to_strings(name) + "('1', '2017-02-01 00:00:00')" + """ + if isinstance(name, str): + return name + elif isinstance(name, bytes): + # XXX: should we assume that bytes in Python 3 are UTF-8? 
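+        # For example, b'foo' becomes 'foo'; bytes that are not valid UTF-8
+        # raise UnicodeDecodeError here rather than being silently mangled.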
+ return name.decode('utf8') + elif isinstance(name, tuple): + return str(tuple(map(_column_name_to_strings, name))) + elif isinstance(name, Sequence): + raise TypeError("Unsupported type for MultiIndex level") + elif name is None: + return None + return str(name) + + +def _index_level_name(index, i, column_names): + """Return the name of an index level or a default name if `index.name` is + None or is already a column name. + + Parameters + ---------- + index : pandas.Index + i : int + + Returns + ------- + name : str + """ + if index.name is not None and index.name not in column_names: + return _column_name_to_strings(index.name) + else: + return '__index_level_{:d}__'.format(i) + + +def _get_columns_to_convert(df, schema, preserve_index, columns): + columns = _resolve_columns_of_interest(df, schema, columns) + + if not df.columns.is_unique: + raise ValueError( + 'Duplicate column names found: {}'.format(list(df.columns)) + ) + + if schema is not None: + return _get_columns_to_convert_given_schema(df, schema, preserve_index) + + column_names = [] + + index_levels = ( + _get_index_level_values(df.index) if preserve_index is not False + else [] + ) + + columns_to_convert = [] + convert_fields = [] + + for name in columns: + col = df[name] + name = _column_name_to_strings(name) + + if _pandas_api.is_sparse(col): + raise TypeError( + "Sparse pandas data (column {}) not supported.".format(name)) + + columns_to_convert.append(col) + convert_fields.append(None) + column_names.append(name) + + index_descriptors = [] + index_column_names = [] + for i, index_level in enumerate(index_levels): + name = _index_level_name(index_level, i, column_names) + if (isinstance(index_level, _pandas_api.pd.RangeIndex) and + preserve_index is None): + descr = _get_range_index_descriptor(index_level) + else: + columns_to_convert.append(index_level) + convert_fields.append(None) + descr = name + index_column_names.append(name) + index_descriptors.append(descr) + + all_names = column_names + index_column_names + + # all_names : all of the columns in the resulting table including the data + # columns and serialized index columns + # column_names : the names of the data columns + # index_column_names : the names of the serialized index columns + # index_descriptors : descriptions of each index to be used for + # reconstruction + # index_levels : the extracted index level values + # columns_to_convert : assembled raw data (both data columns and indexes) + # to be converted to Arrow format + # columns_fields : specified column to use for coercion / casting + # during serialization, if a Schema was provided + return (all_names, column_names, index_column_names, index_descriptors, + index_levels, columns_to_convert, convert_fields) + + +def _get_columns_to_convert_given_schema(df, schema, preserve_index): + """ + Specialized version of _get_columns_to_convert in case a Schema is + specified. + In that case, the Schema is used as the single point of truth for the + table structure (types, which columns are included, order of columns, ...). 
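+    The returned tuple has the same shape as the one produced by
+    _get_columns_to_convert: (all_names, column_names, index_column_names,
+    index_descriptors, index_levels, columns_to_convert, convert_fields).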
+ """ + column_names = [] + columns_to_convert = [] + convert_fields = [] + index_descriptors = [] + index_column_names = [] + index_levels = [] + + for name in schema.names: + try: + col = df[name] + is_index = False + except KeyError: + try: + col = _get_index_level(df, name) + except (KeyError, IndexError): + # name not found as index level + raise KeyError( + "name '{}' present in the specified schema is not found " + "in the columns or index".format(name)) + if preserve_index is False: + raise ValueError( + "name '{}' present in the specified schema corresponds " + "to the index, but 'preserve_index=False' was " + "specified".format(name)) + elif (preserve_index is None and + isinstance(col, _pandas_api.pd.RangeIndex)): + raise ValueError( + "name '{}' is present in the schema, but it is a " + "RangeIndex which will not be converted as a column " + "in the Table, but saved as metadata-only not in " + "columns. Specify 'preserve_index=True' to force it " + "being added as a column, or remove it from the " + "specified schema".format(name)) + is_index = True + + name = _column_name_to_strings(name) + + if _pandas_api.is_sparse(col): + raise TypeError( + "Sparse pandas data (column {}) not supported.".format(name)) + + field = schema.field(name) + columns_to_convert.append(col) + convert_fields.append(field) + column_names.append(name) + + if is_index: + index_column_names.append(name) + index_descriptors.append(name) + index_levels.append(col) + + all_names = column_names + index_column_names + + return (all_names, column_names, index_column_names, index_descriptors, + index_levels, columns_to_convert, convert_fields) + + +def _get_index_level(df, name): + """ + Get the index level of a DataFrame given 'name' (column name in an arrow + Schema). + """ + key = name + if name not in df.index.names and _is_generated_index_name(name): + # we know we have an autogenerated name => extract number and get + # the index level positionally + key = int(name[len("__index_level_"):-2]) + return df.index.get_level_values(key) + + +def _level_name(name): + # preserve type when default serializable, otherwise str it + try: + json.dumps(name) + return name + except TypeError: + return str(name) + + +def _get_range_index_descriptor(level): + # public start/stop/step attributes added in pandas 0.25.0 + return { + 'kind': 'range', + 'name': _level_name(level.name), + 'start': _pandas_api.get_rangeindex_attribute(level, 'start'), + 'stop': _pandas_api.get_rangeindex_attribute(level, 'stop'), + 'step': _pandas_api.get_rangeindex_attribute(level, 'step') + } + + +def _get_index_level_values(index): + n = len(getattr(index, 'levels', [index])) + return [index.get_level_values(i) for i in range(n)] + + +def _resolve_columns_of_interest(df, schema, columns): + if schema is not None and columns is not None: + raise ValueError('Schema and columns arguments are mutually ' + 'exclusive, pass only one of them') + elif schema is not None: + columns = schema.names + elif columns is not None: + columns = [c for c in columns if c in df.columns] + else: + columns = df.columns + + return columns + + +def dataframe_to_types(df, preserve_index, columns=None): + (all_names, + column_names, + _, + index_descriptors, + index_columns, + columns_to_convert, + _) = _get_columns_to_convert(df, None, preserve_index, columns) + + types = [] + # If pandas knows type, skip conversion + for c in columns_to_convert: + values = c.values + if _pandas_api.is_categorical(values): + type_ = pa.array(c, from_pandas=True).type + elif 
_pandas_api.is_extension_array_dtype(values): + empty = c.head(0) if isinstance( + c, _pandas_api.pd.Series) else c[:0] + type_ = pa.array(empty, from_pandas=True).type + else: + values, type_ = get_datetimetz_type(values, c.dtype, None) + type_ = pa.lib._ndarray_to_arrow_type(values, type_) + if type_ is None: + type_ = pa.array(c, from_pandas=True).type + types.append(type_) + + metadata = construct_metadata( + columns_to_convert, df, column_names, index_columns, + index_descriptors, preserve_index, types + ) + + return all_names, types, metadata + + +def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None, + safe=True): + (all_names, + column_names, + index_column_names, + index_descriptors, + index_columns, + columns_to_convert, + convert_fields) = _get_columns_to_convert(df, schema, preserve_index, + columns) + + # NOTE(wesm): If nthreads=None, then we use a heuristic to decide whether + # using a thread pool is worth it. Currently the heuristic is whether the + # nrows > 100 * ncols and ncols > 1. + if nthreads is None: + nrows, ncols = len(df), len(df.columns) + if nrows > ncols * 100 and ncols > 1: + nthreads = pa.cpu_count() + else: + nthreads = 1 + + def convert_column(col, field): + if field is None: + field_nullable = True + type_ = None + else: + field_nullable = field.nullable + type_ = field.type + + try: + result = pa.array(col, type=type_, from_pandas=True, safe=safe) + except (pa.ArrowInvalid, + pa.ArrowNotImplementedError, + pa.ArrowTypeError) as e: + e.args += ("Conversion failed for column {!s} with type {!s}" + .format(col.name, col.dtype),) + raise e + if not field_nullable and result.null_count > 0: + raise ValueError("Field {} was non-nullable but pandas column " + "had {} null values".format(str(field), + result.null_count)) + return result + + def _can_definitely_zero_copy(arr): + return (isinstance(arr, np.ndarray) and + arr.flags.contiguous and + issubclass(arr.dtype.type, np.integer)) + + if nthreads == 1: + arrays = [convert_column(c, f) + for c, f in zip(columns_to_convert, convert_fields)] + else: + arrays = [] + with futures.ThreadPoolExecutor(nthreads) as executor: + for c, f in zip(columns_to_convert, convert_fields): + if _can_definitely_zero_copy(c.values): + arrays.append(convert_column(c, f)) + else: + arrays.append(executor.submit(convert_column, c, f)) + + for i, maybe_fut in enumerate(arrays): + if isinstance(maybe_fut, futures.Future): + arrays[i] = maybe_fut.result() + + types = [x.type for x in arrays] + + if schema is None: + fields = [] + for name, type_ in zip(all_names, types): + name = name if name is not None else 'None' + fields.append(pa.field(name, type_)) + schema = pa.schema(fields) + + pandas_metadata = construct_metadata( + columns_to_convert, df, column_names, index_columns, + index_descriptors, preserve_index, types + ) + metadata = deepcopy(schema.metadata) if schema.metadata else dict() + metadata.update(pandas_metadata) + schema = schema.with_metadata(metadata) + + # If dataframe is empty but with RangeIndex -> + # remember the length of the indexes + n_rows = None + if len(arrays) == 0: + try: + kind = index_descriptors[0]["kind"] + if kind == "range": + start = index_descriptors[0]["start"] + stop = index_descriptors[0]["stop"] + step = index_descriptors[0]["step"] + n_rows = len(range(start, stop, step)) + except IndexError: + pass + + return arrays, schema, n_rows + + +def get_datetimetz_type(values, dtype, type_): + if values.dtype.type != np.datetime64: + return values, type_ + + if 
_pandas_api.is_datetimetz(dtype) and type_ is None: + # If no user type passed, construct a tz-aware timestamp type + tz = dtype.tz + unit = dtype.unit + type_ = pa.timestamp(unit, tz) + elif type_ is None: + # Trust the NumPy dtype + type_ = pa.from_numpy_dtype(values.dtype) + + return values, type_ + +# ---------------------------------------------------------------------- +# Converting pyarrow.Table efficiently to pandas.DataFrame + + +def _reconstruct_block(item, columns=None, extension_columns=None): + """ + Construct a pandas Block from the `item` dictionary coming from pyarrow's + serialization or returned by arrow::python::ConvertTableToPandas. + + This function takes care of converting dictionary types to pandas + categorical, Timestamp-with-timezones to the proper pandas Block, and + conversion to pandas ExtensionBlock + + Parameters + ---------- + item : dict + For basic types, this is a dictionary in the form of + {'block': np.ndarray of values, 'placement': pandas block placement}. + Additional keys are present for other types (dictionary, timezone, + object). + columns : + Column names of the table being constructed, used for extension types + extension_columns : dict + Dictionary of {column_name: pandas_dtype} that includes all columns + and corresponding dtypes that will be converted to a pandas + ExtensionBlock. + + Returns + ------- + pandas Block + + """ + import pandas.core.internals as _int + + block_arr = item.get('block', None) + placement = item['placement'] + if 'dictionary' in item: + cat = _pandas_api.categorical_type.from_codes( + block_arr, categories=item['dictionary'], + ordered=item['ordered']) + block = _int.make_block(cat, placement=placement) + elif 'timezone' in item: + unit, _ = np.datetime_data(block_arr.dtype) + dtype = make_datetimetz(unit, item['timezone']) + if _pandas_api.is_ge_v21(): + pd_arr = _pandas_api.pd.array( + block_arr.view("int64"), dtype=dtype, copy=False + ) + block = _int.make_block(pd_arr, placement=placement) + else: + block = _int.make_block(block_arr, placement=placement, + klass=_int.DatetimeTZBlock, + dtype=dtype) + elif 'py_array' in item: + # create ExtensionBlock + arr = item['py_array'] + assert len(placement) == 1 + name = columns[placement[0]] + pandas_dtype = extension_columns[name] + if not hasattr(pandas_dtype, '__from_arrow__'): + raise ValueError("This column does not support to be converted " + "to a pandas ExtensionArray") + pd_ext_arr = pandas_dtype.__from_arrow__(arr) + block = _int.make_block(pd_ext_arr, placement=placement) + else: + block = _int.make_block(block_arr, placement=placement) + + return block + + +def make_datetimetz(unit, tz): + if _pandas_api.is_v1(): + unit = 'ns' # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + tz = pa.lib.string_to_tzinfo(tz) + return _pandas_api.datetimetz_type(unit, tz=tz) + + +def table_to_dataframe( + options, table, categories=None, ignore_metadata=False, types_mapper=None +): + from pandas.core.internals import BlockManager + from pandas import DataFrame + + all_columns = [] + column_indexes = [] + pandas_metadata = table.schema.pandas_metadata + + if not ignore_metadata and pandas_metadata is not None: + all_columns = pandas_metadata['columns'] + column_indexes = pandas_metadata.get('column_indexes', []) + index_descriptors = pandas_metadata['index_columns'] + table = _add_any_metadata(table, pandas_metadata) + table, index = _reconstruct_index(table, index_descriptors, + all_columns, types_mapper) + ext_columns_dtypes = _get_extension_dtypes( + table, 
all_columns, types_mapper) + else: + index = _pandas_api.pd.RangeIndex(table.num_rows) + ext_columns_dtypes = _get_extension_dtypes(table, [], types_mapper) + + _check_data_column_metadata_consistency(all_columns) + columns = _deserialize_column_index(table, all_columns, column_indexes) + blocks = _table_to_blocks(options, table, categories, ext_columns_dtypes) + + axes = [columns, index] + mgr = BlockManager(blocks, axes) + if _pandas_api.is_ge_v21(): + df = DataFrame._from_mgr(mgr, mgr.axes) + else: + df = DataFrame(mgr) + return df + + +# Set of the string repr of all numpy dtypes that can be stored in a pandas +# dataframe (complex not included since not supported by Arrow) +_pandas_supported_numpy_types = { + "int8", "int16", "int32", "int64", + "uint8", "uint16", "uint32", "uint64", + "float16", "float32", "float64", + "object", "bool" +} + + +def _get_extension_dtypes(table, columns_metadata, types_mapper=None): + """ + Based on the stored column pandas metadata and the extension types + in the arrow schema, infer which columns should be converted to a + pandas extension dtype. + + The 'numpy_type' field in the column metadata stores the string + representation of the original pandas dtype (and, despite its name, + not the 'pandas_type' field). + Based on this string representation, a pandas/numpy dtype is constructed + and then we can check if this dtype supports conversion from arrow. + + """ + ext_columns = {} + + # older pandas version that does not yet support extension dtypes + if _pandas_api.extension_dtype is None: + return ext_columns + + # infer the extension columns from the pandas metadata + for col_meta in columns_metadata: + try: + name = col_meta['field_name'] + except KeyError: + name = col_meta['name'] + dtype = col_meta['numpy_type'] + + if dtype not in _pandas_supported_numpy_types: + # pandas_dtype is expensive, so avoid doing this for types + # that are certainly numpy dtypes + pandas_dtype = _pandas_api.pandas_dtype(dtype) + if isinstance(pandas_dtype, _pandas_api.extension_dtype): + if hasattr(pandas_dtype, "__from_arrow__"): + ext_columns[name] = pandas_dtype + + # infer from extension type in the schema + for field in table.schema: + typ = field.type + if isinstance(typ, pa.BaseExtensionType): + try: + pandas_dtype = typ.to_pandas_dtype() + except NotImplementedError: + pass + else: + ext_columns[field.name] = pandas_dtype + + # use the specified mapping of built-in arrow types to pandas dtypes + if types_mapper: + for field in table.schema: + typ = field.type + pandas_dtype = types_mapper(typ) + if pandas_dtype is not None: + ext_columns[field.name] = pandas_dtype + + return ext_columns + + +def _check_data_column_metadata_consistency(all_columns): + # It can never be the case in a released version of pyarrow that + # c['name'] is None *and* 'field_name' is not a key in the column metadata, + # because the change to allow c['name'] to be None and the change to add + # 'field_name' are in the same release (0.8.0) + assert all( + (c['name'] is None and 'field_name' in c) or c['name'] is not None + for c in all_columns + ) + + +def _deserialize_column_index(block_table, all_columns, column_indexes): + column_strings = [frombytes(x) if isinstance(x, bytes) else x + for x in block_table.column_names] + if all_columns: + columns_name_dict = { + c.get('field_name', _column_name_to_strings(c['name'])): c['name'] + for c in all_columns + } + columns_values = [ + columns_name_dict.get(name, name) for name in column_strings + ] + else: + columns_values = 
column_strings + + # If we're passed multiple column indexes then evaluate with + # ast.literal_eval, since the column index values show up as a list of + # tuples + to_pair = ast.literal_eval if len(column_indexes) > 1 else lambda x: (x,) + + # Create the column index + + # Construct the base index + if not columns_values: + columns = _pandas_api.pd.Index(columns_values) + else: + columns = _pandas_api.pd.MultiIndex.from_tuples( + list(map(to_pair, columns_values)), + names=[col_index['name'] for col_index in column_indexes] or None, + ) + + # if we're reconstructing the index + if len(column_indexes) > 0: + columns = _reconstruct_columns_from_metadata(columns, column_indexes) + + # ARROW-1751: flatten a single level column MultiIndex for pandas 0.21.0 + columns = _flatten_single_level_multiindex(columns) + + return columns + + +def _reconstruct_index(table, index_descriptors, all_columns, types_mapper=None): + # 0. 'field_name' is the name of the column in the arrow Table + # 1. 'name' is the user-facing name of the column, that is, it came from + # pandas + # 2. 'field_name' and 'name' differ for index columns + # 3. We fall back on c['name'] for backwards compatibility + field_name_to_metadata = { + c.get('field_name', c['name']): c + for c in all_columns + } + + # Build up a list of index columns and names while removing those columns + # from the original table + index_arrays = [] + index_names = [] + result_table = table + for descr in index_descriptors: + if isinstance(descr, str): + result_table, index_level, index_name = _extract_index_level( + table, result_table, descr, field_name_to_metadata, types_mapper) + if index_level is None: + # ARROW-1883: the serialized index column was not found + continue + elif descr['kind'] == 'range': + index_name = descr['name'] + index_level = _pandas_api.pd.RangeIndex(descr['start'], + descr['stop'], + step=descr['step'], + name=index_name) + if len(index_level) != len(table): + # Possibly the result of munged metadata + continue + else: + raise ValueError("Unrecognized index kind: {}" + .format(descr['kind'])) + index_arrays.append(index_level) + index_names.append(index_name) + + pd = _pandas_api.pd + + # Reconstruct the row index + if len(index_arrays) > 1: + index = pd.MultiIndex.from_arrays(index_arrays, names=index_names) + elif len(index_arrays) == 1: + index = index_arrays[0] + if not isinstance(index, pd.Index): + # Box anything that wasn't boxed above + index = pd.Index(index, name=index_names[0]) + else: + index = pd.RangeIndex(table.num_rows) + + return result_table, index + + +def _extract_index_level(table, result_table, field_name, + field_name_to_metadata, types_mapper=None): + logical_name = field_name_to_metadata[field_name]['name'] + index_name = _backwards_compatible_index_name(field_name, logical_name) + i = table.schema.get_field_index(field_name) + + if i == -1: + # The serialized index column was removed by the user + return result_table, None, None + + pd = _pandas_api.pd + + col = table.column(i) + values = col.to_pandas(types_mapper=types_mapper).values + + if hasattr(values, 'flags') and not values.flags.writeable: + # ARROW-1054: in pandas 0.19.2, factorize will reject + # non-writeable arrays when calling MultiIndex.from_arrays + values = values.copy() + + if isinstance(col.type, pa.lib.TimestampType) and col.type.tz is not None: + index_level = make_tz_aware(pd.Series(values, copy=False), col.type.tz) + else: + index_level = pd.Series(values, dtype=values.dtype, copy=False) + result_table = 
result_table.remove_column( + result_table.schema.get_field_index(field_name) + ) + return result_table, index_level, index_name + + +def _backwards_compatible_index_name(raw_name, logical_name): + """Compute the name of an index column that is compatible with older + versions of :mod:`pyarrow`. + + Parameters + ---------- + raw_name : str + logical_name : str + + Returns + ------- + result : str + + Notes + ----- + * Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager` + """ + # Part of table_to_blockmanager + if raw_name == logical_name and _is_generated_index_name(raw_name): + return None + else: + return logical_name + + +def _is_generated_index_name(name): + pattern = r'^__index_level_\d+__$' + return re.match(pattern, name) is not None + + +_pandas_logical_type_map = { + 'date': 'datetime64[D]', + 'datetime': 'datetime64[ns]', + 'datetimetz': 'datetime64[ns]', + 'unicode': np.str_, + 'bytes': np.bytes_, + 'string': np.str_, + 'integer': np.int64, + 'floating': np.float64, + 'empty': np.object_, +} + + +def _pandas_type_to_numpy_type(pandas_type): + """Get the numpy dtype that corresponds to a pandas type. + + Parameters + ---------- + pandas_type : str + The result of a call to pandas.lib.infer_dtype. + + Returns + ------- + dtype : np.dtype + The dtype that corresponds to `pandas_type`. + """ + try: + return _pandas_logical_type_map[pandas_type] + except KeyError: + if 'mixed' in pandas_type: + # catching 'mixed', 'mixed-integer' and 'mixed-integer-float' + return np.object_ + return np.dtype(pandas_type) + + +def _get_multiindex_codes(mi): + if isinstance(mi, _pandas_api.pd.MultiIndex): + return mi.codes + else: + return None + + +def _reconstruct_columns_from_metadata(columns, column_indexes): + """Construct a pandas MultiIndex from `columns` and column index metadata + in `column_indexes`. + + Parameters + ---------- + columns : List[pd.Index] + The columns coming from a pyarrow.Table + column_indexes : List[Dict[str, str]] + The column index metadata deserialized from the JSON schema metadata + in a :class:`~pyarrow.Table`. + + Returns + ------- + result : MultiIndex + The index reconstructed using `column_indexes` metadata with levels of + the correct type. + + Notes + ----- + * Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager` + """ + pd = _pandas_api.pd + # Get levels and labels, and provide sane defaults if the index has a + # single level to avoid if/else spaghetti. + levels = getattr(columns, 'levels', None) or [columns] + labels = _get_multiindex_codes(columns) or [ + pd.RangeIndex(len(level)) for level in levels + ] + + # Convert each level to the dtype provided in the metadata + levels_dtypes = [ + (level, col_index.get('pandas_type', str(level.dtype)), + col_index.get('numpy_type', None)) + for level, col_index in zip_longest( + levels, column_indexes, fillvalue={} + ) + ] + + new_levels = [] + encoder = operator.methodcaller('encode', 'UTF-8') + + for level, pandas_dtype, numpy_dtype in levels_dtypes: + dtype = _pandas_type_to_numpy_type(pandas_dtype) + # Since our metadata is UTF-8 encoded, Python turns things that were + # bytes into unicode strings when json.loads-ing them. We need to + # convert them back to bytes to preserve metadata. 
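+        # For example, a level value that was b'a' comes back from json.loads
+        # as the unicode string 'a' and is re-encoded to b'a' by the
+        # map(encoder) call below.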
+ if dtype == np.bytes_: + level = level.map(encoder) + # ARROW-13756: if index is timezone aware DataTimeIndex + if pandas_dtype == "datetimetz": + tz = pa.lib.string_to_tzinfo( + column_indexes[0]['metadata']['timezone']) + level = pd.to_datetime(level, utc=True).tz_convert(tz) + elif level.dtype != dtype: + level = level.astype(dtype) + # ARROW-9096: if original DataFrame was upcast we keep that + if level.dtype != numpy_dtype and pandas_dtype != "datetimetz": + level = level.astype(numpy_dtype) + + new_levels.append(level) + + return pd.MultiIndex(new_levels, labels, names=columns.names) + + +def _table_to_blocks(options, block_table, categories, extension_columns): + # Part of table_to_blockmanager + + # Convert an arrow table to Block from the internal pandas API + columns = block_table.column_names + result = pa.lib.table_to_blocks(options, block_table, categories, + list(extension_columns.keys())) + return [_reconstruct_block(item, columns, extension_columns) + for item in result] + + +def _flatten_single_level_multiindex(index): + pd = _pandas_api.pd + if isinstance(index, pd.MultiIndex) and index.nlevels == 1: + levels, = index.levels + labels, = _get_multiindex_codes(index) + # ARROW-9096: use levels.dtype to match cast with original DataFrame + dtype = levels.dtype + + # Cheaply check that we do not somehow have duplicate column names + if not index.is_unique: + raise ValueError('Found non-unique column index') + + return pd.Index( + [levels[_label] if _label != -1 else None for _label in labels], + dtype=dtype, + name=index.names[0] + ) + return index + + +def _add_any_metadata(table, pandas_metadata): + modified_columns = {} + modified_fields = {} + + schema = table.schema + + index_columns = pandas_metadata['index_columns'] + # only take index columns into account if they are an actual table column + index_columns = [idx_col for idx_col in index_columns + if isinstance(idx_col, str)] + n_index_levels = len(index_columns) + n_columns = len(pandas_metadata['columns']) - n_index_levels + + # Add time zones + for i, col_meta in enumerate(pandas_metadata['columns']): + + raw_name = col_meta.get('field_name') + if not raw_name: + # deal with metadata written with arrow < 0.8 or fastparquet + raw_name = col_meta['name'] + if i >= n_columns: + # index columns + raw_name = index_columns[i - n_columns] + if raw_name is None: + raw_name = 'None' + + idx = schema.get_field_index(raw_name) + if idx != -1: + if col_meta['pandas_type'] == 'datetimetz': + col = table[idx] + if not isinstance(col.type, pa.lib.TimestampType): + continue + metadata = col_meta['metadata'] + if not metadata: + continue + metadata_tz = metadata.get('timezone') + if metadata_tz and metadata_tz != col.type.tz: + converted = col.to_pandas() + tz_aware_type = pa.timestamp('ns', tz=metadata_tz) + with_metadata = pa.Array.from_pandas(converted, + type=tz_aware_type) + + modified_fields[idx] = pa.field(schema[idx].name, + tz_aware_type) + modified_columns[idx] = with_metadata + + if len(modified_columns) > 0: + columns = [] + fields = [] + for i in range(len(table.schema)): + if i in modified_columns: + columns.append(modified_columns[i]) + fields.append(modified_fields[i]) + else: + columns.append(table[i]) + fields.append(table.schema[i]) + return pa.Table.from_arrays(columns, schema=pa.schema(fields)) + else: + return table + + +# ---------------------------------------------------------------------- +# Helper functions used in lib + + +def make_tz_aware(series, tz): + """ + Make a datetime64 Series timezone-aware 
for the given tz + """ + tz = pa.lib.string_to_tzinfo(tz) + series = (series.dt.tz_localize('utc') + .dt.tz_convert(tz)) + return series diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/substrait.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/substrait.py new file mode 100644 index 0000000000000000000000000000000000000000..a2b217f4936c56238f8aefb88ae6ca3791c099e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/substrait.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +try: + from pyarrow._substrait import ( # noqa + BoundExpressions, + get_supported_functions, + run_query, + deserialize_expressions, + serialize_expressions + ) +except ImportError as exc: + raise ImportError( + "The pyarrow installation is not built with support " + f"for 'substrait' ({str(exc)})" + ) from None diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__init__.py b/env-llmeval/lib/python3.10/site-packages/responses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..43d270e36cb14c59a34a3f73e11fa6ea62d83ad0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/__init__.py @@ -0,0 +1,852 @@ +from __future__ import absolute_import, print_function, division, unicode_literals + +import _io +from http import client +from http import cookies +import json as json_module +import logging +import re +from itertools import groupby + + +from collections import namedtuple +from functools import wraps +from requests.adapters import HTTPAdapter +from requests.exceptions import ConnectionError +from requests.utils import cookiejar_from_dict +from responses.matchers import json_params_matcher as _json_params_matcher +from responses.matchers import urlencoded_params_matcher as _urlencoded_params_matcher +from responses.registries import FirstMatchRegistry +from responses.matchers import query_string_matcher as _query_string_matcher +from warnings import warn + +from collections.abc import Sequence, Sized + +try: + from requests.packages.urllib3.response import HTTPResponse +except ImportError: # pragma: no cover + from urllib3.response import HTTPResponse # pragma: no cover +try: + from requests.packages.urllib3.connection import HTTPHeaderDict +except ImportError: # pragma: no cover + from urllib3.response import HTTPHeaderDict # pragma: no cover +try: + from requests.packages.urllib3.util.url import parse_url +except ImportError: # pragma: no cover + from urllib3.util.url import parse_url # pragma: no cover + + +from urllib.parse import ( + urlparse, + urlunparse, + parse_qsl, + urlsplit, + urlunsplit, + quote, +) + +from io import BytesIO as BufferIO + +from unittest import mock as std_mock + + +Pattern = re.Pattern + +UNSET = object() + +Call = namedtuple("Call", ["request", 
"response"]) + +_real_send = HTTPAdapter.send + +logger = logging.getLogger("responses") + + +class FalseBool: + # used for backwards compatibility, see + # https://github.com/getsentry/responses/issues/464 + def __bool__(self): + return False + + __nonzero__ = __bool__ + + +def urlencoded_params_matcher(params): + warn( + "Function is deprecated. Use 'from responses.matchers import urlencoded_params_matcher'", + DeprecationWarning, + ) + return _urlencoded_params_matcher(params) + + +def json_params_matcher(params): + warn( + "Function is deprecated. Use 'from responses.matchers import json_params_matcher'", + DeprecationWarning, + ) + return _json_params_matcher(params) + + +def _has_unicode(s): + return any(ord(char) > 128 for char in s) + + +def _clean_unicode(url): + # Clean up domain names, which use punycode to handle unicode chars + urllist = list(urlsplit(url)) + netloc = urllist[1] + if _has_unicode(netloc): + domains = netloc.split(".") + for i, d in enumerate(domains): + if _has_unicode(d): + d = "xn--" + d.encode("punycode").decode("ascii") + domains[i] = d + urllist[1] = ".".join(domains) + url = urlunsplit(urllist) + + # Clean up path/query/params, which use url-encoding to handle unicode chars + chars = list(url) + for i, x in enumerate(chars): + if ord(x) > 128: + chars[i] = quote(x) + + return "".join(chars) + + +def _cookies_from_headers(headers): + resp_cookie = cookies.SimpleCookie() + resp_cookie.load(headers["set-cookie"]) + cookies_dict = {name: v.value for name, v in resp_cookie.items()} + + return cookiejar_from_dict(cookies_dict) + + +def get_wrapped(func, responses, registry=None): + if registry is not None: + responses._set_registry(registry) + + @wraps(func) + def wrapper(*args, **kwargs): + with responses: + return func(*args, **kwargs) + + return wrapper + + +class CallList(Sequence, Sized): + def __init__(self): + self._calls = [] + + def __iter__(self): + return iter(self._calls) + + def __len__(self): + return len(self._calls) + + def __getitem__(self, idx): + return self._calls[idx] + + def add(self, request, response): + self._calls.append(Call(request, response)) + + def reset(self): + self._calls = [] + + +def _ensure_url_default_path(url): + if isinstance(url, str): + url_parts = list(urlsplit(url)) + if url_parts[2] == "": + url_parts[2] = "/" + url = urlunsplit(url_parts) + return url + + +def _get_url_and_path(url): + url_parsed = urlparse(url) + url_and_path = urlunparse( + [url_parsed.scheme, url_parsed.netloc, url_parsed.path, None, None, None] + ) + return parse_url(url_and_path).url + + +def _handle_body(body): + if isinstance(body, str): + body = body.encode("utf-8") + if isinstance(body, _io.BufferedReader): + return body + + data = BufferIO(body) + + def is_closed(): + """ + Real Response uses HTTPResponse as body object. 
+ Thus, when method is_closed is called first to check if there is any more + content to consume and the file-like object is still opened + + This method ensures stability to work for both: + https://github.com/getsentry/responses/issues/438 + https://github.com/getsentry/responses/issues/394 + + where file should be intentionally be left opened to continue consumption + """ + if not data.closed and data.read(1): + # if there is more bytes to read then keep open, but return pointer + data.seek(-1, 1) + return False + else: + if not data.closed: + # close but return False to mock like is still opened + data.close() + return False + + # only if file really closed (by us) return True + return True + + data.isclosed = is_closed + return data + + +class BaseResponse(object): + passthrough = False + content_type = None + headers = None + + stream = False + + def __init__(self, method, url, match_querystring=None, match=()): + self.method = method + # ensure the url has a default path set if the url is a string + self.url = _ensure_url_default_path(url) + + if self._should_match_querystring(match_querystring): + match = tuple(match) + (_query_string_matcher(urlparse(self.url).query),) + + self.match = match + self.call_count = 0 + + def __eq__(self, other): + if not isinstance(other, BaseResponse): + return False + + if self.method != other.method: + return False + + # Can't simply do an equality check on the objects directly here since __eq__ isn't + # implemented for regex. It might seem to work as regex is using a cache to return + # the same regex instances, but it doesn't in all cases. + self_url = self.url.pattern if isinstance(self.url, Pattern) else self.url + other_url = other.url.pattern if isinstance(other.url, Pattern) else other.url + + return self_url == other_url + + def __ne__(self, other): + return not self.__eq__(other) + + def _should_match_querystring(self, match_querystring_argument): + if isinstance(self.url, Pattern): + # the old default from <= 0.9.0 + return False + + if match_querystring_argument is not None: + if not isinstance(match_querystring_argument, FalseBool): + warn( + ( + "Argument 'match_querystring' is deprecated. 
" + "Use 'responses.matchers.query_param_matcher' or " + "'responses.matchers.query_string_matcher'" + ), + DeprecationWarning, + ) + return match_querystring_argument + + return bool(urlparse(self.url).query) + + def _url_matches(self, url, other): + if isinstance(url, str): + if _has_unicode(url): + url = _clean_unicode(url) + + return _get_url_and_path(url) == _get_url_and_path(other) + + elif isinstance(url, Pattern) and url.match(other): + return True + + else: + return False + + @staticmethod + def _req_attr_matches(match, request): + for matcher in match: + valid, reason = matcher(request) + if not valid: + return False, reason + + return True, "" + + def get_headers(self): + headers = HTTPHeaderDict() # Duplicate headers are legal + if self.content_type is not None: + headers["Content-Type"] = self.content_type + if self.headers: + headers.extend(self.headers) + return headers + + def get_response(self, request): + raise NotImplementedError + + def matches(self, request): + if request.method != self.method: + return False, "Method does not match" + + if not self._url_matches(self.url, request.url): + return False, "URL does not match" + + valid, reason = self._req_attr_matches(self.match, request) + if not valid: + return False, reason + + return True, "" + + +class Response(BaseResponse): + def __init__( + self, + method, + url, + body="", + json=None, + status=200, + headers=None, + stream=None, + content_type=UNSET, + auto_calculate_content_length=False, + **kwargs + ): + # if we were passed a `json` argument, + # override the body and content_type + if json is not None: + assert not body + body = json_module.dumps(json) + if content_type is UNSET: + content_type = "application/json" + + if content_type is UNSET: + if isinstance(body, str) and _has_unicode(body): + content_type = "text/plain; charset=utf-8" + else: + content_type = "text/plain" + + self.body = body + self.status = status + self.headers = headers + + if stream is not None: + warn( + "stream argument is deprecated. Use stream parameter in request directly", + DeprecationWarning, + ) + + self.stream = stream + self.content_type = content_type + self.auto_calculate_content_length = auto_calculate_content_length + super(Response, self).__init__(method, url, **kwargs) + + def get_response(self, request): + if self.body and isinstance(self.body, Exception): + raise self.body + + headers = self.get_headers() + status = self.status + body = _handle_body(self.body) + + if ( + self.auto_calculate_content_length + and isinstance(body, BufferIO) + and "Content-Length" not in headers + ): + content_length = len(body.getvalue()) + headers["Content-Length"] = str(content_length) + + return HTTPResponse( + status=status, + reason=client.responses.get(status, None), + body=body, + headers=headers, + original_response=OriginalResponseShim(headers), + preload_content=False, + ) + + def __repr__(self): + return ( + "".format( + url=self.url, + status=self.status, + content_type=self.content_type, + headers=json_module.dumps(self.headers), + ) + ) + + +class CallbackResponse(BaseResponse): + def __init__( + self, method, url, callback, stream=None, content_type="text/plain", **kwargs + ): + self.callback = callback + + if stream is not None: + warn( + "stream argument is deprecated. 
Use stream parameter in request directly", + DeprecationWarning, + ) + self.stream = stream + self.content_type = content_type + super(CallbackResponse, self).__init__(method, url, **kwargs) + + def get_response(self, request): + headers = self.get_headers() + + result = self.callback(request) + if isinstance(result, Exception): + raise result + + status, r_headers, body = result + if isinstance(body, Exception): + raise body + + # If the callback set a content-type remove the one + # set in add_callback() so that we don't have multiple + # content type values. + has_content_type = False + if isinstance(r_headers, dict) and "Content-Type" in r_headers: + has_content_type = True + elif isinstance(r_headers, list): + has_content_type = any( + [h for h in r_headers if h and h[0].lower() == "content-type"] + ) + if has_content_type: + headers.pop("Content-Type", None) + + body = _handle_body(body) + headers.extend(r_headers) + + return HTTPResponse( + status=status, + reason=client.responses.get(status, None), + body=body, + headers=headers, + original_response=OriginalResponseShim(headers), + preload_content=False, + ) + + +class PassthroughResponse(BaseResponse): + passthrough = True + + +class OriginalResponseShim(object): + """ + Shim for compatibility with older versions of urllib3 + + requests cookie handling depends on responses having a property chain of + `response._original_response.msg` which contains the response headers [1] + + Using HTTPResponse() for this purpose causes compatibility errors with + urllib3<1.23.0. To avoid adding more dependencies we can use this shim. + + [1]: https://github.com/psf/requests/blob/75bdc998e2d/requests/cookies.py#L125 + """ + + def __init__(self, headers): + self.msg = headers + + def isclosed(self): + return True + + def close(self): + return + + +class RequestsMock(object): + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" + response_callback = None + + def __init__( + self, + assert_all_requests_are_fired=True, + response_callback=None, + passthru_prefixes=(), + target="requests.adapters.HTTPAdapter.send", + registry=FirstMatchRegistry, + ): + self._calls = CallList() + self.reset() + self._registry = registry() # call only after reset + self.assert_all_requests_are_fired = assert_all_requests_are_fired + self.response_callback = response_callback + self.passthru_prefixes = tuple(passthru_prefixes) + self.target = target + self._patcher = None + + def _get_registry(self): + return self._registry + + def _set_registry(self, new_registry): + if self.registered(): + err_msg = ( + "Cannot replace Registry, current registry has responses.\n" + "Run 'responses.registry.reset()' first" + ) + raise AttributeError(err_msg) + + self._registry = new_registry() + + def reset(self): + self._registry = FirstMatchRegistry() + self._calls.reset() + self.passthru_prefixes = () + + def add( + self, + method=None, # method or ``Response`` + url=None, + body="", + adding_headers=None, + *args, + **kwargs + ): + """ + >>> import responses + + A basic request: + >>> responses.add(responses.GET, 'http://example.com') + + You can also directly pass an object which implements the + ``BaseResponse`` interface: + + >>> responses.add(Response(...)) + + A JSON payload: + + >>> responses.add( + >>> method='GET', + >>> url='http://example.com', + >>> json={'foo': 'bar'}, + >>> ) + + Custom headers: + + >>> responses.add( + >>> method='GET', + >>> url='http://example.com', + >>> headers={'X-Header': 'foo'}, 
+ >>> ) + + """ + if isinstance(method, BaseResponse): + self._registry.add(method) + return + + if adding_headers is not None: + kwargs.setdefault("headers", adding_headers) + + self._registry.add(Response(method=method, url=url, body=body, **kwargs)) + + def add_passthru(self, prefix): + """ + Register a URL prefix or regex to passthru any non-matching mock requests to. + + For example, to allow any request to 'https://example.com', but require + mocks for the remainder, you would add the prefix as so: + + >>> import responses + >>> responses.add_passthru('https://example.com') + + Regex can be used like: + + >>> responses.add_passthru(re.compile('https://example.com/\\w+')) + """ + if not isinstance(prefix, Pattern) and _has_unicode(prefix): + prefix = _clean_unicode(prefix) + self.passthru_prefixes += (prefix,) + + def remove(self, method_or_response=None, url=None): + """ + Removes a response previously added using ``add()``, identified + either by a response object inheriting ``BaseResponse`` or + ``method`` and ``url``. Removes all matching responses. + + >>> import responses + >>> responses.add(responses.GET, 'http://example.org') + >>> responses.remove(responses.GET, 'http://example.org') + """ + if isinstance(method_or_response, BaseResponse): + response = method_or_response + else: + response = BaseResponse(method=method_or_response, url=url) + + self._registry.remove(response) + + def replace(self, method_or_response=None, url=None, body="", *args, **kwargs): + """ + Replaces a response previously added using ``add()``. The signature + is identical to ``add()``. The response is identified using ``method`` + and ``url``, and the first matching response is replaced. + + >>> import responses + >>> responses.add(responses.GET, 'http://example.org', json={'data': 1}) + >>> responses.replace(responses.GET, 'http://example.org', json={'data': 2}) + """ + if isinstance(method_or_response, BaseResponse): + url = method_or_response.url + response = method_or_response + else: + response = Response(method=method_or_response, url=url, body=body, **kwargs) + + self._registry.replace(response) + + def upsert(self, method_or_response=None, url=None, body="", *args, **kwargs): + """ + Replaces a response previously added using ``add()``, or adds the response + if no response exists. Responses are matched using ``method``and ``url``. + The first matching response is replaced. 
+ + >>> import responses + >>> responses.add(responses.GET, 'http://example.org', json={'data': 1}) + >>> responses.upsert(responses.GET, 'http://example.org', json={'data': 2}) + """ + try: + self.replace(method_or_response, url, body, *args, **kwargs) + except ValueError: + self.add(method_or_response, url, body, *args, **kwargs) + + def add_callback( + self, + method, + url, + callback, + match_querystring=FalseBool(), + content_type="text/plain", + match=(), + ): + # ensure the url has a default path set if the url is a string + # url = _ensure_url_default_path(url, match_querystring) + + self._registry.add( + CallbackResponse( + url=url, + method=method, + callback=callback, + content_type=content_type, + match_querystring=match_querystring, + match=match, + ) + ) + + def registered(self): + return self._registry.registered + + @property + def calls(self): + return self._calls + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + success = type is None + self.stop(allow_assert=success) + self.reset() + return success + + def activate(self, func=None, registry=None): + if func is not None: + return get_wrapped(func, self) + + def deco_activate(func): + return get_wrapped(func, self, registry) + + return deco_activate + + def _find_match(self, request): + """ + Iterates through all available matches and validates if any of them matches the request + + :param request: (PreparedRequest), request object + :return: + (Response) found match. If multiple found, then remove & return the first match. + (list) list with reasons why other matches don't match + """ + return self._registry.find(request) + + def _parse_request_params(self, url): + params = {} + for key, val in groupby(parse_qsl(urlparse(url).query), lambda kv: kv[0]): + values = list(map(lambda x: x[1], val)) + if len(values) == 1: + values = values[0] + params[key] = values + return params + + def _on_request(self, adapter, request, **kwargs): + # add attributes params and req_kwargs to 'request' object for further match comparison + # original request object does not have these attributes + request.params = self._parse_request_params(request.path_url) + request.req_kwargs = kwargs + + match, match_failed_reasons = self._find_match(request) + resp_callback = self.response_callback + + if match is None: + if any( + [ + p.match(request.url) + if isinstance(p, Pattern) + else request.url.startswith(p) + for p in self.passthru_prefixes + ] + ): + logger.info("request.allowed-passthru", extra={"url": request.url}) + return _real_send(adapter, request, **kwargs) + + error_msg = ( + "Connection refused by Responses - the call doesn't " + "match any registered mock.\n\n" + "Request: \n" + "- %s %s\n\n" + "Available matches:\n" % (request.method, request.url) + ) + for i, m in enumerate(self.registered()): + error_msg += "- {} {} {}\n".format( + m.method, m.url, match_failed_reasons[i] + ) + + response = ConnectionError(error_msg) + response.request = request + + self._calls.add(request, response) + response = resp_callback(response) if resp_callback else response + raise response + + if match.passthrough: + logger.info("request.passthrough-response", extra={"url": request.url}) + response = _real_send(adapter, request, **kwargs) + else: + try: + response = adapter.build_response(request, match.get_response(request)) + except BaseException as response: + match.call_count += 1 + self._calls.add(request, response) + response = resp_callback(response) if resp_callback else response + raise + + 
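The add_callback() registration defined above wires a CallbackResponse into the registry, so the callable you pass receives the PreparedRequest and must return a (status, headers, body) tuple (or an exception), which get_response() then converts into an HTTPResponse. A minimal usage sketch, not part of the diffed source; the URL, callback name and X-Request-Id value are illustrative placeholders:

import json

import requests
import responses


@responses.activate
def test_sum_api():
    # the callback receives the PreparedRequest and must return a
    # (status, headers, body) tuple -- or raise/return an Exception
    def request_callback(request):
        payload = json.loads(request.body)
        body = json.dumps({"value": sum(payload["numbers"])})
        return (200, {"X-Request-Id": "hypothetical-id"}, body)

    responses.add_callback(
        responses.POST,
        "http://example.com/api/sum",
        callback=request_callback,
        content_type="application/json",
    )

    resp = requests.post("http://example.com/api/sum", json={"numbers": [1, 2, 3]})
    assert resp.json() == {"value": 6}
    assert resp.headers["X-Request-Id"] == "hypothetical-id"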
response = resp_callback(response) if resp_callback else response + match.call_count += 1 + self._calls.add(request, response) + return response + + def start(self): + def unbound_on_send(adapter, request, *a, **kwargs): + return self._on_request(adapter, request, *a, **kwargs) + + self._patcher = std_mock.patch(target=self.target, new=unbound_on_send) + self._patcher.start() + + def stop(self, allow_assert=True): + self._patcher.stop() + if not self.assert_all_requests_are_fired: + return + + if not allow_assert: + return + + not_called = [m for m in self.registered() if m.call_count == 0] + if not_called: + raise AssertionError( + "Not all requests have been executed {0!r}".format( + [(match.method, match.url) for match in not_called] + ) + ) + + def assert_call_count(self, url, count): + call_count = len( + [ + 1 + for call in self.calls + if call.request.url == _ensure_url_default_path(url) + ] + ) + if call_count == count: + return True + else: + raise AssertionError( + "Expected URL '{0}' to be called {1} times. Called {2} times.".format( + url, count, call_count + ) + ) + + +# expose default mock namespace +mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False) +__all__ = [ + "CallbackResponse", + "Response", + "RequestsMock", + # Exposed by the RequestsMock class: + "activate", + "add", + "add_callback", + "add_passthru", + "assert_all_requests_are_fired", + "assert_call_count", + "calls", + "DELETE", + "GET", + "HEAD", + "OPTIONS", + "passthru_prefixes", + "PATCH", + "POST", + "PUT", + "registered", + "remove", + "replace", + "reset", + "response_callback", + "start", + "stop", + "target", + "upsert", +] + +activate = _default_mock.activate +add = _default_mock.add +add_callback = _default_mock.add_callback +add_passthru = _default_mock.add_passthru +assert_all_requests_are_fired = _default_mock.assert_all_requests_are_fired +assert_call_count = _default_mock.assert_call_count +calls = _default_mock.calls +DELETE = _default_mock.DELETE +GET = _default_mock.GET +HEAD = _default_mock.HEAD +OPTIONS = _default_mock.OPTIONS +passthru_prefixes = _default_mock.passthru_prefixes +PATCH = _default_mock.PATCH +POST = _default_mock.POST +PUT = _default_mock.PUT +registered = _default_mock.registered +remove = _default_mock.remove +replace = _default_mock.replace +reset = _default_mock.reset +response_callback = _default_mock.response_callback +start = _default_mock.start +stop = _default_mock.stop +target = _default_mock.target +upsert = _default_mock.upsert diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/responses/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..183f452b3ec66c89af9f97027e8ed1e7d1c08708 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/__init__.pyi @@ -0,0 +1,352 @@ +from collections import Sequence, Sized +from typing import ( + Any, + Callable, + Iterator, + Mapping, + Optional, + NamedTuple, + Protocol, + TypeVar, + Dict, + List, + Tuple, + Union, + Iterable, + overload, + Type +) + +from io import BufferedReader, BytesIO +from re import Pattern +from requests.adapters import HTTPResponse, PreparedRequest +from requests.cookies import RequestsCookieJar +from typing_extensions import Literal +from unittest import mock as std_mock +from urllib.parse import quote as quote +from urllib3.response import HTTPHeaderDict # type: ignore # Not currently exposed in typestubs. 
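Because the module instantiates mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False) and re-exports its bound methods, everyday usage goes through the module-level names (activate, add, calls, assert_call_count, ...). A minimal sketch, not part of the diffed source, with a placeholder URL:

import requests
import responses


@responses.activate
def test_list_users():
    responses.add(
        responses.GET,
        "http://example.com/users",
        json={"users": ["alice", "bob"]},
        status=200,
    )

    resp = requests.get("http://example.com/users")

    assert resp.json() == {"users": ["alice", "bob"]}
    # every intercepted request/response pair is recorded in responses.calls
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == "http://example.com/users"
    # assert_call_count() raises AssertionError on a mismatch, True otherwise
    assert responses.assert_call_count("http://example.com/users", 1)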
+ +from .matchers import urlencoded_params_matcher, json_params_matcher + + +def _clean_unicode(url: str) -> str: ... +def _cookies_from_headers(headers: Dict[str, str]) -> RequestsCookieJar: ... +def _ensure_str(s: str) -> str: ... +def _ensure_url_default_path( + url: Union[Pattern[str], str] +) -> Union[Pattern[str], str]: ... +def _get_url_and_path(url: str) -> str: ... +def _handle_body( + body: Optional[Union[bytes, BufferedReader, str]] +) -> Union[BufferedReader, BytesIO]: ... +def _has_unicode(s: str) -> bool: ... +def _is_string(s: Union[Pattern[str], str]) -> bool: ... +def get_wrapped( + func: Callable[..., Any], responses: RequestsMock, registry: Optional[Any] +) -> Callable[..., Any]: ... + + +class Call(NamedTuple): + request: PreparedRequest + response: Any + +_Body = Union[str, BaseException, "Response", BufferedReader, bytes] + +MatcherIterable = Iterable[Callable[[Any], Callable[..., Any]]] + +class CallList(Sequence[Call], Sized): + def __init__(self) -> None: + self._calls = List[Call] + ... + def __iter__(self) -> Iterator[Call]: ... + def __len__(self) -> int: ... + def __getitem__(self, idx: int) -> Call: ... # type: ignore [override] + def add(self, request: PreparedRequest, response: _Body) -> None: ... + def reset(self) -> None: ... + +class FalseBool: + def __bool__(self) -> bool: ... + +class BaseResponse: + passthrough: bool = ... + content_type: Optional[str] = ... + headers: Optional[Mapping[str, str]] = ... + stream: bool = ... + method: Any = ... + url: Any = ... + match_querystring: Any = ... + match: MatcherIterable = ... + call_count: int = ... + def __init__( + self, + method: str, + url: Union[Pattern[str], str], + match_querystring: Union[bool, object] = ..., + match: MatcherIterable = ..., + ) -> None: ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def _req_attr_matches( + self, match: MatcherIterable, request: PreparedRequest + ) -> Tuple[bool, str]: ... + def _should_match_querystring( + self, match_querystring_argument: Union[bool, object] + ) -> bool: ... + def _url_matches( + self, url: Union[Pattern[str], str], other: str, match_querystring: bool = ... + ) -> bool: ... + def _url_matches_strict(self, url: str, other: str) -> bool: ... + def get_headers(self) -> HTTPHeaderDict: ... # type: ignore + def get_response(self, request: PreparedRequest) -> None: ... + def matches(self, request: PreparedRequest) -> Tuple[bool, str]: ... + +class Response(BaseResponse): + body: _Body = ... + status: int = ... + headers: Optional[Mapping[str, str]] = ... + stream: bool = ... + content_type: Optional[str] = ... + auto_calculate_content_length: bool = ... + def __init__( + self, + method: str, + url: Union[Pattern[str], str], + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: Optional[Mapping[str, str]] = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + auto_calculate_content_length: bool = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + def get_response( # type: ignore [override] + self, request: PreparedRequest + ) -> HTTPResponse: ... + +class CallbackResponse(BaseResponse): + callback: Callable[[Any], Any] = ... + stream: bool = ... + content_type: Optional[str] = ... 
+ def __init__( + self, + method: str, + url: Union[Pattern[str], str], + callback: Callable[[Any], Any], + stream: bool = ..., + content_type: Optional[str] = ..., + match_querystring: Union[bool, FalseBool] = ..., + match: MatcherIterable = ..., + ) -> None: ... + def get_response( # type: ignore [override] + self, request: PreparedRequest + ) -> HTTPResponse: ... + +class PassthroughResponse(BaseResponse): + passthrough: bool = ... + +class OriginalResponseShim: + msg: Any = ... + def __init__( # type: ignore [no-any-unimported] + self, headers: HTTPHeaderDict + ) -> None: ... + def isclosed(self) -> bool: ... + +_F = TypeVar("_F", bound=Callable[..., Any]) + +class RequestsMock: + DELETE: Literal["DELETE"] + GET: Literal["GET"] + HEAD: Literal["HEAD"] + OPTIONS: Literal["OPTIONS"] + PATCH: Literal["PATCH"] + POST: Literal["POST"] + PUT: Literal["PUT"] + response_callback: Optional[Callable[[Any], Any]] = ... + assert_all_requests_are_fired: Any = ... + passthru_prefixes: Tuple[Union[str, Pattern[str]], ...] = ... + target: Any = ... + _matches: List[Any] + def __init__( + self, + assert_all_requests_are_fired: bool = ..., + response_callback: Optional[Callable[[Any], Any]] = ..., + passthru_prefixes: Tuple[str, ...] = ..., + target: str = ..., + registry: Any = ..., + ) -> None: + self._patcher = Callable[[Any], Any] + self._calls = CallList + ... + def reset(self) -> None: ... + add: _Add + add_passthru: _AddPassthru + def remove( + self, + method_or_response: Optional[Union[str, Response]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + ) -> None: ... + replace: _Replace + upsert: _Upsert + add_callback: _AddCallback + @property + def calls(self) -> CallList: ... + def __enter__(self) -> RequestsMock: ... + def __exit__(self, type: Any, value: Any, traceback: Any) -> bool: ... + def activate(self, func: Optional[_F], registry: Optional[Any]) -> _F: ... + def start(self) -> None: ... + def stop(self, allow_assert: bool = ...) -> None: ... + def assert_call_count(self, url: str, count: int) -> bool: ... + def registered(self) -> List[Any]: ... + def _set_registry(self, registry: Any) -> None: ... + def _get_registry(self) -> Any: ... + + +HeaderSet = Optional[Union[Mapping[str, str], List[Tuple[str, str]]]] + +class _Add(Protocol): + def __call__( + self, + method: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: HeaderSet = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + auto_calculate_content_length: bool = ..., + adding_headers: HeaderSet = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _AddCallback(Protocol): + def __call__( + self, + method: str, + url: Union[Pattern[str], str], + callback: Callable[[PreparedRequest], Union[Exception, Tuple[int, Mapping[str, str], _Body]]], + match_querystring: bool = ..., + content_type: Optional[str] = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _AddPassthru(Protocol): + def __call__( + self, prefix: Union[Pattern[str], str] + ) -> None: ... + +class _Remove(Protocol): + def __call__( + self, + method_or_response: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + ) -> None: ... 
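The __enter__/__exit__ stubs above correspond to using RequestsMock directly as a context manager rather than through the module-level singleton; on exit it asserts that every registered response was fired (when assert_all_requests_are_fired is true) and then resets itself. A small sketch, not part of the diffed source, with a placeholder URL:

import requests
import responses


def test_ping():
    with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
        rsps.add(responses.GET, "http://example.com/ping", body="pong")

        assert requests.get("http://example.com/ping").text == "pong"
        assert len(rsps.calls) == 1
    # leaving the with-block would raise AssertionError if any registered
    # response had not been requested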
+ +class _Replace(Protocol): + def __call__( + self, + method_or_response: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: HeaderSet = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + adding_headers: HeaderSet = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _Upsert(Protocol): + def __call__( + self, + method: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: HeaderSet = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + adding_headers: HeaderSet = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _Registered(Protocol): + def __call__(self) -> List[Response]: ... + + +class _Activate(Protocol): + # see https://github.com/getsentry/responses/pull/469 for more details + + @overload + def __call__(self, func: _F = ...) -> _F: ... + # use this overload for scenario when 'responses.activate' is used + + @overload + def __call__(self, registry: Type[Any] = ...) -> Callable[['_F'], '_F']: ... + # use this overload for scenario when 'responses.activate(registry=)' is used + + +activate: _Activate +add: _Add +add_callback: _AddCallback +add_passthru: _AddPassthru +assert_all_requests_are_fired: bool +assert_call_count: Callable[[str, int], bool] +calls: CallList +DELETE: Literal["DELETE"] +GET: Literal["GET"] +HEAD: Literal["HEAD"] +mock: RequestsMock +_default_mock: RequestsMock +OPTIONS: Literal["OPTIONS"] +passthru_prefixes: Tuple[str, ...] +PATCH: Literal["PATCH"] +POST: Literal["POST"] +PUT: Literal["PUT"] +registered: _Registered +remove: _Remove +replace: _Replace +reset: Callable[[], None] +response_callback: Callable[[Any], Any] +start: Callable[[], None] +stop: Callable[..., None] +target: Any +upsert: _Upsert + +__all__ = [ + "CallbackResponse", + "Response", + "RequestsMock", + # Exposed by the RequestsMock class: + "activate", + "add", + "add_callback", + "add_passthru", + "assert_all_requests_are_fired", + "assert_call_count", + "calls", + "DELETE", + "GET", + "HEAD", + "OPTIONS", + "passthru_prefixes", + "PATCH", + "POST", + "PUT", + "registered", + "remove", + "replace", + "reset", + "response_callback", + "start", + "stop", + "target", + "upsert", +] diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb6e93086ef11e6338a372854a70685950934318 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/matchers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/matchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..164335f8fa88fcbd1609aec80779e92cd089cf9e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/matchers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/registries.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/registries.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..97c31b87f0e129c077156983fd848a4766850e0a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/registries.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_matchers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_matchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d389452dbed2bd4e0c59f05b2688bf13ac87c992 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_matchers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_registries.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_registries.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee41939dc82add361e033916618fa460d9684051 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_registries.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_responses.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_responses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..977cfa35c2fa96893e565a7e8fd26702936912b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/responses/__pycache__/test_responses.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/responses/matchers.py b/env-llmeval/lib/python3.10/site-packages/responses/matchers.py new file mode 100644 index 0000000000000000000000000000000000000000..893edc19206e637689e5016a9900afef6b472ecc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/matchers.py @@ -0,0 +1,325 @@ +import json as json_module + +from requests import PreparedRequest +from urllib.parse import parse_qsl, urlparse +from requests.packages.urllib3.util.url import parse_url +from json.decoder import JSONDecodeError + + +def _create_key_val_str(input_dict): + """ + Returns string of format {'key': val, 'key2': val2} + Function is called recursively for nested dictionaries + + :param input_dict: dictionary to transform + :return: (str) reformatted string + """ + + def list_to_str(input_list): + """ + Convert all list items to string. 
+ Function is called recursively for nested lists + """ + converted_list = [] + for item in sorted(input_list, key=lambda x: str(x)): + if isinstance(item, dict): + item = _create_key_val_str(item) + elif isinstance(item, list): + item = list_to_str(item) + + converted_list.append(str(item)) + list_str = ", ".join(converted_list) + return "[" + list_str + "]" + + items_list = [] + for key in sorted(input_dict.keys(), key=lambda x: str(x)): + val = input_dict[key] + if isinstance(val, dict): + val = _create_key_val_str(val) + elif isinstance(val, list): + val = list_to_str(input_list=val) + + items_list.append("{}: {}".format(key, val)) + + key_val_str = "{{{}}}".format(", ".join(items_list)) + return key_val_str + + +def urlencoded_params_matcher(params): + """ + Matches URL encoded data + + :param params: (dict) data provided to 'data' arg of request + :return: (func) matcher + """ + + def match(request): + reason = "" + request_body = request.body + qsl_body = dict(parse_qsl(request_body)) if request_body else {} + params_dict = params or {} + valid = params is None if request_body is None else params_dict == qsl_body + if not valid: + reason = "request.body doesn't match: {} doesn't match {}".format( + _create_key_val_str(qsl_body), _create_key_val_str(params_dict) + ) + + return valid, reason + + return match + + +def json_params_matcher(params): + """ + Matches JSON encoded data + + :param params: (dict) JSON data provided to 'json' arg of request + :return: (func) matcher + """ + + def match(request): + reason = "" + request_body = request.body + params_dict = params or {} + try: + if isinstance(request_body, bytes): + request_body = request_body.decode("utf-8") + json_body = json_module.loads(request_body) if request_body else {} + + valid = params is None if request_body is None else params_dict == json_body + + if not valid: + reason = "request.body doesn't match: {} doesn't match {}".format( + _create_key_val_str(json_body), _create_key_val_str(params_dict) + ) + + except JSONDecodeError: + valid = False + reason = ( + "request.body doesn't match: JSONDecodeError: Cannot parse request.body" + ) + + return valid, reason + + return match + + +def fragment_identifier_matcher(identifier): + def match(request): + reason = "" + url_fragment = urlparse(request.url).fragment + if identifier: + url_fragment_qsl = sorted(parse_qsl(url_fragment)) + identifier_qsl = sorted(parse_qsl(identifier)) + valid = identifier_qsl == url_fragment_qsl + else: + valid = not url_fragment + + if not valid: + reason = "URL fragment identifier is different: {} doesn't match {}".format( + identifier, url_fragment + ) + return valid, reason + + return match + + +def query_param_matcher(params): + """ + Matcher to match 'params' argument in request + + :param params: (dict), same as provided to request + :return: (func) matcher + """ + + def match(request): + reason = "" + request_params = request.params + request_params_dict = request_params or {} + params_dict = params or {} + valid = ( + params is None + if request_params is None + else params_dict == request_params_dict + ) + + if not valid: + reason = "Parameters do not match. 
{} doesn't match {}".format( + _create_key_val_str(request_params_dict), + _create_key_val_str(params_dict), + ) + + return valid, reason + + return match + + +def query_string_matcher(query): + """ + Matcher to match query string part of request + + :param query: (str), same as constructed by request + :return: (func) matcher + """ + + def match(request): + reason = "" + data = parse_url(request.url) + request_query = data.query + + request_qsl = sorted(parse_qsl(request_query)) if request_query else {} + matcher_qsl = sorted(parse_qsl(query)) if query else {} + + valid = not query if request_query is None else request_qsl == matcher_qsl + + if not valid: + reason = "Query string doesn't match. {} doesn't match {}".format( + _create_key_val_str(dict(request_qsl)), + _create_key_val_str(dict(matcher_qsl)), + ) + + return valid, reason + + return match + + +def request_kwargs_matcher(kwargs): + """ + Matcher to match keyword arguments provided to request + + :param kwargs: (dict), keyword arguments, same as provided to request + :return: (func) matcher + """ + + def match(request): + reason = "" + kwargs_dict = kwargs or {} + # validate only kwargs that were requested for comparison, skip defaults + request_kwargs = { + k: v for k, v in request.req_kwargs.items() if k in kwargs_dict + } + + valid = ( + not kwargs_dict + if not request_kwargs + else sorted(kwargs.items()) == sorted(request_kwargs.items()) + ) + + if not valid: + reason = "Arguments don't match: {} doesn't match {}".format( + _create_key_val_str(request_kwargs), _create_key_val_str(kwargs_dict) + ) + + return valid, reason + + return match + + +def multipart_matcher(files, data=None): + """ + Matcher to match 'multipart/form-data' content-type. + This function constructs request body and headers from provided 'data' and 'files' + arguments and compares to actual request + + :param files: (dict), same as provided to request + :param data: (dict), same as provided to request + :return: (func) matcher + """ + if not files: + raise TypeError("files argument cannot be empty") + + prepared = PreparedRequest() + prepared.headers = {"Content-Type": ""} + prepared.prepare_body(data=data, files=files) + + def get_boundary(content_type): + """ + Parse 'boundary' value from header. + + :param content_type: (str) headers["Content-Type"] value + :return: (str) boundary value + """ + if "boundary=" not in content_type: + return "" + + return content_type.split("boundary=")[1] + + def match(request): + reason = "multipart/form-data doesn't match. 
" + if "Content-Type" not in request.headers: + return False, reason + "Request is missing the 'Content-Type' header" + + request_boundary = get_boundary(request.headers["Content-Type"]) + prepared_boundary = get_boundary(prepared.headers["Content-Type"]) + + # replace boundary value in header and in body, since by default + # urllib3.filepost.encode_multipart_formdata dynamically calculates + # random boundary alphanumeric value + request_content_type = request.headers["Content-Type"] + prepared_content_type = prepared.headers["Content-Type"].replace( + prepared_boundary, request_boundary + ) + + request_body = request.body + prepared_body = prepared.body + + if isinstance(prepared_body, bytes): + # since headers always come as str, need to convert to bytes + prepared_boundary = prepared_boundary.encode("utf-8") + request_boundary = request_boundary.encode("utf-8") + + prepared_body = prepared_body.replace(prepared_boundary, request_boundary) + + headers_valid = prepared_content_type == request_content_type + if not headers_valid: + return ( + False, + reason + + "Request headers['Content-Type'] is different. {} isn't equal to {}".format( + request_content_type, prepared_content_type + ), + ) + + body_valid = prepared_body == request_body + if not body_valid: + return False, reason + "Request body differs. {} aren't equal {}".format( + request_body, prepared_body + ) + + return True, "" + + return match + + +def header_matcher(headers, strict_match=False): + """ + Matcher to match 'headers' argument in request using the responses library. + + Because ``requests`` will send several standard headers in addition to what + was specified by your code, request headers that are additional to the ones + passed to the matcher are ignored by default. You can change this behaviour + by passing ``strict_match=True``. + + :param headers: (dict), same as provided to request + :param strict_match: (bool), whether headers in addition to those specified + in the matcher should cause the match to fail. + :return: (func) matcher + """ + + def match(request): + request_headers = request.headers or {} + + if not strict_match: + # filter down to just the headers specified in the matcher + request_headers = {k: v for k, v in request_headers.items() if k in headers} + + valid = sorted(headers.items()) == sorted(request_headers.items()) + + if not valid: + return False, "Headers do not match: {} doesn't match {}".format( + _create_key_val_str(request_headers), _create_key_val_str(headers) + ) + + return valid, "" + + return match diff --git a/env-llmeval/lib/python3.10/site-packages/responses/matchers.pyi b/env-llmeval/lib/python3.10/site-packages/responses/matchers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..188de2e34896c79ba7249e97c158ef718a507717 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/matchers.pyi @@ -0,0 +1,44 @@ +from typing import ( + Any, + Callable, + Optional, + Dict, +) + +JSONDecodeError = ValueError + + +def _create_key_val_str(input_dict: Dict[Any, Any]) -> str: ... + +def json_params_matcher( + params: Optional[Dict[str, Any]] +) -> Callable[..., Any]: ... + +def urlencoded_params_matcher( + params: Optional[Dict[str, str]] +) -> Callable[..., Any]: ... + +def query_param_matcher( + params: Optional[Dict[str, str]] +) -> Callable[..., Any]: ... + +def query_string_matcher( + query: Optional[str] +) -> Callable[..., Any]: ... + +def request_kwargs_matcher( + kwargs: Optional[Dict[str, Any]] +) -> Callable[..., Any]: ... 
+ +def multipart_matcher( + files: Dict[str, Any], data: Optional[Dict[str, str]] = ... +) -> Callable[..., Any]: ... + +def header_matcher( + headers: Dict[str, str], + strict_match: bool = ... +) -> Callable[..., Any]: ... + +def fragment_identifier_matcher( + identifier: Optional[str] +) -> Callable[..., Any]: ... diff --git a/env-llmeval/lib/python3.10/site-packages/responses/py.typed b/env-llmeval/lib/python3.10/site-packages/responses/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/responses/registries.py b/env-llmeval/lib/python3.10/site-packages/responses/registries.py new file mode 100644 index 0000000000000000000000000000000000000000..22f79519a1100db4a163d1615c4c3300824972e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/registries.py @@ -0,0 +1,63 @@ +from typing import ( + TYPE_CHECKING, + List, + Optional, + Tuple, +) + +if TYPE_CHECKING: # pragma: no cover + # import only for linter run + from requests import PreparedRequest + from responses import BaseResponse + + +class FirstMatchRegistry(object): + def __init__(self) -> None: + self._responses: List["BaseResponse"] = [] + + @property + def registered(self) -> List["BaseResponse"]: + return self._responses + + def reset(self) -> None: + self._responses = [] + + def find( + self, request: "PreparedRequest" + ) -> Tuple[Optional["BaseResponse"], List[str]]: + found = None + found_match = None + match_failed_reasons = [] + for i, response in enumerate(self.registered): + match_result, reason = response.matches(request) + if match_result: + if found is None: + found = i + found_match = response + else: + if self.registered[found].call_count > 0: + # that assumes that some responses were added between calls + self.registered.pop(found) + found_match = response + break + # Multiple matches found. Remove & return the first response. 
+ return self.registered.pop(found), match_failed_reasons + else: + match_failed_reasons.append(reason) + return found_match, match_failed_reasons + + def add(self, response: "BaseResponse") -> None: + self.registered.append(response) + + def remove(self, response: "BaseResponse") -> None: + while response in self.registered: + self.registered.remove(response) + + def replace(self, response: "BaseResponse") -> None: + try: + index = self.registered.index(response) + except ValueError: + raise ValueError( + "Response is not registered for URL {}".format(response.url) + ) + self.registered[index] = response diff --git a/env-llmeval/lib/python3.10/site-packages/responses/test_matchers.py b/env-llmeval/lib/python3.10/site-packages/responses/test_matchers.py new file mode 100644 index 0000000000000000000000000000000000000000..d061d97b98b2c09f65c4e7105b3d70a8e018cff4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/test_matchers.py @@ -0,0 +1,625 @@ +from __future__ import absolute_import, print_function, division, unicode_literals + +import pytest +import requests +import responses +from requests.exceptions import ConnectionError +from responses import matchers + + +def assert_response(resp, body=None, content_type="text/plain"): + assert resp.status_code == 200 + assert resp.reason == "OK" + assert resp.headers["Content-Type"] == content_type + assert resp.text == body + + +def assert_reset(): + assert len(responses._default_mock.registered()) == 0 + assert len(responses.calls) == 0 + + +def test_query_string_matcher(): + @responses.activate + def run(): + url = "http://example.com?test=1&foo=bar" + responses.add( + responses.GET, + url, + body=b"test", + match=[matchers.query_string_matcher("test=1&foo=bar")], + ) + resp = requests.get("http://example.com?test=1&foo=bar") + assert_response(resp, "test") + resp = requests.get("http://example.com?foo=bar&test=1") + assert_response(resp, "test") + resp = requests.get("http://example.com/?foo=bar&test=1") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_request_matches_post_params(): + @responses.activate + def run(deprecated): + if deprecated: + json_params_matcher = getattr(responses, "json_params_matcher") + urlencoded_params_matcher = getattr(responses, "urlencoded_params_matcher") + else: + json_params_matcher = matchers.json_params_matcher + urlencoded_params_matcher = matchers.urlencoded_params_matcher + + responses.add( + method=responses.POST, + url="http://example.com/", + body="one", + match=[json_params_matcher({"page": {"name": "first", "type": "json"}})], + ) + responses.add( + method=responses.POST, + url="http://example.com/", + body="two", + match=[urlencoded_params_matcher({"page": "second", "type": "urlencoded"})], + ) + + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "x-www-form-urlencoded"}, + data={"page": "second", "type": "urlencoded"}, + ) + assert_response(resp, "two") + + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "application/json"}, + json={"page": {"name": "first", "type": "json"}}, + ) + assert_response(resp, "one") + + with pytest.deprecated_call(): + run(deprecated=True) + assert_reset() + + run(deprecated=False) + assert_reset() + + +def test_request_matches_empty_body(): + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: + # test that both json and urlencoded body are empty in matcher and in request + rsps.add( + method=responses.POST, + 
url="http://example.com/", + body="one", + match=[matchers.json_params_matcher(None)], + ) + + rsps.add( + method=responses.POST, + url="http://example.com/", + body="two", + match=[matchers.urlencoded_params_matcher(None)], + ) + + resp = requests.request("POST", "http://example.com/") + assert_response(resp, "one") + + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "x-www-form-urlencoded"}, + ) + assert_response(resp, "two") + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + # test exception raise if matcher body is None but request data is not None + rsps.add( + method=responses.POST, + url="http://example.com/", + body="one", + match=[matchers.json_params_matcher(None)], + ) + + with pytest.raises(ConnectionError) as excinfo: + resp = requests.request( + "POST", + "http://example.com/", + json={"my": "data"}, + headers={"Content-Type": "application/json"}, + ) + + msg = str(excinfo.value) + assert "request.body doesn't match: {my: data} doesn't match {}" in msg + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + method=responses.POST, + url="http://example.com/", + body="two", + match=[matchers.urlencoded_params_matcher(None)], + ) + with pytest.raises(ConnectionError) as excinfo: + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "x-www-form-urlencoded"}, + data={"page": "second", "type": "urlencoded"}, + ) + msg = str(excinfo.value) + assert ( + "request.body doesn't match: {page: second, type: urlencoded} doesn't match {}" + in msg + ) + + run() + assert_reset() + + +def test_request_matches_params(): + @responses.activate + def run(): + url = "http://example.com/test" + params = {"hello": "world", "I am": "a big test"} + responses.add( + method=responses.GET, + url=url, + body="test", + match=[matchers.query_param_matcher(params)], + match_querystring=False, + ) + + # exchange parameter places for the test + params = { + "I am": "a big test", + "hello": "world", + } + resp = requests.get(url, params=params) + + constructed_url = r"http://example.com/test?I+am=a+big+test&hello=world" + assert resp.url == constructed_url + assert resp.request.url == constructed_url + + resp_params = getattr(resp.request, "params") + assert resp_params == params + + run() + assert_reset() + + +def test_fail_matchers_error(): + """ + Validate that Exception is raised if request does not match responses.matchers + validate matchers.urlencoded_params_matcher + validate matchers.json_params_matcher + validate matchers.query_param_matcher + validate matchers.request_kwargs_matcher + :return: None + """ + + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + "POST", + "http://example.com", + match=[matchers.urlencoded_params_matcher({"foo": "bar"})], + ) + rsps.add( + "POST", + "http://example.com", + match=[matchers.json_params_matcher({"fail": "json"})], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://example.com", data={"id": "bad"}) + + msg = str(excinfo.value) + assert ( + "request.body doesn't match: {id: bad} doesn't match {foo: bar}" in msg + ) + + assert ( + "request.body doesn't match: JSONDecodeError: Cannot parse request.body" + in msg + ) + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + "GET", + "http://111.com", + match=[matchers.query_param_matcher({"my": "params"})], + ) + + rsps.add( + method=responses.GET, + 
url="http://111.com/", + body="two", + match=[matchers.json_params_matcher({"page": "one"})], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get( + "http://111.com", params={"id": "bad"}, json={"page": "two"} + ) + + msg = str(excinfo.value) + assert ( + "Parameters do not match. {id: bad} doesn't match {my: params}" in msg + ) + assert ( + "request.body doesn't match: {page: two} doesn't match {page: one}" + in msg + ) + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_kwargs = { + "stream": True, + "verify": False, + } + rsps.add( + "GET", + "http://111.com", + match=[matchers.request_kwargs_matcher(req_kwargs)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get("http://111.com", stream=True) + + msg = str(excinfo.value) + assert ( + "Arguments don't match: " + "{stream: True, verify: True} doesn't match {stream: True, verify: False}" + ) in msg + + run() + assert_reset() + + +@pytest.mark.parametrize( + "req_file,match_file", + [ + (b"Old World!", "Old World!"), + ("Old World!", b"Old World!"), + (b"Old World!", b"Old World!"), + ("Old World!", "Old World!"), + (b"\xacHello World!", b"\xacHello World!"), + ], +) +def test_multipart_matcher(req_file, match_file): + @responses.activate + def run(): + req_data = {"some": "other", "data": "fields"} + responses.add( + responses.POST, + url="http://httpbin.org/post", + match=[ + matchers.multipart_matcher( + files={"file_name": match_file}, data=req_data + ) + ], + ) + resp = requests.post( + "http://httpbin.org/post", data=req_data, files={"file_name": req_file} + ) + assert resp.status_code == 200 + + with pytest.raises(TypeError): + responses.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(files={})], + ) + + run() + assert_reset() + + +def test_multipart_matcher_fail(): + """ + Validate that Exception is raised if request does not match responses.matchers + validate matchers.multipart_matcher + :return: None + """ + + def run(): + # different file contents + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + rsps.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post( + "http://httpbin.org/post", + data=req_data, + files={"file_name": b"New World!"}, + ) + + msg = str(excinfo.value) + assert "multipart/form-data doesn't match. Request body differs." in msg + + assert ( + r'\r\nContent-Disposition: form-data; name="file_name"; ' + r'filename="file_name"\r\n\r\nOld World!\r\n' + ) in msg + assert ( + r'\r\nContent-Disposition: form-data; name="file_name"; ' + r'filename="file_name"\r\n\r\nNew World!\r\n' + ) in msg + + # x-www-form-urlencoded request + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + rsps.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://httpbin.org/post", data=req_data) + + msg = str(excinfo.value) + assert ( + "multipart/form-data doesn't match. Request headers['Content-Type'] is different." 
+ in msg + ) + assert ( + "application/x-www-form-urlencoded isn't equal to multipart/form-data; boundary=" + in msg + ) + + # empty body request + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_files = {"file_name": b"Old World!"} + rsps.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://httpbin.org/post") + + msg = str(excinfo.value) + assert "Request is missing the 'Content-Type' header" in msg + + run() + assert_reset() + + +def test_query_string_matcher_raises(): + """ + Validate that Exception is raised if request does not match responses.matchers + validate matchers.query_string_matcher + :return: None + """ + + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + "GET", + "http://111.com", + match=[matchers.query_string_matcher("didi=pro")], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get("http://111.com", params={"test": "1", "didi": "pro"}) + + msg = str(excinfo.value) + assert ( + "Query string doesn't match. {didi: pro, test: 1} doesn't match {didi: pro}" + in msg + ) + + run() + assert_reset() + + +def test_request_matches_headers(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + method=responses.GET, + url=url, + json={"success": True}, + match=[matchers.header_matcher({"Accept": "application/json"})], + ) + + responses.add( + method=responses.GET, + url=url, + body="success", + match=[matchers.header_matcher({"Accept": "text/plain"})], + ) + + # the actual request can contain extra headers (requests always adds some itself anyway) + resp = requests.get( + url, headers={"Accept": "application/json", "Accept-Charset": "utf-8"} + ) + assert_response(resp, body='{"success": true}', content_type="application/json") + + resp = requests.get(url, headers={"Accept": "text/plain"}) + assert_response(resp, body="success", content_type="text/plain") + + run() + assert_reset() + + +def test_request_matches_headers_no_match(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + method=responses.GET, + url=url, + json={"success": True}, + match=[matchers.header_matcher({"Accept": "application/json"})], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get(url, headers={"Accept": "application/xml"}) + + msg = str(excinfo.value) + assert ( + "Headers do not match: {Accept: application/xml} doesn't match " + "{Accept: application/json}" + ) in msg + + run() + assert_reset() + + +def test_request_matches_headers_strict_match(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + method=responses.GET, + url=url, + body="success", + match=[ + matchers.header_matcher({"Accept": "text/plain"}, strict_match=True) + ], + ) + + # requests will add some extra headers of its own, so we have to use prepared requests + session = requests.Session() + + # make sure we send *just* the header we're expectin + prepped = session.prepare_request( + requests.Request( + method="GET", + url=url, + ) + ) + prepped.headers.clear() + prepped.headers["Accept"] = "text/plain" + + resp = session.send(prepped) + assert_response(resp, body="success", content_type="text/plain") + + # include the "Accept-Charset" header, which will fail to match + prepped = session.prepare_request( + requests.Request( + method="GET", + url=url, + ) + ) + prepped.headers.clear() + 
prepped.headers["Accept"] = "text/plain" + prepped.headers["Accept-Charset"] = "utf-8" + + with pytest.raises(ConnectionError) as excinfo: + session.send(prepped) + + msg = str(excinfo.value) + assert ( + "Headers do not match: {Accept: text/plain, Accept-Charset: utf-8} " + "doesn't match {Accept: text/plain}" + ) in msg + + run() + assert_reset() + + +def test_fragment_identifier_matcher(): + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com", + match=[matchers.fragment_identifier_matcher("test=1&foo=bar")], + body=b"test", + ) + + resp = requests.get("http://example.com#test=1&foo=bar") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_fragment_identifier_matcher_error(): + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com/", + match=[matchers.fragment_identifier_matcher("test=1")], + ) + responses.add( + responses.GET, + "http://example.com/", + match=[matchers.fragment_identifier_matcher(None)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get("http://example.com/#test=2") + + msg = str(excinfo.value) + assert ( + "URL fragment identifier is different: test=1 doesn't match test=2" + ) in msg + assert ( + "URL fragment identifier is different: None doesn't match test=2" + ) in msg + + run() + assert_reset() + + +def test_fragment_identifier_matcher_and_match_querystring(): + @responses.activate + def run(): + url = "http://example.com?ab=xy&zed=qwe#test=1&foo=bar" + responses.add( + responses.GET, + url, + match_querystring=True, + match=[matchers.fragment_identifier_matcher("test=1&foo=bar")], + body=b"test", + ) + + # two requests to check reversed order of fragment identifier + resp = requests.get("http://example.com?ab=xy&zed=qwe#test=1&foo=bar") + assert_response(resp, "test") + resp = requests.get("http://example.com?zed=qwe&ab=xy#foo=bar&test=1") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_matchers_create_key_val_str(): + """ + Test that matchers._create_key_val_str does recursive conversion + """ + data = { + "my_list": [ + 1, + 2, + "a", + {"key1": "val1", "key2": 2, 3: "test"}, + "!", + [["list", "nested"], {"nested": "dict"}], + ], + 1: 4, + "test": "val", + "high": {"nested": "nested_dict"}, + } + conv_str = matchers._create_key_val_str(data) + reference = ( + "{1: 4, high: {nested: nested_dict}, my_list: [!, 1, 2, [[list, nested], {nested: dict}], " + "a, {3: test, key1: val1, key2: 2}], test: val}" + ) + assert conv_str == reference diff --git a/env-llmeval/lib/python3.10/site-packages/responses/test_registries.py b/env-llmeval/lib/python3.10/site-packages/responses/test_registries.py new file mode 100644 index 0000000000000000000000000000000000000000..b4dd8cc5600403da68c89378646dee33b7e6b7f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/test_registries.py @@ -0,0 +1,70 @@ +import pytest + +import responses +from responses import registries +from responses.test_responses import assert_reset + + +def test_set_registry_not_empty(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + @responses.activate + def run(): + url = "http://fizzbuzz/foo" + responses.add(method=responses.GET, url=url) + with pytest.raises(AttributeError) as excinfo: + responses.mock._set_registry(CustomRegistry) + msg = str(excinfo.value) + assert "Cannot replace Registry, current registry has responses" in msg + + run() + assert_reset() + + +def test_set_registry(): + class 
CustomRegistry(registries.FirstMatchRegistry): + pass + + @responses.activate(registry=CustomRegistry) + def run_with_registry(): + assert type(responses.mock._get_registry()) == CustomRegistry + + @responses.activate + def run(): + # test that registry does not leak to another test + assert type(responses.mock._get_registry()) == registries.FirstMatchRegistry + + run_with_registry() + run() + assert_reset() + + +def test_set_registry_context_manager(): + def run(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + with responses.RequestsMock( + assert_all_requests_are_fired=False, registry=CustomRegistry + ) as rsps: + assert type(rsps._get_registry()) == CustomRegistry + assert type(responses.mock._get_registry()) == registries.FirstMatchRegistry + + run() + assert_reset() + + +def test_registry_reset(): + def run(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + with responses.RequestsMock( + assert_all_requests_are_fired=False, registry=CustomRegistry + ) as rsps: + rsps._get_registry().reset() + assert not rsps.registered() + + run() + assert_reset() diff --git a/env-llmeval/lib/python3.10/site-packages/responses/test_responses.py b/env-llmeval/lib/python3.10/site-packages/responses/test_responses.py new file mode 100644 index 0000000000000000000000000000000000000000..dd6f62249382a39c06a2cbc086a94a5481d60e4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/responses/test_responses.py @@ -0,0 +1,1927 @@ +# coding: utf-8 + +from __future__ import absolute_import, print_function, division, unicode_literals + +import inspect +import os +import re +from io import BufferedReader, BytesIO + +import pytest +import requests +import responses +from requests.exceptions import ConnectionError, HTTPError, ChunkedEncodingError +from responses import ( + BaseResponse, + Response, + PassthroughResponse, + matchers, + CallbackResponse, +) + + +try: + from mock import patch, Mock +except ImportError: + from unittest.mock import patch, Mock # type: ignore + + +def assert_reset(): + assert len(responses._default_mock.registered()) == 0 + assert len(responses.calls) == 0 + + +def assert_response(resp, body=None, content_type="text/plain"): + assert resp.status_code == 200 + assert resp.reason == "OK" + if content_type is not None: + assert resp.headers["Content-Type"] == content_type + else: + assert "Content-Type" not in resp.headers + assert resp.text == body + + +def assert_params(resp, expected): + assert hasattr(resp, "request"), "Missing request" + assert hasattr( + resp.request, "params" + ), "Missing params on request that responses should add" + assert getattr(resp.request, "params") == expected, "Incorrect parameters" + + +def test_response(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com", body=b"test") + resp = requests.get("http://example.com") + assert_response(resp, "test") + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/" + assert responses.calls[0].response.content == b"test" + + resp = requests.get("http://example.com?foo=bar") + assert_response(resp, "test") + assert len(responses.calls) == 2 + assert responses.calls[1].request.url == "http://example.com/?foo=bar" + assert responses.calls[1].response.content == b"test" + + run() + assert_reset() + + +def test_response_encoded(): + @responses.activate + def run(): + # Path contains urlencoded =/()[] + url = "http://example.org/foo.bar%3D%2F%28%29%5B%5D" + responses.add(responses.GET, url, body="it 
works", status=200) + resp = requests.get(url) + assert_response(resp, "it works") + + run() + assert_reset() + + +def test_response_with_instance(): + @responses.activate + def run(): + responses.add( + responses.Response(method=responses.GET, url="http://example.com") + ) + resp = requests.get("http://example.com") + assert_response(resp, "") + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/" + + resp = requests.get("http://example.com?foo=bar") + assert_response(resp, "") + assert len(responses.calls) == 2 + assert responses.calls[1].request.url == "http://example.com/?foo=bar" + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/two", "http://example.com/two"), + ( + Response(method=responses.GET, url="http://example.com/two"), + Response( + method=responses.GET, url="http://example.com/two", body="testtwo" + ), + ), + ( + re.compile(r"http://example\.com/two"), + re.compile(r"http://example\.com/two"), + ), + ], +) +def test_replace(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="test1") + + if isinstance(original, BaseResponse): + responses.add(original) + else: + responses.add(responses.GET, original, body="test2") + + responses.add(responses.GET, "http://example.com/three", body="test3") + responses.add( + responses.GET, re.compile(r"http://example\.com/four"), body="test3" + ) + + if isinstance(replacement, BaseResponse): + responses.replace(replacement) + else: + responses.replace(responses.GET, replacement, body="testtwo") + + resp = requests.get("http://example.com/two") + assert_response(resp, "testtwo") + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/one", re.compile(r"http://example\.com/one")), + (re.compile(r"http://example\.com/one"), "http://example.com/one"), + ], +) +def test_replace_error(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, original) + with pytest.raises(ValueError) as excinfo: + responses.replace(responses.GET, replacement) + assert "Response is not registered for URL %s" % replacement in str( + excinfo.value + ) + + run() + assert_reset() + + +def test_replace_response_object_error(): + @responses.activate + def run(): + responses.add(Response(method=responses.GET, url="http://example.com/one")) + with pytest.raises(ValueError) as excinfo: + responses.replace( + Response(method=responses.GET, url="http://example.com/two") + ) + assert "Response is not registered for URL http://example.com/two" in str( + excinfo.value + ) + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/two", "http://example.com/two"), + ( + Response(method=responses.GET, url="http://example.com/two"), + Response( + method=responses.GET, url="http://example.com/two", body="testtwo" + ), + ), + ( + re.compile(r"http://example\.com/two"), + re.compile(r"http://example\.com/two"), + ), + ], +) +def test_upsert_replace(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="test1") + + if isinstance(original, BaseResponse): + responses.add(original) + else: + responses.add(responses.GET, original, body="test2") + + if isinstance(replacement, BaseResponse): + responses.upsert(replacement) + else: + responses.upsert(responses.GET, replacement, body="testtwo") + + resp = 
requests.get("http://example.com/two") + assert_response(resp, "testtwo") + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/two", "http://example.com/two"), + ( + Response(method=responses.GET, url="http://example.com/two"), + Response( + method=responses.GET, url="http://example.com/two", body="testtwo" + ), + ), + ( + re.compile(r"http://example\.com/two"), + re.compile(r"http://example\.com/two"), + ), + ], +) +def test_upsert_add(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="test1") + + if isinstance(replacement, BaseResponse): + responses.upsert(replacement) + else: + responses.upsert(responses.GET, replacement, body="testtwo") + + resp = requests.get("http://example.com/two") + assert_response(resp, "testtwo") + + run() + assert_reset() + + +def test_remove(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/zero") + responses.add(responses.GET, "http://example.com/one") + responses.add(responses.GET, "http://example.com/two") + responses.add(responses.GET, re.compile(r"http://example\.com/three")) + responses.add(responses.GET, re.compile(r"http://example\.com/four")) + re.purge() + responses.remove(responses.GET, "http://example.com/two") + responses.remove(Response(method=responses.GET, url="http://example.com/zero")) + responses.remove(responses.GET, re.compile(r"http://example\.com/four")) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/zero") + requests.get("http://example.com/one") + with pytest.raises(ConnectionError): + requests.get("http://example.com/two") + requests.get("http://example.com/three") + with pytest.raises(ConnectionError): + requests.get("http://example.com/four") + + run() + assert_reset() + + +@pytest.mark.parametrize( + "args1,kwargs1,args2,kwargs2,expected", + [ + ((responses.GET, "a"), {}, (responses.GET, "a"), {}, True), + ((responses.GET, "a"), {}, (responses.GET, "b"), {}, False), + ((responses.GET, "a"), {}, (responses.POST, "a"), {}, False), + ( + (responses.GET, "a"), + {"match_querystring": True}, + (responses.GET, "a"), + {}, + True, + ), + ], +) +def test_response_equality(args1, kwargs1, args2, kwargs2, expected): + o1 = BaseResponse(*args1, **kwargs1) + o2 = BaseResponse(*args2, **kwargs2) + assert (o1 == o2) is expected + assert (o1 != o2) is not expected + + +def test_response_equality_different_objects(): + o1 = BaseResponse(method=responses.GET, url="a") + o2 = "str" + assert (o1 == o2) is False + assert (o1 != o2) is True + + +def test_connection_error(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com") + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo") + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/foo" + assert type(responses.calls[0].response) is ConnectionError + assert responses.calls[0].response.request + + run() + assert_reset() + + +def test_match_querystring(): + @responses.activate + def run(): + url = "http://example.com?test=1&foo=bar" + responses.add(responses.GET, url, match_querystring=True, body=b"test") + resp = requests.get("http://example.com?test=1&foo=bar") + assert_response(resp, "test") + resp = requests.get("http://example.com?foo=bar&test=1") + assert_response(resp, "test") + resp = requests.get("http://example.com/?foo=bar&test=1") + assert_response(resp, "test") + + run() + assert_reset() + + 
+def test_match_querystring_empty(): + @responses.activate + def run(): + responses.add( + responses.GET, "http://example.com", body=b"test", match_querystring=True + ) + resp = requests.get("http://example.com") + assert_response(resp, "test") + resp = requests.get("http://example.com/") + assert_response(resp, "test") + with pytest.raises(ConnectionError): + requests.get("http://example.com?query=foo") + + run() + assert_reset() + + +def test_match_querystring_error(): + @responses.activate + def run(): + responses.add( + responses.GET, "http://example.com/?test=1", match_querystring=True + ) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo/?test=2") + + run() + assert_reset() + + +def test_match_querystring_regex(): + @responses.activate + def run(): + """Note that `match_querystring` value shouldn't matter when passing a + regular expression""" + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=1"), + body="test1", + match_querystring=True, + ) + + resp = requests.get("http://example.com/foo/?test=1") + assert_response(resp, "test1") + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=2"), + body="test2", + match_querystring=False, + ) + + resp = requests.get("http://example.com/foo/?test=2") + assert_response(resp, "test2") + + run() + assert_reset() + + +def test_match_querystring_error_regex(): + @responses.activate + def run(): + """Note that `match_querystring` value shouldn't matter when passing a + regular expression""" + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=1"), + match_querystring=True, + ) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo/?test=3") + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=2"), + match_querystring=False, + ) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo/?test=4") + + run() + assert_reset() + + +def test_match_querystring_auto_activates(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com?test=1", body=b"test") + resp = requests.get("http://example.com?test=1") + assert_response(resp, "test") + with pytest.raises(ConnectionError): + requests.get("http://example.com/?test=2") + + run() + assert_reset() + + +def test_match_querystring_missing_key(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com?foo=1&bar=2", body=b"test") + with pytest.raises(ConnectionError): + requests.get("http://example.com/?foo=1&baz=2") + + with pytest.raises(ConnectionError): + requests.get("http://example.com/?bar=2&fez=1") + + run() + assert_reset() + + +def test_accept_string_body(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add(responses.GET, url, body="test") + resp = requests.get(url) + assert_response(resp, "test") + + run() + assert_reset() + + +def test_accept_json_body(): + @responses.activate + def run(): + content_type = "application/json" + + url = "http://example.com/" + responses.add(responses.GET, url, json={"message": "success"}) + resp = requests.get(url) + assert_response(resp, '{"message": "success"}', content_type) + + url = "http://example.com/1/" + responses.add(responses.GET, url, json=[]) + resp = requests.get(url) + assert_response(resp, "[]", content_type) + + run() + assert_reset() + + +def test_no_content_type(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add(responses.GET, url, 
body="test", content_type=None) + resp = requests.get(url) + assert_response(resp, "test", content_type=None) + + run() + assert_reset() + + +def test_arbitrary_status_code(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add(responses.GET, url, body="test", status=419) + resp = requests.get(url) + assert resp.status_code == 419 + assert resp.reason is None + + run() + assert_reset() + + +def test_throw_connection_error_explicit(): + @responses.activate + def run(): + url = "http://example.com" + exception = HTTPError("HTTP Error") + responses.add(responses.GET, url, exception) + + with pytest.raises(HTTPError) as HE: + requests.get(url) + + assert str(HE.value) == "HTTP Error" + + run() + assert_reset() + + +def test_callback(): + body = b"test callback" + status = 400 + reason = "Bad Request" + headers = { + "foo": "bar", + "Content-Type": "application/json", + "Content-Length": "13", + } + url = "http://example.com/" + + def request_callback(_request): + return status, headers, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert resp.reason == reason + assert "bar" == resp.headers.get("foo") + assert "application/json" == resp.headers.get("Content-Type") + assert "13" == resp.headers.get("Content-Length") + + run() + assert_reset() + + +def test_callback_deprecated_stream_argument(): + with pytest.deprecated_call(): + CallbackResponse(responses.GET, "url", lambda x: x, stream=False) + + +def test_callback_deprecated_match_querystring_argument(): + with pytest.deprecated_call(): + CallbackResponse(responses.GET, "url", lambda x: x, match_querystring=False) + + +def test_callback_match_querystring_default_false(): + """ + Test to ensure that by default 'match_querystring' in 'add_callback' is set to False + and does not raise deprecation + see: https://github.com/getsentry/responses/issues/464 and related PR + """ + body = b"test callback" + status = 200 + params = {"hello": "world", "I am": "a big test"} + headers = {"foo": "bar"} + url = "http://example.com/" + + def request_callback(_request): + return status, headers, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback, content_type=None) + resp = requests.get(url, params=params) + assert resp.text == "test callback" + assert resp.status_code == status + assert "foo" in resp.headers + + with pytest.warns(None) as record: + run() + + # check that no deprecation warning was raised + assert not record + + assert_reset() + + +def test_callback_exception_result(): + result = Exception() + url = "http://example.com/" + + def request_callback(request): + return result + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + + with pytest.raises(Exception) as e: + requests.get(url) + + assert e.value is result + + run() + assert_reset() + + +def test_callback_exception_body(): + body = Exception() + url = "http://example.com/" + + def request_callback(request): + return 200, {}, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + + with pytest.raises(Exception) as e: + requests.get(url) + + assert e.value is body + + run() + assert_reset() + + +def test_callback_no_content_type(): + body = b"test callback" + status = 400 + reason = "Bad Request" + headers = {"foo": "bar"} + url = "http://example.com/" + 
+ def request_callback(_request): + return status, headers, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback, content_type=None) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert resp.reason == reason + assert "foo" in resp.headers + assert "Content-Type" not in resp.headers + + run() + assert_reset() + + +def test_callback_content_type_dict(): + def request_callback(request): + return ( + 200, + {"Content-Type": "application/json"}, + b"foo", + ) + + @responses.activate + def run(): + responses.add_callback("GET", "http://mockhost/.foo", callback=request_callback) + resp = requests.get("http://mockhost/.foo") + assert resp.text == "foo" + assert resp.headers["content-type"] == "application/json" + + run() + assert_reset() + + +def test_callback_matchers(): + def request_callback(request): + return ( + 200, + {"Content-Type": "application/json"}, + b"foo", + ) + + @responses.activate + def run(): + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + + responses.add_callback( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + callback=request_callback, + ) + resp = requests.post("http://httpbin.org/post", data=req_data, files=req_files) + assert resp.text == "foo" + assert resp.headers["content-type"] == "application/json" + + run() + assert_reset() + + +def test_callback_matchers_fail(): + @responses.activate + def run(): + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + + responses.add_callback( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + callback=lambda x: ( + 0, + {"a": ""}, + "", + ), + ) + with pytest.raises(ConnectionError) as exc: + requests.post( + "http://httpbin.org/post", + data={"some": "other", "data": "wrong"}, + files=req_files, + ) + + assert "multipart/form-data doesn't match." 
in str(exc.value) + + run() + assert_reset() + + +def test_callback_content_type_tuple(): + def request_callback(request): + return ( + 200, + [("Content-Type", "application/json")], + b"foo", + ) + + @responses.activate + def run(): + responses.add_callback("GET", "http://mockhost/.foo", callback=request_callback) + resp = requests.get("http://mockhost/.foo") + assert resp.text == "foo" + assert resp.headers["content-type"] == "application/json" + + run() + assert_reset() + + +def test_regular_expression_url(): + @responses.activate + def run(): + url = re.compile(r"https?://(.*\.)?example.com") + responses.add(responses.GET, url, body=b"test") + + resp = requests.get("http://example.com") + assert_response(resp, "test") + + resp = requests.get("https://example.com") + assert_response(resp, "test") + + resp = requests.get("https://uk.example.com") + assert_response(resp, "test") + + with pytest.raises(ConnectionError): + requests.get("https://uk.exaaample.com") + + run() + assert_reset() + + +def test_base_response_get_response(): + resp = BaseResponse("GET", ".com") + with pytest.raises(NotImplementedError): + resp.get_response(requests.PreparedRequest()) + + +def test_custom_adapter(): + @responses.activate + def run(): + url = "http://example.com" + responses.add(responses.GET, url, body=b"test") + + calls = [0] + + class DummyAdapter(requests.adapters.HTTPAdapter): + def send(self, *a, **k): + calls[0] += 1 + return super(DummyAdapter, self).send(*a, **k) + + # Test that the adapter is actually used + session = requests.Session() + session.mount("http://", DummyAdapter()) + + resp = session.get(url, allow_redirects=False) + assert calls[0] == 1 + + # Test that the response is still correctly emulated + session = requests.Session() + session.mount("http://", DummyAdapter()) + + resp = session.get(url) + assert_response(resp, "test") + + run() + + +def test_responses_as_context_manager(): + def run(): + with responses.mock: + responses.add(responses.GET, "http://example.com", body=b"test") + resp = requests.get("http://example.com") + assert_response(resp, "test") + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/" + assert responses.calls[0].response.content == b"test" + + resp = requests.get("http://example.com?foo=bar") + assert_response(resp, "test") + assert len(responses.calls) == 2 + assert responses.calls[1].request.url == "http://example.com/?foo=bar" + assert responses.calls[1].response.content == b"test" + + run() + assert_reset() + + +def test_activate_doesnt_change_signature(): + def test_function(a, b=None): + return (a, b) + + decorated_test_function = responses.activate(test_function) + assert inspect.signature(test_function) == inspect.signature( + decorated_test_function + ) + + assert decorated_test_function(1, 2) == test_function(1, 2) + assert decorated_test_function(3) == test_function(3) + + +@pytest.fixture +def my_fruit(): + return "apple" + + +@pytest.fixture +def fruit_basket(my_fruit): + return ["banana", my_fruit] + + +@pytest.mark.usefixtures("my_fruit", "fruit_basket") +class TestFixtures(object): + """ + Test that pytest fixtures work well with 'activate' decorator + """ + + def test_function(self, my_fruit, fruit_basket): + assert my_fruit in fruit_basket + assert my_fruit == "apple" + + test_function_decorated = responses.activate(test_function) + + +def test_activate_mock_interaction(): + @patch("sys.stdout") + def test_function(mock_stdout): + return mock_stdout + + decorated_test_function = 
responses.activate(test_function) + assert inspect.signature(test_function) == inspect.signature( + decorated_test_function + ) + + value = test_function() + assert isinstance(value, Mock) + + value = decorated_test_function() + assert isinstance(value, Mock) + + +def test_activate_doesnt_change_signature_with_return_type(): + def test_function(a, b=None): + return a, b + + # Add type annotations as they are syntax errors in py2. + # Use a class to test for import errors in evaled code. + test_function.__annotations__["return"] = Mock + test_function.__annotations__["a"] = Mock + + decorated_test_function = responses.activate(test_function) + assert inspect.signature(test_function) == inspect.signature( + decorated_test_function + ) + + assert decorated_test_function(1, 2) == test_function(1, 2) + assert decorated_test_function(3) == test_function(3) + + +def test_activate_doesnt_change_signature_for_method(): + class TestCase(object): + def test_function(self, a, b=None): + return (self, a, b) + + decorated_test_function = responses.activate(test_function) + + test_case = TestCase() + assert test_case.decorated_test_function(1, 2) == test_case.test_function(1, 2) + assert test_case.decorated_test_function(3) == test_case.test_function(3) + + +def test_response_cookies(): + body = b"test callback" + status = 200 + headers = {"set-cookie": "session_id=12345; a=b; c=d"} + url = "http://example.com/" + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert "session_id" in resp.cookies + assert resp.cookies["session_id"] == "12345" + assert set(resp.cookies.keys()) == set(["session_id"]) + + run() + assert_reset() + + +def test_response_cookies_secure(): + body = b"test callback" + status = 200 + headers = {"set-cookie": "session_id=12345; a=b; c=d; secure"} + url = "http://example.com/" + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert "session_id" in resp.cookies + assert resp.cookies["session_id"] == "12345" + assert set(resp.cookies.keys()) == set(["session_id"]) + + run() + assert_reset() + + +def test_response_cookies_multiple(): + body = b"test callback" + status = 200 + headers = [ + ("set-cookie", "1P_JAR=2019-12-31-23; path=/; domain=.example.com; HttpOnly"), + ("set-cookie", "NID=some=value; path=/; domain=.example.com; secure"), + ] + url = "http://example.com/" + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert set(resp.cookies.keys()) == set(["1P_JAR", "NID"]) + assert resp.cookies["1P_JAR"] == "2019-12-31-23" + assert resp.cookies["NID"] == "some=value" + + run() + assert_reset() + + +@pytest.mark.parametrize("request_stream", (True, False, None)) +@pytest.mark.parametrize("responses_stream", (True, False, None)) +def test_response_cookies_session(request_stream, responses_stream): + @responses.activate + def run(): + url = "https://example.com/path" + responses.add( + responses.GET, + url, + headers=[ + 
("Set-cookie", "mycookie=cookieval; path=/; secure"), + ], + body="ok", + stream=responses_stream, + ) + session = requests.session() + resp = session.get(url, stream=request_stream) + assert resp.text == "ok" + assert resp.status_code == 200 + + assert "mycookie" in resp.cookies + assert resp.cookies["mycookie"] == "cookieval" + assert set(resp.cookies.keys()) == set(["mycookie"]) + + assert "mycookie" in session.cookies + assert session.cookies["mycookie"] == "cookieval" + assert set(session.cookies.keys()) == set(["mycookie"]) + + run() + assert_reset() + + +def test_response_callback(): + """adds a callback to decorate the response, then checks it""" + + def run(): + def response_callback(resp): + resp._is_mocked = True + return resp + + with responses.RequestsMock(response_callback=response_callback) as m: + m.add(responses.GET, "http://example.com", body=b"test") + resp = requests.get("http://example.com") + assert resp.text == "test" + assert hasattr(resp, "_is_mocked") + assert getattr(resp, "_is_mocked") is True + + run() + assert_reset() + + +def test_response_filebody(): + """ Adds the possibility to use actual (binary) files as responses """ + + def run(): + current_file = os.path.abspath(__file__) + with responses.RequestsMock() as m: + with open(current_file, "r") as out: + m.add(responses.GET, "http://example.com", body=out.read(), stream=True) + resp = requests.get("http://example.com", stream=True) + with open(current_file, "r") as out: + assert resp.text == out.read() + + run() + assert_reset() + + +def test_use_stream_twice_to_double_raw_io(): + @responses.activate + def run(): + url = "http://example.com" + responses.add(responses.GET, url, body=b"42", stream=True) + resp = requests.get(url, stream=True) + assert resp.raw.read() == b"42" + + run() + assert_reset() + + +def test_assert_all_requests_are_fired(): + def request_callback(request): + raise BaseException() + + def run(): + with pytest.raises(AssertionError) as excinfo: + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add(responses.GET, "http://example.com", body=b"test") + assert "http://example.com" in str(excinfo.value) + assert responses.GET in str(excinfo.value) + + # check that assert_all_requests_are_fired default to True + with pytest.raises(AssertionError): + with responses.RequestsMock() as m: + m.add(responses.GET, "http://example.com", body=b"test") + + # check that assert_all_requests_are_fired doesn't swallow exceptions + with pytest.raises(ValueError): + with responses.RequestsMock() as m: + m.add(responses.GET, "http://example.com", body=b"test") + raise ValueError() + + # check that assert_all_requests_are_fired=True doesn't remove urls + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add(responses.GET, "http://example.com", body=b"test") + assert len(m.registered()) == 1 + requests.get("http://example.com") + assert len(m.registered()) == 1 + + # check that assert_all_requests_are_fired=True counts mocked errors + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add(responses.GET, "http://example.com", body=Exception()) + assert len(m.registered()) == 1 + with pytest.raises(Exception): + requests.get("http://example.com") + assert len(m.registered()) == 1 + + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add_callback(responses.GET, "http://example.com", request_callback) + assert len(m.registered()) == 1 + with pytest.raises(BaseException): + requests.get("http://example.com") + assert 
len(m.registered()) == 1 + + run() + assert_reset() + + +def test_allow_redirects_samehost(): + redirecting_url = "http://example.com" + final_url_path = "/1" + final_url = "{0}{1}".format(redirecting_url, final_url_path) + url_re = re.compile(r"^http://example.com(/)?(\d+)?$") + + def request_callback(request): + # endpoint of chained redirect + if request.url.endswith(final_url_path): + return 200, (), b"test" + + # otherwise redirect to an integer path + else: + if request.url.endswith("/0"): + n = 1 + else: + n = 0 + redirect_headers = {"location": "/{0!s}".format(n)} + return 301, redirect_headers, None + + def run(): + # setup redirect + with responses.mock: + responses.add_callback(responses.GET, url_re, request_callback) + resp_no_redirects = requests.get(redirecting_url, allow_redirects=False) + assert resp_no_redirects.status_code == 301 + assert len(responses.calls) == 1 # 1x300 + assert responses.calls[0][1].status_code == 301 + assert_reset() + + with responses.mock: + responses.add_callback(responses.GET, url_re, request_callback) + resp_yes_redirects = requests.get(redirecting_url, allow_redirects=True) + assert len(responses.calls) == 3 # 2x300 + 1x200 + assert len(resp_yes_redirects.history) == 2 + assert resp_yes_redirects.status_code == 200 + assert final_url == resp_yes_redirects.url + status_codes = [call[1].status_code for call in responses.calls] + assert status_codes == [301, 301, 200] + assert_reset() + + run() + assert_reset() + + +def test_handles_unicode_querystring(): + url = "http://example.com/test?type=2&ie=utf8&query=汉字" + + @responses.activate + def run(): + responses.add(responses.GET, url, body="test", match_querystring=True) + + resp = requests.get(url) + + assert_response(resp, "test") + + run() + assert_reset() + + +def test_handles_unicode_url(): + url = "http://www.संजाल.भारत/hi/वेबसाइट-डिजाइन" + + @responses.activate + def run(): + responses.add(responses.GET, url, body="test") + + resp = requests.get(url) + + assert_response(resp, "test") + + run() + assert_reset() + + +def test_handles_unicode_body(): + url = "http://example.com/test" + + @responses.activate + def run(): + responses.add(responses.GET, url, body="михољско лето") + + resp = requests.get(url) + + assert_response(resp, "михољско лето", content_type="text/plain; charset=utf-8") + + run() + assert_reset() + + +def test_handles_buffered_reader_body(): + url = "http://example.com/test" + + @responses.activate + def run(): + responses.add(responses.GET, url, body=BufferedReader(BytesIO(b"test"))) # type: ignore + + resp = requests.get(url) + + assert_response(resp, "test") + + run() + assert_reset() + + +def test_headers(): + @responses.activate + def run(): + responses.add( + responses.GET, "http://example.com", body="", headers={"X-Test": "foo"} + ) + resp = requests.get("http://example.com") + assert resp.headers["X-Test"] == "foo" + + run() + assert_reset() + + +def test_content_length_error(monkeypatch): + """ + Currently 'requests' does not enforce content length validation, + (validation that body length matches header). 
However, this could + be expected in next major version, see + https://github.com/psf/requests/pull/3563 + + Now user can manually patch URL3 lib to achieve the same + + See discussion in + https://github.com/getsentry/responses/issues/394 + """ + + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com/api/123", + json={"message": "this body is too large"}, + adding_headers={"content-length": "2"}, + ) + with pytest.raises(ChunkedEncodingError) as exc: + requests.get("http://example.com/api/123") + + assert "IncompleteRead" in str(exc.value) + + # Type errors here and on 1250 are ignored because the stubs for requests + # are off https://github.com/python/typeshed/blob/f8501d33c737482a829c6db557a0be26895c5941 + # /stubs/requests/requests/packages/__init__.pyi#L1 + original_init = getattr(requests.packages.urllib3.HTTPResponse, "__init__") # type: ignore + + def patched_init(self, *args, **kwargs): + kwargs["enforce_content_length"] = True + original_init(self, *args, **kwargs) + + monkeypatch.setattr( + requests.packages.urllib3.HTTPResponse, "__init__", patched_init # type: ignore + ) + + run() + assert_reset() + + +def test_stream_with_none_chunk_size(): + """ + See discussion in + https://github.com/getsentry/responses/issues/438 + """ + + @responses.activate + def run(): + responses.add( + responses.GET, + "https://example.com", + status=200, + content_type="application/octet-stream", + body=b"This is test", + auto_calculate_content_length=True, + ) + res = requests.get("https://example.com", stream=True) + for chunk in res.iter_content(chunk_size=None): + assert chunk == b"This is test" + + run() + assert_reset() + + +def test_legacy_adding_headers(): + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com", + body="", + adding_headers={"X-Test": "foo"}, + ) + resp = requests.get("http://example.com") + assert resp.headers["X-Test"] == "foo" + + run() + assert_reset() + + +def test_auto_calculate_content_length_string_body(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + responses.GET, url, body="test", auto_calculate_content_length=True + ) + resp = requests.get(url) + assert_response(resp, "test") + assert resp.headers["Content-Length"] == "4" + + run() + assert_reset() + + +def test_auto_calculate_content_length_bytes_body(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + responses.GET, url, body=b"test bytes", auto_calculate_content_length=True + ) + resp = requests.get(url) + assert_response(resp, "test bytes") + assert resp.headers["Content-Length"] == "10" + + run() + assert_reset() + + +def test_auto_calculate_content_length_json_body(): + @responses.activate + def run(): + content_type = "application/json" + + url = "http://example.com/" + responses.add( + responses.GET, + url, + json={"message": "success"}, + auto_calculate_content_length=True, + ) + resp = requests.get(url) + assert_response(resp, '{"message": "success"}', content_type) + assert resp.headers["Content-Length"] == "22" + + url = "http://example.com/1/" + responses.add(responses.GET, url, json=[], auto_calculate_content_length=True) + resp = requests.get(url) + assert_response(resp, "[]", content_type) + assert resp.headers["Content-Length"] == "2" + + run() + assert_reset() + + +def test_auto_calculate_content_length_unicode_body(): + @responses.activate + def run(): + url = "http://example.com/test" + responses.add( + responses.GET, url, body="михољско лето", 
auto_calculate_content_length=True + ) + resp = requests.get(url) + assert_response(resp, "михољско лето", content_type="text/plain; charset=utf-8") + assert resp.headers["Content-Length"] == "25" + + run() + assert_reset() + + +def test_auto_calculate_content_length_doesnt_work_for_buffered_reader_body(): + @responses.activate + def run(): + url = "http://example.com/test" + responses.add( + responses.GET, + url, + body=BufferedReader(BytesIO(b"testing")), # type: ignore + auto_calculate_content_length=True, + ) + resp = requests.get(url) + assert_response(resp, "testing") + assert "Content-Length" not in resp.headers + + run() + assert_reset() + + +def test_auto_calculate_content_length_doesnt_override_existing_value(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + responses.GET, + url, + body="test", + headers={"Content-Length": "2"}, + auto_calculate_content_length=True, + ) + resp = requests.get(url) + assert_response(resp, "test") + assert resp.headers["Content-Length"] == "2" + + run() + assert_reset() + + +def test_multiple_responses(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com", body="test") + responses.add(responses.GET, "http://example.com", body="rest") + responses.add(responses.GET, "http://example.com", body="fest") + responses.add(responses.GET, "http://example.com", body="best") + + resp = requests.get("http://example.com") + assert_response(resp, "test") + + resp = requests.get("http://example.com") + assert_response(resp, "rest") + + resp = requests.get("http://example.com") + assert_response(resp, "fest") + + resp = requests.get("http://example.com") + assert_response(resp, "best") + + # After all responses are used, last response should be repeated + resp = requests.get("http://example.com") + assert_response(resp, "best") + + run() + assert_reset() + + +def test_multiple_responses_intermixed(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com", body="test") + resp = requests.get("http://example.com") + assert_response(resp, "test") + + responses.add(responses.GET, "http://example.com", body="rest") + resp = requests.get("http://example.com") + assert_response(resp, "rest") + + responses.add(responses.GET, "http://example.com", body="best") + resp = requests.get("http://example.com") + assert_response(resp, "best") + + # After all responses are used, last response should be repeated + resp = requests.get("http://example.com") + assert_response(resp, "best") + + run() + assert_reset() + + +def test_multiple_urls(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("http://example.com/one") + assert_response(resp, "one") + + run() + assert_reset() + + +def test_multiple_methods(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="gotcha") + responses.add(responses.POST, "http://example.com/one", body="posted") + + resp = requests.get("http://example.com/one") + assert_response(resp, "gotcha") + resp = requests.post("http://example.com/one") + assert_response(resp, "posted") + + run() + assert_reset() + + +def test_passthrough_flag(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + response = Response(responses.GET, httpserver.url, body="MOCK") + + 
@responses.activate + def run_passthrough(): + responses.add(response) + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + @responses.activate + def run_mocked(): + responses.add(response) + resp = requests.get(httpserver.url) + assert_response(resp, "MOCK") + + run_mocked() + assert_reset() + + response.passthrough = True + run_passthrough() + assert_reset() + + +def test_passthrough_response(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add(PassthroughResponse(responses.GET, httpserver.url)) + responses.add(responses.GET, "{}/one".format(httpserver.url), body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("{}/one".format(httpserver.url)) + assert_response(resp, "one") + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + assert len(responses.calls) == 3 + responses.assert_call_count(httpserver.url, 1) + + run() + assert_reset() + + +def test_passthrough_response_stream(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add(PassthroughResponse(responses.GET, httpserver.url)) + content_1 = requests.get(httpserver.url).content + with requests.get(httpserver.url, stream=True) as resp: + content_2 = resp.raw.read() + assert content_1 == content_2 + + run() + assert_reset() + + +def test_passthru_prefixes(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run_constructor_argument(): + with responses.RequestsMock(passthru_prefixes=(httpserver.url,)): + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + @responses.activate + def run_property_setter(): + with responses.RequestsMock() as m: + m.passthru_prefixes = tuple([httpserver.url]) + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + run_constructor_argument() + assert_reset() + run_property_setter() + assert_reset() + + +def test_passthru(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add_passthru(httpserver.url) + responses.add(responses.GET, "{}/one".format(httpserver.url), body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("{}/one".format(httpserver.url)) + assert_response(resp, "one") + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + run() + assert_reset() + + +def test_passthru_regex(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add_passthru(re.compile("{}/\\w+".format(httpserver.url))) + responses.add(responses.GET, "{}/one".format(httpserver.url), body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("{}/one".format(httpserver.url)) + assert_response(resp, "one") + resp = requests.get("{}/two".format(httpserver.url)) + assert_response(resp, "OK") + resp = requests.get("{}/three".format(httpserver.url)) + assert_response(resp, "OK") + + run() + assert_reset() + + +def test_passthru_does_not_persist_across_tests(httpserver): + """ + passthru 
should be erased on exit from context manager + see: + https://github.com/getsentry/responses/issues/322 + """ + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def with_a_passthru(): + assert not responses._default_mock.passthru_prefixes + responses.add_passthru(re.compile(".*")) + try: + response = requests.get("https://example.com") + except ConnectionError as err: # pragma: no cover + if "Failed to establish" in str(err): # pragma: no cover + pytest.skip("Cannot resolve DNS for example.com") # pragma: no cover + raise err # pragma: no cover + + assert response.status_code == 200 + + @responses.activate + def without_a_passthru(): + assert not responses._default_mock.passthru_prefixes + with pytest.raises(requests.exceptions.ConnectionError): + requests.get("https://example.com") + + with_a_passthru() + without_a_passthru() + + +def test_method_named_param(): + @responses.activate + def run(): + responses.add(method=responses.GET, url="http://example.com", body="OK") + resp = requests.get("http://example.com") + assert_response(resp, "OK") + + run() + assert_reset() + + +def test_passthru_unicode(): + @responses.activate + def run(): + with responses.RequestsMock() as m: + url = "http://موقع.وزارة-الاتصالات.مصر/" + clean_url = "http://xn--4gbrim.xn----ymcbaaajlc6dj7bxne2c.xn--wgbh1c/" + m.add_passthru(url) + assert m.passthru_prefixes[0] == clean_url + + run() + assert_reset() + + +def test_custom_target(monkeypatch): + requests_mock = responses.RequestsMock(target="something.else") + std_mock_mock = responses.std_mock.MagicMock() + patch_mock = std_mock_mock.patch + monkeypatch.setattr(responses, "std_mock", std_mock_mock) + requests_mock.start() + assert len(patch_mock.call_args_list) == 1 + assert patch_mock.call_args[1]["target"] == "something.else" + + +def test_cookies_from_headers(): + text = "こんにちは/世界" + quoted_text = responses.quote(text) + expected = {"x": "a", "y": quoted_text} + headers = {"set-cookie": "; ".join(k + "=" + v for k, v in expected.items())} + cookiejar = responses._cookies_from_headers(headers) + for k, v in cookiejar.items(): + assert isinstance(v, str) + assert v == expected[k] + + +@pytest.mark.parametrize( + "url", + ( + "http://example.com", + "http://example.com/some/path", + "http://example.com/other/path/", + ), +) +def test_request_param(url): + @responses.activate + def run(): + params = {"hello": "world", "example": "params"} + responses.add( + method=responses.GET, + url="{0}?hello=world".format(url), + body="test", + match_querystring=False, + ) + resp = requests.get(url, params=params) + assert_response(resp, "test") + assert_params(resp, params) + + resp = requests.get(url) + assert_response(resp, "test") + assert_params(resp, {}) + + run() + assert_reset() + + +def test_request_param_with_multiple_values_for_the_same_key(): + @responses.activate + def run(): + url = "http://example.com" + params = {"key1": ["one", "two"], "key2": "three"} + responses.add( + method=responses.GET, + url=url, + body="test", + ) + resp = requests.get(url, params=params) + assert_response(resp, "test") + assert_params(resp, params) + + run() + assert_reset() + + +@pytest.mark.parametrize( + "url", ("http://example.com", "http://example.com?hello=world") +) +def test_assert_call_count(url): + @responses.activate + def run(): + responses.add(responses.GET, url) + responses.add(responses.GET, "http://example1.com") + + assert responses.assert_call_count(url, 0) is True + + with pytest.raises(AssertionError) as excinfo: + 
responses.assert_call_count(url, 2) + assert "Expected URL '{0}' to be called 2 times. Called 0 times.".format( + url + ) in str(excinfo.value) + + requests.get(url) + assert responses.assert_call_count(url, 1) is True + + requests.get("http://example1.com") + assert responses.assert_call_count(url, 1) is True + + requests.get(url) + with pytest.raises(AssertionError) as excinfo: + responses.assert_call_count(url, 3) + assert "Expected URL '{0}' to be called 3 times. Called 2 times.".format( + url + ) in str(excinfo.value) + + run() + assert_reset() + + +def test_fail_request_error(): + """ + Validate that exception is raised if request URL/Method/kwargs don't match + :return: + """ + + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add("POST", "http://example1.com") + rsps.add("GET", "http://example.com") + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://example.com", data={"id": "bad"}) + + msg = str(excinfo.value) + assert "- POST http://example1.com/ URL does not match" in msg + assert "- GET http://example.com/ Method does not match" in msg + + run() + assert_reset() + + +@pytest.mark.parametrize( + "response_params, expected_representation", + [ + ( + {"method": responses.GET, "url": "http://example.com/"}, + ( + "" + ), + ), + ( + { + "method": responses.POST, + "url": "http://another-domain.com/", + "content_type": "application/json", + "status": 404, + }, + ( + "" + ), + ), + ( + { + "method": responses.PUT, + "url": "http://abcd.com/", + "content_type": "text/html", + "status": 500, + "headers": {"X-Test": "foo"}, + "body": {"it_wont_be": "considered"}, + }, + ( + "" + ), + ), + ], +) +def test_response_representations(response_params, expected_representation): + response = Response(**response_params) + + assert str(response) == expected_representation + assert repr(response) == expected_representation + + +def test_mocked_responses_list_registered(): + @responses.activate + def run(): + first_response = Response( + responses.GET, + "http://example.com/", + body="", + headers={"X-Test": "foo"}, + status=404, + ) + second_response = Response( + responses.GET, "http://example.com/", body="", headers={"X-Test": "foo"} + ) + third_response = Response( + responses.POST, + "http://anotherdomain.com/", + ) + responses.add(first_response) + responses.add(second_response) + responses.add(third_response) + + mocks_list = responses.registered() + + assert mocks_list == responses.mock.registered() + assert mocks_list == [first_response, second_response, third_response] + + run() + assert_reset() + + +@pytest.mark.parametrize( + "url,other_url", + [ + ("http://service-A/foo?q=fizz", "http://service-a/foo?q=fizz"), + ("http://service-a/foo", "http://service-A/foo"), + ("http://someHost-AwAy/", "http://somehost-away/"), + ("http://fizzbuzz/foo", "http://fizzbuzz/foo"), + ], +) +def test_rfc_compliance(url, other_url): + @responses.activate + def run(): + responses.add(method=responses.GET, url=url) + resp = requests.request("GET", other_url) + assert_response(resp, "") + + run() + assert_reset() + + +def test_requests_between_add(): + @responses.activate + def run(): + responses.add(responses.GET, "https://example.com/", json={"response": "old"}) + assert requests.get("https://example.com/").content == b'{"response": "old"}' + assert requests.get("https://example.com/").content == b'{"response": "old"}' + assert requests.get("https://example.com/").content == b'{"response": "old"}' + + responses.add(responses.GET, 
"https://example.com/", json={"response": "new"}) + + assert requests.get("https://example.com/").content == b'{"response": "new"}' + assert requests.get("https://example.com/").content == b'{"response": "new"}' + assert requests.get("https://example.com/").content == b'{"response": "new"}' + + run() + assert_reset()