diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..78f64cfe9126bafebd549265a176a159abe72eeb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__init__.py @@ -0,0 +1,139 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ruff: noqa + +from typing import Dict, List, Optional, Type + +from .. import config +from ..utils import logging +from .formatting import ( + ArrowFormatter, + CustomFormatter, + Formatter, + PandasFormatter, + PythonFormatter, + TensorFormatter, + format_table, + query_table, +) +from .np_formatter import NumpyFormatter + + +logger = logging.get_logger(__name__) + +_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} +_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} +_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} + + +def _register_formatter( + formatter_cls: type, + format_type: Optional[str], + aliases: Optional[List[str]] = None, +): + """ + Register a Formatter object using a name and optional aliases. + This function must be used on a Formatter class. + """ + aliases = aliases if aliases is not None else [] + if format_type in _FORMAT_TYPES: + logger.warning( + f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" + ) + _FORMAT_TYPES[format_type] = formatter_cls + for alias in set(aliases + [format_type]): + if alias in _FORMAT_TYPES_ALIASES: + logger.warning( + f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" + ) + _FORMAT_TYPES_ALIASES[alias] = format_type + + +def _register_unavailable_formatter( + unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None +): + """ + Register an unavailable Formatter object using a name and optional aliases. + This function must be used on an Exception object that is raised when trying to get the unavailable formatter. 
+ """ + aliases = aliases if aliases is not None else [] + for alias in set(aliases + [format_type]): + _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error + + +# Here we define all the available formatting functions that can be used by `Dataset.set_format` +_register_formatter(PythonFormatter, None, aliases=["python"]) +_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) +_register_formatter(NumpyFormatter, "numpy", aliases=["np"]) +_register_formatter(PandasFormatter, "pandas", aliases=["pd"]) +_register_formatter(CustomFormatter, "custom") + +if config.POLARS_AVAILABLE: + from .polars_formatter import PolarsFormatter + + _register_formatter(PolarsFormatter, "polars", aliases=["pl"]) +else: + _polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.") + _register_unavailable_formatter(_polars_error, "polars", aliases=["pl"]) + +if config.TORCH_AVAILABLE: + from .torch_formatter import TorchFormatter + + _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) +else: + _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") + _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) + +if config.TF_AVAILABLE: + from .tf_formatter import TFFormatter + + _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) +else: + _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") + _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) + +if config.JAX_AVAILABLE: + from .jax_formatter import JaxFormatter + + _register_formatter(JaxFormatter, "jax", aliases=[]) +else: + _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") + _register_unavailable_formatter(_jax_error, "jax", aliases=[]) + + +def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: + """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" + if format_type in _FORMAT_TYPES_ALIASES: + return _FORMAT_TYPES_ALIASES[format_type] + else: + return format_type + + +def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: + """ + Factory function to get a Formatter given its type name and keyword arguments. + A formatter is an object that extracts and formats data from pyarrow table. + It defines the formatting for rows, colums and batches. + If the formatter for a given type name doesn't exist or is not available, an error is raised. 
+ """ + format_type = get_format_type_from_alias(format_type) + if format_type in _FORMAT_TYPES: + return _FORMAT_TYPES[format_type](**format_kwargs) + if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: + raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] + else: + raise ValueError( + f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33db0486933d5f1a123dbf82416583e8e986c15a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9ff9b3993ac3c8e15d4069e938ee0635c73afa5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09c7320fbafeeb4c049449129ced9d1d6935e3f3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecc65545f0b808cbe51636e5317f541353adda7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0467698312f93a2e28bd51633def642a8bc88acd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79c7edafe32d6fbba57b5c1f87e55d5700e9b68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc new file mode 100644 index 
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33db0486933d5f1a123dbf82416583e8e986c15a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9ff9b3993ac3c8e15d4069e938ee0635c73afa5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09c7320fbafeeb4c049449129ced9d1d6935e3f3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecc65545f0b808cbe51636e5317f541353adda7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0467698312f93a2e28bd51633def642a8bc88acd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79c7edafe32d6fbba57b5c1f87e55d5700e9b68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5ca845628889afd3c11042ad083697a16478417 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/formatting.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..9570fb1808ca2a46b35da691c033802cd6eb601b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/formatting.py @@ -0,0 +1,653 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import operator +from collections.abc import Mapping, MutableMapping +from functools import partial + +# Lint as: python3 +from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union + +import numpy as np +import pandas as pd +import pyarrow as pa +from packaging import version + +from .. import config +from ..features import Features +from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper +from ..table import Table +from ..utils.py_utils import no_op_if_value_is_null + + +T = TypeVar("T") + +RowFormat = TypeVar("RowFormat") +ColumnFormat = TypeVar("ColumnFormat") +BatchFormat = TypeVar("BatchFormat") + + +def _is_range_contiguous(key: range) -> bool: + return key.step == 1 and key.stop >= key.start + + +def _raise_bad_key_type(key: Any): + raise TypeError( + f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable." + ) + + +def _query_table_with_indices_mapping( + table: Table, key: Union[int, slice, range, str, Iterable], indices: Table +) -> pa.Table: + """ + Query a pyarrow Table to extract the subtable that corresponds to the given key. + The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into + account a shuffling or an indices selection for example. + The indices table must contain one column named "indices" of type uint64. + """ + if isinstance(key, int): + key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py() + return _query_table(table, key) + if isinstance(key, slice): + key = range(*key.indices(indices.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return _query_table( + table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)] + ) + else: + pass # treat as an iterable + if isinstance(key, str): + table = table.select([key]) + return _query_table(table, indices.column(0).to_pylist()) + if isinstance(key, Iterable): + return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key]) + + _raise_bad_key_type(key)
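For context, a hedged sketch of how such an indices mapping comes into play, using the public `query_table` helper defined later in this file (the table contents here are invented):

```python
import pyarrow as pa
from datasets.formatting import query_table
from datasets.table import InMemoryTable

table = InMemoryTable.from_pydict({"text": ["a", "b", "c", "d"]})
# an indices mapping as produced e.g. by shuffling: one "indices" column of type uint64
indices = InMemoryTable.from_pydict({"indices": pa.array([3, 1], type=pa.uint64())})
query_table(table, 0, indices=indices)  # key 0 is re-mapped to row 3 -> a table containing "d"
```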
+ """ + if isinstance(key, int): + return table.fast_slice(key % table.num_rows, 1) + if isinstance(key, slice): + key = range(*key.indices(table.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return table.fast_slice(key.start, key.stop - key.start) + else: + pass # treat as an iterable + if isinstance(key, str): + return table.table.drop([column for column in table.column_names if column != key]) + if isinstance(key, Iterable): + key = np.fromiter(key, np.int64) + if len(key) == 0: + return table.table.slice(0, 0) + # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773) + return table.fast_gather(key % table.num_rows) + + _raise_bad_key_type(key) + + +def _is_array_with_nulls(pa_array: pa.Array) -> bool: + return pa_array.null_count > 0 + + +class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]): + """ + Arrow extractor are used to extract data from pyarrow tables. + It makes it possible to extract rows, columns and batches. + These three extractions types have to be implemented. + """ + + def extract_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def extract_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def extract_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + + +def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]: + """Return the first element of a batch (dict) as a row (dict)""" + return {key: array[0] for key, array in py_dict.items()} + + +class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]): + def extract_row(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + def extract_column(self, pa_table: pa.Table) -> pa.Array: + return pa_table.column(0) + + def extract_batch(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + +class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]): + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(pa_table.to_pydict()) + + def extract_column(self, pa_table: pa.Table) -> list: + return pa_table.column(0).to_pylist() + + def extract_batch(self, pa_table: pa.Table) -> dict: + return pa_table.to_pydict() + + +class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]): + def __init__(self, **np_array_kwargs): + self.np_array_kwargs = np_array_kwargs + + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(self.extract_batch(pa_table)) + + def extract_column(self, pa_table: pa.Table) -> np.ndarray: + return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]]) + + def extract_batch(self, pa_table: pa.Table) -> dict: + return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} + + def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray: + if isinstance(pa_array, pa.ChunkedArray): + if isinstance(pa_array.type, _ArrayXDExtensionType): + # don't call to_pylist() to preserve dtype of the fixed-size array + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = [ + row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) + ] + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and all( + not _is_array_with_nulls(chunk) for chunk in pa_array.chunks + ) + array: List = [ + row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) + ] + else: + if isinstance(pa_array.type, _ArrayXDExtensionType): + # 
don't call to_pylist() to preserve dtype of the fixed-size array + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only) + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() + if len(array) > 0: + if any( + (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape)) + or (isinstance(x, float) and np.isnan(x)) + for x in array + ): + return np.array(array, copy=False, dtype=object) + return np.array(array, copy=False) + + +class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]): + def extract_row(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper) + + def extract_column(self, pa_table: pa.Table) -> pd.Series: + return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]] + + def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.to_pandas(types_mapper=pandas_types_mapper) + + +class PythonFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: dict) -> dict: + return self.features.decode_example(row) if self.features else row + + def decode_column(self, column: list, column_name: str) -> list: + return self.features.decode_column(column, column_name) if self.features else column + + def decode_batch(self, batch: dict) -> dict: + return self.features.decode_batch(batch) if self.features else batch + + +class PandasFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: pd.DataFrame) -> pd.DataFrame: + decode = ( + { + column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) + for column_name, feature in self.features.items() + if self.features._column_requires_decoding[column_name] + } + if self.features + else {} + ) + if decode: + row[list(decode.keys())] = row.transform(decode) + return row + + def decode_column(self, column: pd.Series, column_name: str) -> pd.Series: + decode = ( + no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) + if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] + else None + ) + if decode: + column = column.transform(decode) + return column + + def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame: + return self.decode_row(batch) + + +class LazyDict(MutableMapping): + """A dictionary backed by Arrow data. 
The values are formatted on-the-fly when accessing the dictionary.""" + + def __init__(self, pa_table: pa.Table, formatter: "Formatter"): + self.pa_table = pa_table + self.formatter = formatter + + self.data = {key: None for key in pa_table.column_names} + self.keys_to_format = set(self.data.keys()) + + def __len__(self): + return len(self.data) + + def __getitem__(self, key): + value = self.data[key] + if key in self.keys_to_format: + value = self.format(key) + self.data[key] = value + self.keys_to_format.remove(key) + return value + + def __setitem__(self, key, value): + if key in self.keys_to_format: + self.keys_to_format.remove(key) + self.data[key] = value + + def __delitem__(self, key) -> None: + if key in self.keys_to_format: + self.keys_to_format.remove(key) + del self.data[key] + + def __iter__(self): + return iter(self.data) + + def __contains__(self, key): + return key in self.data + + def __repr__(self): + self._format_all() + return repr(self.data) + + if config.PY_VERSION >= version.parse("3.9"): + # merging with the union ("|") operator is supported in Python 3.9+ + + def __or__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = inst.data | other.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = inst.data | other + return inst + return NotImplemented + + def __ror__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = other.data | inst.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = other | inst.data + return inst + return NotImplemented + + def __ior__(self, other): + if isinstance(other, LazyDict): + other = other.copy() + other._format_all() + self.keys_to_format -= other.data.keys() + self.data |= other.data + else: + self.keys_to_format -= other.keys() + self.data |= other + return self + + def __copy__(self): + # Identical to `UserDict.__copy__` + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"].copy() + inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy() + return inst + + def copy(self): + import copy + + return copy.copy(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + raise NotImplementedError + + def format(self, key): + raise NotImplementedError + + def _format_all(self): + for key in self.keys_to_format: + self.data[key] = self.format(key) + self.keys_to_format.clear() + + +class LazyRow(LazyDict): + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key]))[0] + + +class LazyBatch(LazyDict): + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key])) + + +class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]): + """ + A formatter is an object that extracts and formats data from pyarrow tables. + It defines the formatting for rows, columns and batches. 
+ """ + + simple_arrow_extractor = SimpleArrowExtractor + python_arrow_extractor = PythonArrowExtractor + numpy_arrow_extractor = NumpyArrowExtractor + pandas_arrow_extractor = PandasArrowExtractor + + def __init__(self, features: Optional[Features] = None): + self.features = features + self.python_features_decoder = PythonFeaturesDecoder(self.features) + self.pandas_features_decoder = PandasFeaturesDecoder(self.features) + + def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]: + if query_type == "row": + return self.format_row(pa_table) + elif query_type == "column": + return self.format_column(pa_table) + elif query_type == "batch": + return self.format_batch(pa_table) + + def format_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def format_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + + +class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]): + def recursive_tensorize(self, data_struct: dict): + raise NotImplementedError + + +class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]): + def format_row(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_row(pa_table) + + def format_column(self, pa_table: pa.Table) -> pa.Array: + return self.simple_arrow_extractor().extract_column(pa_table) + + def format_batch(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_batch(pa_table) + + +class PythonFormatter(Formatter[Mapping, list, Mapping]): + def __init__(self, features=None, lazy=False): + super().__init__(features) + self.lazy = lazy + + def format_row(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyRow(pa_table, self) + row = self.python_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> list: + column = self.python_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyBatch(pa_table, self) + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return batch + + +class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): + def format_row(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_row(pa_table) + row = self.pandas_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> pd.Series: + column = self.pandas_arrow_extractor().extract_column(pa_table) + column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_batch(pa_table) + row = self.pandas_features_decoder.decode_batch(row) + return row + + +class CustomFormatter(Formatter[dict, ColumnFormat, dict]): + """ + A user-defined custom formatter function defined by a ``transform``. + The transform must take as input a batch of data extracted for an arrow table using the python extractor, + and return a batch. + If the output batch is not a dict, then output_all_columns won't work. 
+ + +class CustomFormatter(Formatter[dict, ColumnFormat, dict]): + """ + A user-defined custom formatter defined by a ``transform``. + The transform must take as input a batch of data extracted from an arrow table using the python extractor, + and return a batch. + If the output batch is not a dict, then output_all_columns won't work. + If the output batch has several fields, then querying a single column won't work since we don't know which field + to return. + """ + + def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): + super().__init__(features=features) + self.transform = transform + + def format_row(self, pa_table: pa.Table) -> dict: + formatted_batch = self.format_batch(pa_table) + try: + return _unnest(formatted_batch) + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}" + ) from exc + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + formatted_batch = self.format_batch(pa_table) + if hasattr(formatted_batch, "keys"): + if len(formatted_batch.keys()) > 1: + raise TypeError( + "Tried to query a column but the custom formatting function returns too many columns. " + f"Only one column was expected but got columns {list(formatted_batch.keys())}." + ) + else: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a column, but got {formatted_batch}" + ) + try: + return formatted_batch[pa_table.column_names[0]] + except Exception as exc: + raise TypeError( + f"Custom formatting function must return a dict to be able to pick a column, but got {formatted_batch}" + ) from exc + + def format_batch(self, pa_table: pa.Table) -> dict: + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return self.transform(batch) + + +def _check_valid_column_key(key: str, columns: List[str]) -> None: + if key not in columns: + raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}") + + +def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: + if isinstance(key, int): + if (key < 0 and key + size < 0) or (key >= size): + raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") + return + elif isinstance(key, slice): + pass + elif isinstance(key, range): + if len(key) > 0: + _check_valid_index_key(max(key), size=size) + _check_valid_index_key(min(key), size=size) + elif isinstance(key, Iterable): + if len(key) > 0: + _check_valid_index_key(int(max(key)), size=size) + _check_valid_index_key(int(min(key)), size=size) + else: + _raise_bad_key_type(key) + + +def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: + if isinstance(key, int): + return "row" + elif isinstance(key, str): + return "column" + elif isinstance(key, (slice, range, Iterable)): + return "batch" + _raise_bad_key_type(key) + + +def query_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + indices: Optional[Table] = None, +) -> pa.Table: + """ + Query a Table to extract the subtable that corresponds to the given key. + + Args: + table (``datasets.table.Table``): The input Table to query from + key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: + - an integer i: the subtable containing only the i-th row + - a slice [i:j:k]: the subtable containing the rows that correspond to this slice + - a range(i, j, k): the subtable containing the rows that correspond to this range + - a string c: the subtable containing all the rows but only the column c + - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable + indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
+ The indices table must contain one column named "indices" of type uint64. + This is used in case of shuffling or row selection. + + + Returns: + ``pyarrow.Table``: the result of the query on the input table + """ + # Check if key is valid + if not isinstance(key, (int, slice, range, str, Iterable)): + try: + key = operator.index(key) + except TypeError: + _raise_bad_key_type(key) + if isinstance(key, str): + _check_valid_column_key(key, table.column_names) + else: + size = indices.num_rows if indices is not None else table.num_rows + _check_valid_index_key(key, size) + # Query the main table + if indices is None: + pa_subtable = _query_table(table, key) + else: + pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) + return pa_subtable + + +def format_table( + table: Table, + key: Union[int, slice, range, str, Iterable], + formatter: Formatter, + format_columns: Optional[list] = None, + output_all_columns=False, +): + """ + Format a Table depending on the key that was used and a Formatter object. + + Args: + table (``datasets.table.Table``): The input Table to format + key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats + the table as either a row, a column or a batch. + formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as + PythonFormatter, NumpyFormatter, etc. + format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the + given formatter. Other columns are discarded (unless ``output_all_columns`` is True) + output_all_columns (:obj:`bool`, defaults to False): If True, the formatted output is completed using the columns + that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used. + + + Returns: + A row, column or batch formatted object defined by the Formatter: + - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column. + - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column. + - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column. + - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column. + - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column. + """ + if isinstance(table, Table): + pa_table = table.table + else: + pa_table = table + query_type = key_to_query_type(key) + python_formatter = PythonFormatter(features=formatter.features) + if format_columns is None: + return formatter(pa_table, query_type=query_type) + elif query_type == "column": + if key in format_columns: + return formatter(pa_table, query_type) + else: + return python_formatter(pa_table, query_type=query_type) + else: + pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) + formatted_output = formatter(pa_table_to_format, query_type=query_type) + if output_all_columns: + if isinstance(formatted_output, MutableMapping): + pa_table_with_remaining_columns = pa_table.drop( + col for col in pa_table.column_names if col in format_columns + ) + remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) + formatted_output.update(remaining_columns_dict) + else: + raise TypeError( + f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" + ) + return formatted_output
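Putting `query_table` and `format_table` together, a minimal sketch (the column names and values are invented; the output types follow the Returns section above):

```python
import pyarrow as pa
from datasets.formatting import format_table, get_formatter, query_table
from datasets.table import InMemoryTable

table = InMemoryTable.from_pydict({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
pa_subtable = query_table(table, slice(0, 2))  # batch query -> a pyarrow.Table with rows 0..1
formatted = format_table(
    pa_subtable, key=slice(0, 2), formatter=get_formatter("numpy"),
    format_columns=["a"], output_all_columns=True,
)
# roughly {"a": np.array([1, 2]), "b": [4.0, 5.0]}; "b" is filled in by the PythonFormatter
```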
+ """ + if isinstance(table, Table): + pa_table = table.table + else: + pa_table = table + query_type = key_to_query_type(key) + python_formatter = PythonFormatter(features=formatter.features) + if format_columns is None: + return formatter(pa_table, query_type=query_type) + elif query_type == "column": + if key in format_columns: + return formatter(pa_table, query_type) + else: + return python_formatter(pa_table, query_type=query_type) + else: + pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) + formatted_output = formatter(pa_table_to_format, query_type=query_type) + if output_all_columns: + if isinstance(formatted_output, MutableMapping): + pa_table_with_remaining_columns = pa_table.drop( + col for col in pa_table.column_names if col in format_columns + ) + remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) + formatted_output.update(remaining_columns_dict) + else: + raise TypeError( + f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" + ) + return formatted_output diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..8035341c5cd2794345163b388945b3a092708916 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py @@ -0,0 +1,160 @@ +# Copyright 2021 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING, Dict, Optional + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.logging import get_logger +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import jax + import jaxlib + +logger = get_logger() + +DEVICE_MAPPING: Optional[dict] = None + + +class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]): + def __init__(self, features=None, device=None, **jnp_array_kwargs): + super().__init__(features=features) + import jax + from jaxlib.xla_client import Device + + if isinstance(device, Device): + raise ValueError( + f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` " + "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " + "the device with `str()` to get its string identifier that will be internally mapped " + "to the actual `jaxlib.xla_extension.Device`." 
+ ) + self.device = device if isinstance(device, str) else str(jax.devices()[0]) + # using a global variable since `jaxlib.xla_extension.Device` is not serializable + # with either `pickle` or `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + if self.device not in list(DEVICE_MAPPING.keys()): + logger.warning( + f"Device with string identifier {self.device} not listed among the available " + f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " + f"device: {str(jax.devices()[0])}." + ) + self.device = str(jax.devices()[0]) + self.jnp_array_kwargs = jnp_array_kwargs + + @staticmethod + def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]: + import jax + + return {str(device): device for device in jax.devices()} + + def _consolidate(self, column): + import jax + import jax.numpy as jnp + + if isinstance(column, list) and column: + if all( + isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return jnp.stack(column, axis=0) + return column + + def _tensorize(self, value): + import jax + import jax.numpy as jnp + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + # the default int precision depends on the jax config + # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision + if jax.config.jax_enable_x64: + default_dtype = {"dtype": jnp.int64} + else: + default_dtype = {"dtype": jnp.int32} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": jnp.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + # using a global variable since `jaxlib.xla_extension.Device` is not serializable + # with either `pickle` or `dill`, so we need to use a global variable instead + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + + with jax.default_device(DEVICE_MAPPING[self.device]): + # calling jnp.array on a np.ndarray does copy the data + # see https://github.com/google/jax/issues/4486 + return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + import jax + + # support for torch, tf, jax etc. 
+ if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "jax.Array": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/np_formatter.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/np_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..95bcff2b51728fdd9647dad382639724df163ce2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/np_formatter.py @@ -0,0 +1,106 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from collections.abc import Mapping + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]): + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + self.np_array_kwargs = np_array_kwargs + + def _consolidate(self, column): + if isinstance(column, list): + if column and all( + isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return np.stack(column) + else: + # don't use np.array(column, dtype=object) + # since it fails in certain cases + # see https://stackoverflow.com/q/51005699 + out = np.empty(len(column), dtype=object) + out[:] = column + return out + return column + + def _tensorize(self, value): + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value + elif isinstance(value, np.number): + return value + + default_dtype = {} + + if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": np.int64} + elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": np.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + return np.asarray(value, **self.np_array_kwargs) + + return np.asarray(value, **{**default_dtype, **self.np_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + if isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> np.ndarray: + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py new file mode 100644 index 
0000000000000000000000000000000000000000..543bde52dd0fb29f2732bce5ee2edcdf9f253109 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py @@ -0,0 +1,122 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from collections.abc import Mapping +from functools import partial +from typing import TYPE_CHECKING, Optional + +import pyarrow as pa + +from .. import config +from ..features import Features +from ..features.features import decode_nested_example +from ..utils.py_utils import no_op_if_value_is_null +from .formatting import BaseArrowExtractor, TensorFormatter + + +if TYPE_CHECKING: + import polars as pl + + +class PolarsArrowExtractor(BaseArrowExtractor["pl.DataFrame", "pl.Series", "pl.DataFrame"]): + def extract_row(self, pa_table: pa.Table) -> "pl.DataFrame": + if config.POLARS_AVAILABLE: + if "polars" not in sys.modules: + import polars + else: + polars = sys.modules["polars"] + + return polars.from_arrow(pa_table.slice(length=1)) + else: + raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") + + def extract_column(self, pa_table: pa.Table) -> "pl.Series": + if config.POLARS_AVAILABLE: + if "polars" not in sys.modules: + import polars + else: + polars = sys.modules["polars"] + + return polars.from_arrow(pa_table.select([0]))[pa_table.column_names[0]] + else: + raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") + + def extract_batch(self, pa_table: pa.Table) -> "pl.DataFrame": + if config.POLARS_AVAILABLE: + if "polars" not in sys.modules: + import polars + else: + polars = sys.modules["polars"] + + return polars.from_arrow(pa_table) + else: + raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") + + +class PolarsFeaturesDecoder: + def __init__(self, features: Optional[Features]): + self.features = features + import polars as pl # noqa: F401 - import pl at initialization + + def decode_row(self, row: "pl.DataFrame") -> "pl.DataFrame": + decode = ( + { + column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) + for column_name, feature in self.features.items() + if self.features._column_requires_decoding[column_name] + } + if self.features + else {} + ) + if decode: + row[list(decode.keys())] = row.map_rows(decode) + return row + + def decode_column(self, column: "pl.Series", column_name: str) -> "pl.Series": + decode = ( + no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) + if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] + else None + ) + if decode: + column = column.map_elements(decode) + return column + + def decode_batch(self, batch: "pl.DataFrame") -> "pl.DataFrame": + return self.decode_row(batch) + + +class PolarsFormatter(TensorFormatter[Mapping, "pl.DataFrame", Mapping]): + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) 
+ self.np_array_kwargs = np_array_kwargs + self.polars_arrow_extractor = PolarsArrowExtractor + self.polars_features_decoder = PolarsFeaturesDecoder(features) + import polars as pl # noqa: F401 - import pl at initialization + + def format_row(self, pa_table: pa.Table) -> "pl.DataFrame": + row = self.polars_arrow_extractor().extract_row(pa_table) + row = self.polars_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> "pl.Series": + column = self.polars_arrow_extractor().extract_column(pa_table) + column = self.polars_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> "pl.DataFrame": + row = self.polars_arrow_extractor().extract_batch(pa_table) + row = self.polars_features_decoder.decode_batch(row) + return row diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..adb15cda3815d77fa0272562e83fda029d1babee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py @@ -0,0 +1,115 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. 
import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import tensorflow as tf + + +class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): + def __init__(self, features=None, **tf_tensor_kwargs): + super().__init__(features=features) + self.tf_tensor_kwargs = tf_tensor_kwargs + import tensorflow as tf # noqa: F401 - import tf at initialization + + def _consolidate(self, column): + import tensorflow as tf + + if isinstance(column, list) and column: + if all( + isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column + ): + return tf.stack(column) + elif all( + isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype + for x in column + ): + # only ragged-stack 1-D tensors, otherwise some dimensions become ragged even though they were consolidated + return tf.ragged.stack(column) + + return column + + def _tensorize(self, value): + import tensorflow as tf + + if value is None: + return value + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": tf.int64} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": tf.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + + return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import tensorflow as tf + + # support for torch, tf, jax etc. + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # tf tensors cannot be instantiated from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "tf.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch
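As a usage sketch of the ragged-stacking behaviour above (assuming TensorFlow is installed; the data is invented):

```python
from datasets import Dataset

ds = Dataset.from_dict({"tokens": [[1, 2, 3], [4, 5]]}).with_format("tf")
ds[0]["tokens"]  # -> tf.Tensor([1 2 3], dtype=int64)
ds["tokens"]     # rows of different lengths -> a tf.RaggedTensor via tf.ragged.stack
```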
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..8efe759a1443a74b94d59fe38944f6527ac18cf7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py @@ -0,0 +1,115 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING + +import numpy as np +import pyarrow as pa + +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + + +if TYPE_CHECKING: + import torch + + +class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]): + def __init__(self, features=None, **torch_tensor_kwargs): + super().__init__(features=features) + self.torch_tensor_kwargs = torch_tensor_kwargs + import torch # noqa: F401 - import torch at initialization + + def _consolidate(self, column): + import torch + + if isinstance(column, list) and column: + if all( + isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype + for x in column + ): + return torch.stack(column) + return column + + def _tensorize(self, value): + import torch + + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + + default_dtype = {} + + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {"dtype": torch.int64} + + # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility. + # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss. + if value.dtype in [np.uint16, np.uint32]: + value = value.astype(np.int64) + + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {"dtype": torch.float32} + elif config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + if value.ndim == 2: + value = value[:, :, np.newaxis] + + value = value.transpose((2, 0, 1)) + return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import torch + + # support for torch, tf, jax etc. 
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor): + data_struct = data_struct.__array__() + # support for nested types like struct of list of struct + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> "torch.Tensor": + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32ff9f57f8489d8d6515407dcb9b9ba336ee4b2b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57a868704e2f0a05df44f0849f2bea4bf87bca2b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5dac3d81a10e6f2c0fe8dff7b6898986fc884ef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bb675b9e6ccfd2c27145e1c79efffd178e23b53 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7739417e1dbe0338c06ef708604bec73fc085288 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a418d120c52ed5313857febc59dab299be370b69 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dataset_viewer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dataset_viewer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be6960d3ea718eab38b739a6e7f30b256b3ca17b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dataset_viewer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/cache.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1aec0a18ac61cf32e672d172b417a3871cbb540 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/cache.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10281aaf946379130b48c175954b823a846ba594 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c751b46e5fb4b0ee3286ce315dbca536c23ab471 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8d994581ac812892b0428881ae8435d0047d66b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e41bd24817a5e3f6ba4698bce2c7006354e45f2c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5e8983b0ca96f6c19aa4354ec84a8d339d250a7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3569bc1b97e4f6e4e5bb2428939527ee99454b58 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b88c75fe943fc30716afa1295b59f806b4f6e321 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8727172ae058e56805bd8ed0f988b6788711dcfd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/LICENSE @@ -0,0 +1,13 @@ + Copyright 2016 Andrew Svetlov and aio-libs contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..9d9b4a72123d46871134b92b629443644d098ff3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/METADATA
@@ -0,0 +1,132 @@
+Metadata-Version: 2.1
+Name: multidict
+Version: 6.0.5
+Summary: multidict implementation
+Home-page: https://github.com/aio-libs/multidict
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: GitHub, https://github.com/aio-libs/multidict/actions
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/multidict
+Project-URL: Docs: RTD, https://multidict.aio-libs.org
+Project-URL: GitHub: issues, https://github.com/aio-libs/multidict/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/multidict
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+
+=========
+multidict
+=========
+
+.. image:: https://github.com/aio-libs/multidict/workflows/CI/badge.svg
+   :target: https://github.com/aio-libs/multidict/actions?query=workflow%3ACI
+   :alt: GitHub status for master branch
+
+.. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/aio-libs/multidict
+   :alt: Coverage metrics
+
+.. image:: https://img.shields.io/pypi/v/multidict.svg
+   :target: https://pypi.org/project/multidict
+   :alt: PyPI
+
+.. image:: https://readthedocs.org/projects/multidict/badge/?version=latest
+   :target: http://multidict.aio-libs.org/en/latest/?badge=latest
+   :alt: Documentation
+
+.. image:: https://img.shields.io/pypi/pyversions/multidict.svg
+   :target: https://pypi.org/project/multidict
+   :alt: Python versions
+
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+   :target: https://gitter.im/aio-libs/Lobby
+   :alt: Chat on Gitter
+
+Multidict is a dict-like collection of *key-value pairs* where a key
+may occur more than once in the container.
+
+Introduction
+------------
+
+*HTTP headers* and *URL query strings* require a specific data structure:
+a *multidict*. It behaves mostly like a regular ``dict`` but may hold
+several *values* for the same *key* and *preserves insertion ordering*.
+
+The *key* is a ``str`` (or an ``istr`` for case-insensitive dictionaries).
+
+``multidict`` has four multidict classes:
+``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``
+and ``CIMultiDictProxy``.
+
+The immutable proxies (``MultiDictProxy`` and
+``CIMultiDictProxy``) provide a dynamic view of the
+proxied multidict; the view reflects changes in the underlying collection.
+They implement the ``collections.abc.Mapping`` interface.
+
+The regular mutable classes (``MultiDict`` and ``CIMultiDict``)
+implement ``collections.abc.MutableMapping``, which allows them to change
+their own content.
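+
+For example, a ``MultiDict`` can keep several values under one key, while a
+``MultiDictProxy`` built on top of it tracks every change (a minimal sketch of
+the documented ``add``/``getall`` API)::
+
+    >>> from multidict import MultiDict, MultiDictProxy
+    >>> d = MultiDict([('a', 1)])
+    >>> d.add('a', 2)
+    >>> d.getall('a')
+    [1, 2]
+    >>> proxy = MultiDictProxy(d)
+    >>> d.add('a', 3)
+    >>> proxy.getall('a')
+    [1, 2, 3]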
+
+
+*Case insensitive* dictionaries (``CIMultiDict`` and
+``CIMultiDictProxy``) assume the *keys* are case
+insensitive, e.g.::
+
+   >>> dct = CIMultiDict(key='val')
+   >>> 'Key' in dct
+   True
+   >>> dct['Key']
+   'val'
+
+*Keys* should be ``str`` or ``istr`` instances.
+
+The library has optional C extensions for speed.
+
+
+License
+-------
+
+Apache 2
+
+Library Installation
+--------------------
+
+.. code-block:: bash
+
+   $ pip install multidict
+
+The library is Python 3 only!
+
+PyPI contains binary wheels for Linux, Windows and macOS. If you want to install
+``multidict`` on another operating system (or on *Alpine Linux* inside a Docker
+container), a source tarball will be used to compile the library, which requires
+a C compiler and Python headers to be installed.
+
+To skip the compilation, please use the ``MULTIDICT_NO_EXTENSIONS`` environment
+variable, e.g.:
+
+.. code-block:: bash
+
+   $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict
+
+Please note that the pure-Python (uncompiled) version is about 20-50 times
+slower, depending on the usage scenario.
+
+
+
+Changelog
+---------
+See the `RTD page <https://multidict.aio-libs.org>`_.
diff --git a/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..53707d7781d1904d894269cb78ce0b7ee4af36f7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/RECORD
@@ -0,0 +1,19 @@
+multidict-6.0.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+multidict-6.0.5.dist-info/LICENSE,sha256=k9Ealo4vDzY3PECBH_bSDhc_WMPKtYhM1mF7v9eVSSo,611
+multidict-6.0.5.dist-info/METADATA,sha256=fGbYCQYEMcDtxEz2H6GLf1np9JtMhNTaLVzgAhsQYzU,4214
+multidict-6.0.5.dist-info/RECORD,,
+multidict-6.0.5.dist-info/WHEEL,sha256=1FEjxEYgybphwh9S0FO9IcZ0B-NIeM2ko8OzhFZeOeQ,152
+multidict-6.0.5.dist-info/top_level.txt,sha256=-euDElkk5_qkmfIJ7WiqCab02ZlSFZWynejKg59qZQQ,10
+multidict/__init__.py,sha256=psbRrP64CD22Wjoc_OoqG9QlkRGcaZfOFCoPmoUiMig,928
+multidict/__init__.pyi,sha256=SbgC2ew1NvNXWlRKs9o0KhW4moozgMqgQ0OA4Re5JQQ,4840
+multidict/__pycache__/__init__.cpython-310.pyc,,
+multidict/__pycache__/_abc.cpython-310.pyc,,
+multidict/__pycache__/_compat.cpython-310.pyc,,
+multidict/__pycache__/_multidict_base.cpython-310.pyc,,
+multidict/__pycache__/_multidict_py.cpython-310.pyc,,
+multidict/_abc.py,sha256=Zvnrn4SBkrv4QTD7-ZzqNcoxw0f8KStLMPzGvBuGT2w,1190
+multidict/_compat.py,sha256=tjUGdP9ooiH6c2KJrvUbPRwcvjWerKlKU6InIviwh7w,316
+multidict/_multidict.cpython-310-x86_64-linux-gnu.so,sha256=BmNKiShRM0HlwDp_fjuenfzDjBlo6C0BEfpi55TRp-k,394656
+multidict/_multidict_base.py,sha256=XugkE78fXBmtzDdg2Yi9TrEhDexmL-6qJbFIG0viLMg,3791
+multidict/_multidict_py.py,sha256=57h4sYrRIu7EjMX4YpHVIZVrV9-q1KCW3F6rao10D3U,15050
+multidict/py.typed,sha256=e9bmbH3UFxsabQrnNFPG9qxIXztwbcM6IKDYnvZwprY,15
diff --git a/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..1d812513305907d2ee59b95d161fdb54d1ab559c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: false
+Tag: cp310-cp310-manylinux_2_17_x86_64
+Tag: cp310-cp310-manylinux2014_x86_64
+
diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/COPYING
b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..17f34bc3d8ae0889ae327ae0c16bf78870c41527 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/COPYING @@ -0,0 +1,28 @@ +Copyright (c) 2006-2008, R Oudkerk + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of author nor the names of any contributors may be + used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0f46bc0edd00d1950d98fec0f78e4366691391d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE @@ -0,0 +1,38 @@ +Copyright (c) 2008-2016 California Institute of Technology. +Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +All rights reserved. + +This software forks the python package "multiprocessing". Licence and +copyright information for multiprocessing can be found in "COPYING". + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1e8d30dd497e6ac867b1f7f11f79ac2704525be0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA @@ -0,0 +1,203 @@ +Metadata-Version: 2.1 +Name: multiprocess +Version: 0.70.16 +Summary: better multiprocessing and multithreading in Python +Home-page: https://github.com/uqfoundation/multiprocess +Download-URL: https://pypi.org/project/multiprocess/#files +Author: Mike McKerns +Author-email: mmckerns@uqfoundation.org +Maintainer: Mike McKerns +Maintainer-email: mmckerns@uqfoundation.org +License: BSD-3-Clause +Project-URL: Documentation, http://multiprocess.rtfd.io +Project-URL: Source Code, https://github.com/uqfoundation/multiprocess +Project-URL: Bug Tracker, https://github.com/uqfoundation/multiprocess/issues +Platform: Linux +Platform: Windows +Platform: Mac +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Requires-Python: >=3.8 +License-File: LICENSE +License-File: COPYING +Requires-Dist: dill (>=0.3.8) + +----------------------------------------------------------------- +multiprocess: better multiprocessing and multithreading in Python +----------------------------------------------------------------- + +About Multiprocess +================== + +``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. 
``multiprocess`` leverages ``multiprocessing`` to support the
spawning of processes using the API of the Python standard library's
``threading`` module. ``multiprocessing`` has been distributed as part of
the standard library since Python 2.6.
+
+``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing.
+``multiprocess`` is in active development, so any user feedback, bug reports, comments,
+or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.
+
+
+Major Features
+==============
+
+``multiprocess`` enables:
+
+  - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues
+  - objects to be shared between processes using a server process or (for simple data) shared memory
+
+``multiprocess`` provides:
+
+  - equivalents of all the synchronization primitives in ``threading``
+  - a ``Pool`` class to facilitate submitting tasks to worker processes
+  - enhanced serialization, using ``dill``
+
+
+Current Release
+===============
+
+The latest released version of ``multiprocess`` is available from:
+
+    https://pypi.org/project/multiprocess
+
+``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.
+
+
+Development Version
+===================
+
+You can get the latest development version with all the shiny new features at:
+
+    https://github.com/uqfoundation
+
+If you have a new contribution, please submit a pull request.
+
+
+Installation
+============
+
+``multiprocess`` can be installed with ``pip``::
+
+    $ pip install multiprocess
+
+For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.
+
+
+Requirements
+============
+
+``multiprocess`` requires:
+
+  - ``python`` (or ``pypy``), **>=3.8**
+  - ``setuptools``, **>=42**
+  - ``dill``, **>=0.3.8**
+
+
+Basic Usage
+===========
+
+The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
+For example ::
+
+    from multiprocess import Process, Queue
+
+    def f(q):
+        q.put('hello world')
+
+    if __name__ == '__main__':
+        q = Queue()
+        p = Process(target=f, args=[q])
+        p.start()
+        print (q.get())
+        p.join()
+
+Synchronization primitives like locks, semaphores and conditions are
+available, for example ::
+
+    >>> from multiprocess import Condition
+    >>> c = Condition()
+    >>> print (c)
+    <Condition(<Lock(owner=None)>), 0>
+    >>> c.acquire()
+    True
+    >>> print (c)
+    <Condition(<Lock(owner=MainProcess)>), 0>
+
+One can also use a manager to create shared objects either in shared
+memory or in a server process, for example ::
+
+    >>> from multiprocess import Manager
+    >>> manager = Manager()
+    >>> l = manager.list(range(10))
+    >>> l.reverse()
+    >>> print (l)
+    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+    >>> print (repr(l))
+    <ListProxy object, typeid 'list' at 0x...>
+
+Tasks can be offloaded to a pool of worker processes in various ways,
+for example ::
+
+    >>> from multiprocess import Pool
+    >>> def f(x): return x*x
+    ...
+    >>> p = Pool(4)
+    >>> result = p.map_async(f, range(10))
+    >>> print (result.get(timeout=1))
+    [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+
+When ``dill`` is installed, serialization is extended to most objects,
+for example ::
+
+    >>> from multiprocess import Pool
+    >>> p = Pool(4)
+    >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, range(10)))
+    [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
+
+
+More Information
+================
+
+Probably the best way to get started is to look at the documentation at
+http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that
+demonstrate how ``multiprocess`` can be used to leverage multiple processes
+to execute Python in parallel. You can run the test suite with
+``python -m multiprocess.tests``. As ``multiprocess`` conforms to the
+``multiprocessing`` interface, the examples and documentation found at
+http://docs.python.org/library/multiprocessing.html also apply to
+``multiprocess`` if one uses ``import multiprocessing as multiprocess``.
+See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples
+for a set of examples that demonstrate some basic use cases and benchmarking
+for running Python code in parallel. Please feel free to submit a ticket on
+GitHub, or ask a question on Stack Overflow (**@Mike McKerns**). If you would
+like to share how you use ``multiprocess`` in your work, please send an email
+(to **mmckerns at uqfoundation dot org**).
+
+
+Citation
+========
+
+If you use ``multiprocess`` to do research that leads to publication, we ask that you
+acknowledge use of ``multiprocess`` by citing the following in your publication::
+
+    M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
+    "Building a framework for predictive science", Proceedings of
+    the 10th Python in Science Conference, 2011;
+    http://arxiv.org/pdf/1202.1056
+
+    Michael McKerns and Michael Aivazis,
+    "pathos: a framework for heterogeneous computing", 2010- ;
+    https://uqfoundation.github.io/project/pathos
+
+Please see https://uqfoundation.github.io/project/pathos or
+http://arxiv.org/pdf/1202.1056 for further information.
diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f9b6e322f58b95c8384ea9203fc06bd289e0c564 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD @@ -0,0 +1,73 @@ +_multiprocess/__init__.py,sha256=zX5_h36TGSL0brHRtBvCL5E59ccW7yjL79i-Y399ODM,321 +_multiprocess/__pycache__/__init__.cpython-310.pyc,, +multiprocess-0.70.16.dist-info/COPYING,sha256=n3_yfLkw0sMgLuB-PS1hRvTeZ20GmjPaMWbJjNuoOpU,1493 +multiprocess-0.70.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +multiprocess-0.70.16.dist-info/LICENSE,sha256=6XUJedJKg2dhI98BD3PMtVtZvRFT-oGczkOr5B4tEEA,1930 +multiprocess-0.70.16.dist-info/METADATA,sha256=Sv2eH2CjjyjVYaryTKqHkbJTlxlVA-SbmziCgkBJeQ0,7151 +multiprocess-0.70.16.dist-info/RECORD,, +multiprocess-0.70.16.dist-info/WHEEL,sha256=KxatxaZA14OswIJTdImHhiM2tdZgU-xLZEzs-sYveVc,94 +multiprocess-0.70.16.dist-info/top_level.txt,sha256=qtJc8GNdvi6suNpISX0Myln9AXJBYrNuas1MCqRPPqg,27 +multiprocess/__info__.py,sha256=84TUBn1oJMNpbVvXKs0lKyfLYaZvRr-ZVh1zHM9VeCY,7997 +multiprocess/__init__.py,sha256=XWUBDGorUkDW04h64xe51pUV9N5gzvSDj3tNT2ekifw,1856 +multiprocess/__pycache__/__info__.cpython-310.pyc,, +multiprocess/__pycache__/__init__.cpython-310.pyc,, +multiprocess/__pycache__/connection.cpython-310.pyc,, +multiprocess/__pycache__/context.cpython-310.pyc,, +multiprocess/__pycache__/forkserver.cpython-310.pyc,, +multiprocess/__pycache__/heap.cpython-310.pyc,, +multiprocess/__pycache__/managers.cpython-310.pyc,, +multiprocess/__pycache__/pool.cpython-310.pyc,, +multiprocess/__pycache__/popen_fork.cpython-310.pyc,, +multiprocess/__pycache__/popen_forkserver.cpython-310.pyc,, +multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc,, +multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc,, +multiprocess/__pycache__/process.cpython-310.pyc,, +multiprocess/__pycache__/queues.cpython-310.pyc,, +multiprocess/__pycache__/reduction.cpython-310.pyc,, +multiprocess/__pycache__/resource_sharer.cpython-310.pyc,, +multiprocess/__pycache__/resource_tracker.cpython-310.pyc,, +multiprocess/__pycache__/shared_memory.cpython-310.pyc,, +multiprocess/__pycache__/sharedctypes.cpython-310.pyc,, +multiprocess/__pycache__/spawn.cpython-310.pyc,, +multiprocess/__pycache__/synchronize.cpython-310.pyc,, +multiprocess/__pycache__/util.cpython-310.pyc,, +multiprocess/connection.py,sha256=TO9BbLVlLVjTjr0fP7lIumBgiLwaFVnpqMBgFG6iL9s,31843 +multiprocess/context.py,sha256=2fYvgfnu3B8wj8UyNndHUHgeuVDoVxlkFFKryycstaU,11610 +multiprocess/dummy/__init__.py,sha256=kSekDqD_NCy0FDg7XnxZSgW-Ldg1_iRr07sNwDajKpA,3061 +multiprocess/dummy/__pycache__/__init__.cpython-310.pyc,, +multiprocess/dummy/__pycache__/connection.cpython-310.pyc,, +multiprocess/dummy/connection.py,sha256=1j3Rl5_enBM-_kMO6HDmum3kPAoFE4Zs485HV5H-V6s,1598 +multiprocess/forkserver.py,sha256=hiltKfLImDYJyAcezNAgMDaQznB2LtYWgwre0QroLRg,12138 +multiprocess/heap.py,sha256=9rt5u5m5rkhJNfDWiCLpYDoWIt0LbElmx52yMqk7phQ,11626 +multiprocess/managers.py,sha256=Y5m_aCdLE4mSCuyVrwMWg5Nh9f4OdSHDlSajyOgyGao,47562 +multiprocess/pool.py,sha256=FTmtfoqkuN8Dd48f5TgdkokoxYN75xcnR78Hw-bLSng,32759 +multiprocess/popen_fork.py,sha256=Nvq5vVId24UfkOQxXhxZbcXuo8d6YMc409yRXAamTd0,2374 +multiprocess/popen_forkserver.py,sha256=SrEbV8Wv0Uu_UegkaW-cayXRdjTGXr560Yyy90pj-yE,2227 
+multiprocess/popen_spawn_posix.py,sha256=l7XSWqR5UWiUSJh35qeSElLuNfUeEYwvH5HzKRnnyqg,2029 +multiprocess/popen_spawn_win32.py,sha256=A9uvlPmhO8JBzNcEU_Gmix2Q_qYJW1NXZgXPwtN5Ao0,4011 +multiprocess/process.py,sha256=GIIo2NiBsX1r_m0J1TcnbdeSulGLWHElRCuYRkkdgQ4,12083 +multiprocess/queues.py,sha256=sgXCXnIOVrPScqI3lwRD9t3IshqIBMEksLtouPH9Nzc,12139 +multiprocess/reduction.py,sha256=NQQ6KbDhmuAyaDeWaIarTZQokGPhcFda1poNnPm5uNc,9637 +multiprocess/resource_sharer.py,sha256=nEApLhMQqd8KunfaNKl3n8vdeiCGPxKrSL1Ja0nNAEk,5132 +multiprocess/resource_tracker.py,sha256=_D2iX4IWRe3dOwLoLjfCnXNbDAM4pRzA8qEMTcRfutw,9056 +multiprocess/shared_memory.py,sha256=UTAecHECIOHElP9Tg6yURCo4pKZiLy65TkASjEXeGus,18458 +multiprocess/sharedctypes.py,sha256=d-9SKRJHRlJJC331IxEoWOUXIeY9zxCbhWejXOmzGw0,6306 +multiprocess/spawn.py,sha256=cgtV66HhV_yIVzvdblc8bVdSpem16Ks0BOFu_bV5PDQ,9293 +multiprocess/synchronize.py,sha256=6q1ijwWyWLWLO8uUtaYT9MKepAYKfdzWPSEZGyJFP4s,11829 +multiprocess/tests/__init__.py,sha256=k00IjwhAUV_O1bp81895vN1gLnFzBM3iM-QTn5VrQnU,199087 +multiprocess/tests/__main__.py,sha256=RauIRQrO0HwRq_clLqbBk4gwo5Xw3-ASLuC029XaHeA,912 +multiprocess/tests/__pycache__/__init__.cpython-310.pyc,, +multiprocess/tests/__pycache__/__main__.cpython-310.pyc,, +multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc,, +multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc,, +multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc,, +multiprocess/tests/mp_fork_bomb.py,sha256=6ADOEzh1aXHZ21aOGoBPhKcgB5sj15G9tQVgSc6GrlY,448 +multiprocess/tests/mp_preload.py,sha256=1-WvLFMaPoH-vZbpUaJvvZHFxTpA9tgmct2vblQy99M,365 +multiprocess/tests/test_multiprocessing_fork.py,sha256=ue1SQLJFxm1oc_3F2gR_WRtt39jhaj0l_Ht6Y1MBmFo,476 +multiprocess/tests/test_multiprocessing_forkserver.py,sha256=VFlUuZI60gyRbNxfHWDlgmy3zm-dPTldLWuKQZ8KObs,391 +multiprocess/tests/test_multiprocessing_main_handling.py,sha256=mtmN0K-spqZCcZVNLf_HrhP186-knpY6eaoFonL1U4U,12018 +multiprocess/tests/test_multiprocessing_spawn.py,sha256=2UAisJX58GZCbYuDFay_x97R9akhzzjIA4VuUUzITOY,276 +multiprocess/util.py,sha256=OPI3CZ34BNwwwa7AqW-eGhnuSUsu-ozy2NRU8BYKuwg,14012 diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..6d6b3dada5abf9570af11d88980a1299e01af0ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py310-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..d547cb06bfcaf32e902f6d3c00ec331470ab2f71 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_multiprocess +multiprocess diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/__init__.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..609501c8c5930a400204046b9dfa63a8132e9086 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/__init__.py @@ -0,0 +1,133 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from dataproperty import LineBreakHandling + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._factory import TableWriterFactory +from ._function import dumps_tabledata +from ._logger import set_logger +from ._table_format import FormatAttr, TableFormat +from .error import ( + EmptyTableDataError, + EmptyTableNameError, + EmptyValueError, + NotSupportedError, + WriterNotFoundError, +) +from .style import Align, Format +from .typehint import ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + List, + Nan, + NoneType, + NullString, + RealNumber, + String, +) +from .writer import ( + AbstractTableWriter, + AsciiDocTableWriter, + BoldUnicodeTableWriter, + BorderlessTableWriter, + CssTableWriter, + CsvTableWriter, + ElasticsearchWriter, + ExcelXlsTableWriter, + ExcelXlsxTableWriter, + HtmlTableWriter, + JavaScriptTableWriter, + JsonLinesTableWriter, + JsonTableWriter, + LatexMatrixWriter, + LatexTableWriter, + LtsvTableWriter, + MarkdownTableWriter, + MediaWikiTableWriter, + NullTableWriter, + NumpyTableWriter, + PandasDataFramePickleWriter, + PandasDataFrameWriter, + PythonCodeTableWriter, + RstCsvTableWriter, + RstGridTableWriter, + RstSimpleTableWriter, + SpaceAlignedTableWriter, + SqliteTableWriter, + TomlTableWriter, + TsvTableWriter, + UnicodeTableWriter, + YamlTableWriter, +) + + +__all__ = ( + "__author__", + "__copyright__", + "__email__", + "__license__", + "__version__", + "LineBreakHandling", + "TableWriterFactory", + "dumps_tabledata", + "set_logger", + "FormatAttr", + "TableFormat", + "Align", + "Format", + "Bool", + "DateTime", + "Dictionary", + "Infinity", + "Integer", + "IpAddress", + "List", + "Nan", + "NoneType", + "NullString", + "RealNumber", + "String", + "EmptyTableDataError", + "EmptyTableNameError", + "EmptyValueError", + "NotSupportedError", + "WriterNotFoundError", + "AbstractTableWriter", + "AsciiDocTableWriter", + "BoldUnicodeTableWriter", + "BorderlessTableWriter", + "CssTableWriter", + "CsvTableWriter", + "ElasticsearchWriter", + "ExcelXlsTableWriter", + "ExcelXlsxTableWriter", + "HtmlTableWriter", + "JavaScriptTableWriter", + "JsonLinesTableWriter", + "JsonTableWriter", + "LatexMatrixWriter", + "LatexTableWriter", + "LtsvTableWriter", + "MarkdownTableWriter", + "MediaWikiTableWriter", + "NullTableWriter", + "NumpyTableWriter", + "PandasDataFramePickleWriter", + "PandasDataFrameWriter", + "PythonCodeTableWriter", + "RstCsvTableWriter", + "RstGridTableWriter", + "RstSimpleTableWriter", + "SpaceAlignedTableWriter", + "SqliteTableWriter", + "TomlTableWriter", + "TsvTableWriter", + "UnicodeTableWriter", + "YamlTableWriter", +) diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/__version__.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed8c0e5f4906d9cf0bc7d6a4da939239bba0717 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/__version__.py @@ -0,0 +1,6 @@ +__author__ = "Tsuyoshi Hombashi" +__copyright__ = f"Copyright 2016, {__author__}" +__license__ = "MIT License" +__version__ = "1.2.0" +__maintainer__ = __author__ +__email__ = "tsuyoshi.hombashi@gmail.com" diff --git 
a/llmeval-env/lib/python3.10/site-packages/pytablewriter/_converter.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a8d70441a2dd20e4184eda5ab80bc4a5d116960
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_converter.py
@@ -0,0 +1,11 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import re
+
+
+def strip_quote(text: str, value: str) -> str:
+    re_replace = re.compile(f"[\"']{value:s}[\"']", re.MULTILINE)
+
+    return re_replace.sub(value, text)
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/_factory.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b404bc0bc54d4a47fe3fd83b41b2e1d407ae53c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_factory.py
@@ -0,0 +1,274 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import os
+from itertools import chain
+from typing import Any, List
+
+import typepy
+
+from ._logger import logger
+from ._table_format import FormatAttr, TableFormat
+from .error import WriterNotFoundError
+from .writer import AbstractTableWriter
+
+
+class TableWriterFactory:
+    """
+    A factory class of table writer classes.
+    """
+
+    @classmethod
+    def create_from_file_extension(cls, file_extension: str, **kwargs: Any) -> AbstractTableWriter:
+        """
+        Create a table writer class instance from a file extension.
+        Supported file extensions are as follows:
+
+            ================== ===================================
+            Extension          Writer Class
+            ================== ===================================
+            ``".adoc"``        :py:class:`~.AsciiDocTableWriter`
+            ``".asciidoc"``    :py:class:`~.AsciiDocTableWriter`
+            ``".asc"``         :py:class:`~.AsciiDocTableWriter`
+            ``".css"``         :py:class:`~.CssTableWriter`
+            ``".csv"``         :py:class:`~.CsvTableWriter`
+            ``".htm"``         :py:class:`~.HtmlTableWriter`
+            ``".html"``        :py:class:`~.HtmlTableWriter`
+            ``".js"``          :py:class:`~.JavaScriptTableWriter`
+            ``".json"``        :py:class:`~.JsonTableWriter`
+            ``".jsonl"``       :py:class:`~.JsonLinesTableWriter`
+            ``".ltsv"``        :py:class:`~.LtsvTableWriter`
+            ``".ldjson"``      :py:class:`~.JsonLinesTableWriter`
+            ``".md"``          :py:class:`~.MarkdownTableWriter`
+            ``".ndjson"``      :py:class:`~.JsonLinesTableWriter`
+            ``".py"``          :py:class:`~.PythonCodeTableWriter`
+            ``".rst"``         :py:class:`~.RstGridTableWriter`
+            ``".tsv"``         :py:class:`~.TsvTableWriter`
+            ``".xls"``         :py:class:`~.ExcelXlsTableWriter`
+            ``".xlsx"``        :py:class:`~.ExcelXlsxTableWriter`
+            ``".sqlite"``      :py:class:`~.SqliteTableWriter`
+            ``".sqlite3"``     :py:class:`~.SqliteTableWriter`
+            ``".toml"``        :py:class:`~.TomlTableWriter`
+            ``".yml"``         :py:class:`~.YamlTableWriter`
+            ================== ===================================
+
+        :param str file_extension:
+            File extension string (case insensitive).
+        :param kwargs:
+            Keyword arguments that are passed to the writer class constructor.
+        :return:
+            Writer instance that corresponds to the ``file_extension``.
+        :rtype:
+            :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
+        :raises pytablewriter.WriterNotFoundError:
+            |WriterNotFoundError_desc| the file extension.
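+        :Example:
+            A minimal usage sketch (``headers``/``value_matrix``/``dumps`` are
+            the common ``AbstractTableWriter`` attributes and method):
+
+            .. code:: python
+
+                >>> import pytablewriter as ptw
+                >>> writer = ptw.TableWriterFactory.create_from_file_extension(".md")
+                >>> writer.headers = ["a", "b"]
+                >>> writer.value_matrix = [[1, 2], [3, 4]]
+                >>> print(writer.dumps())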
+ """ + + ext = os.path.splitext(file_extension)[1] + if typepy.is_null_string(ext): + file_extension = file_extension + else: + file_extension = ext + + file_extension = file_extension.lstrip(".").lower() + + for table_format in TableFormat: + if file_extension not in table_format.file_extensions: + continue + + if table_format.format_attribute & FormatAttr.SECONDARY_EXT: + continue + + logger.debug(f"create a {table_format.writer_class} instance") + + return table_format.writer_class(**kwargs) # type: ignore + + raise WriterNotFoundError( + "\n".join( + [ + f"{file_extension:s} (unknown file extension).", + "", + "acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())), + ] + ) + ) + + @classmethod + def create_from_format_name(cls, format_name: str, **kwargs: Any) -> AbstractTableWriter: + """ + Create a table writer class instance from a format name. + Supported file format names are as follows: + + ============================================= =================================== + Format name Writer Class + ============================================= =================================== + ``"adoc"`` :py:class:`~.AsciiDocTableWriter` + ``"asciidoc"`` :py:class:`~.AsciiDocTableWriter` + ``"css"`` :py:class:`~.CssTableWriter` + ``"csv"`` :py:class:`~.CsvTableWriter` + ``"elasticsearch"`` :py:class:`~.ElasticsearchWriter` + ``"excel"`` :py:class:`~.ExcelXlsxTableWriter` + ``"html"``/``"htm"`` :py:class:`~.HtmlTableWriter` + ``"javascript"``/``"js"`` :py:class:`~.JavaScriptTableWriter` + ``"json"`` :py:class:`~.JsonTableWriter` + ``"json_lines"`` :py:class:`~.JsonLinesTableWriter` + ``"latex_matrix"`` :py:class:`~.LatexMatrixWriter` + ``"latex_table"`` :py:class:`~.LatexTableWriter` + ``"ldjson"`` :py:class:`~.JsonLinesTableWriter` + ``"ltsv"`` :py:class:`~.LtsvTableWriter` + ``"markdown"``/``"md"`` :py:class:`~.MarkdownTableWriter` + ``"mediawiki"`` :py:class:`~.MediaWikiTableWriter` + ``"null"`` :py:class:`~.NullTableWriter` + ``"pandas"`` :py:class:`~.PandasDataFrameWriter` + ``"py"``/``"python"`` :py:class:`~.PythonCodeTableWriter` + ``"rst"``/``"rst_grid"``/``"rst_grid_table"`` :py:class:`~.RstGridTableWriter` + ``"rst_simple"``/``"rst_simple_table"`` :py:class:`~.RstSimpleTableWriter` + ``"rst_csv"``/``"rst_csv_table"`` :py:class:`~.RstCsvTableWriter` + ``"sqlite"`` :py:class:`~.SqliteTableWriter` + ``"ssv"`` :py:class:`~.SpaceAlignedTableWriter` + ``"tsv"`` :py:class:`~.TsvTableWriter` + ``"toml"`` :py:class:`~.TomlTableWriter` + ``"unicode"`` :py:class:`~.UnicodeTableWriter` + ``"yaml"`` :py:class:`~.YamlTableWriter` + ============================================= =================================== + + :param str format_name: + Format name string (case insensitive). + :param kwargs: + Keyword arguments that pass to a writer class constructor. + :return: + Writer instance that coincides with the ``format_name``: + :rtype: + :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface` + :raises pytablewriter.WriterNotFoundError: + |WriterNotFoundError_desc| for the format. 
+ """ + + format_name = format_name.casefold() + + for table_format in TableFormat: + if format_name in table_format.names and not ( + table_format.format_attribute & FormatAttr.SECONDARY_NAME + ): + writer = table_format.writer_class(**kwargs) # type: ignore + logger.debug(f"create a {writer.FORMAT_NAME} instance") + + return writer + + raise WriterNotFoundError( + "\n".join( + [ + f"{format_name} (unknown format name).", + "acceptable format names are: {}.".format(", ".join(cls.get_format_names())), + ] + ) + ) + + @classmethod + def get_format_names(cls) -> List[str]: + """ + :return: Available format names. + :rtype: list + + :Example: + .. code:: python + + >>> import pytablewriter as ptw + >>> for name in ptw.TableWriterFactory.get_format_names(): + ... print(name) + ... + adoc + asciidoc + bold_unicode + borderless + css + csv + elasticsearch + excel + htm + html + javascript + js + json + json_lines + jsonl + latex_matrix + latex_table + ldjson + ltsv + markdown + md + mediawiki + ndjson + null + numpy + pandas + pandas_pickle + py + python + rst + rst_csv + rst_csv_table + rst_grid + rst_grid_table + rst_simple + rst_simple_table + space_aligned + sqlite + ssv + toml + tsv + unicode + yaml + + """ + + return sorted(list(set(chain(*(table_format.names for table_format in TableFormat))))) + + @classmethod + def get_extensions(cls) -> List[str]: + """ + :return: Available file extensions. + :rtype: list + + :Example: + .. code:: python + + >>> import pytablewriter as ptw + >>> for name in ptw.TableWriterFactory.get_extensions(): + ... print(name) + ... + adoc + asc + asciidoc + css + csv + htm + html + js + json + jsonl + ldjson + ltsv + md + ndjson + py + rst + sqlite + sqlite3 + tex + toml + tsv + xls + xlsx + yml + """ + + file_extension_set = set() + for table_format in TableFormat: + for file_extension in table_format.file_extensions: + file_extension_set.add(file_extension) + + return sorted(list(file_extension_set)) diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/_function.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_function.py new file mode 100644 index 0000000000000000000000000000000000000000..b8e2bfca7c0c80ce1ff4ffe283e578da91523869 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_function.py @@ -0,0 +1,84 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from datetime import datetime +from enum import Enum +from typing import Any, Optional, Type + +import dataproperty +from pathvalidate import replace_symbol +from tabledata._core import TableData + + +def quote_datetime_formatter(value: datetime) -> str: + return f'"{value.strftime(dataproperty.DefaultValue.DATETIME_FORMAT):s}"' + + +def dateutil_datetime_formatter(value: datetime) -> str: + return 'dateutil.parser.parse("{:s}")'.format( + value.strftime(dataproperty.DefaultValue.DATETIME_FORMAT) + ) + + +def dumps_tabledata(value: TableData, format_name: str = "rst_grid_table", **kwargs: Any) -> str: + """ + :param tabledata.TableData value: Tabular data to dump. + :param str format_name: + Dumped format name of tabular data. + Available formats are described in + :py:meth:`~pytablewriter.TableWriterFactory.create_from_format_name` + + :Example: + .. code:: python + + >>> dumps_tabledata(value) + .. 
table:: sample_data
+
+            ====== ====== ======
+            attr_a attr_b attr_c
+            ====== ====== ======
+                 1    4.0 a
+                 2    2.1 bb
+                 3  120.9 ccc
+            ====== ====== ======
+    """
+
+    from ._factory import TableWriterFactory
+
+    if not value:
+        raise TypeError("value must be a tabledata.TableData instance")
+
+    writer = TableWriterFactory.create_from_format_name(format_name)
+
+    for attr_name, attr_value in kwargs.items():
+        setattr(writer, attr_name, attr_value)
+
+    writer.from_tabledata(value)
+
+    return writer.dumps()
+
+
+def normalize_enum(
+    value: Any, enum_class: Type[Enum], validate: bool = True, default: Optional[Enum] = None
+) -> Any:
+    if value is None:
+        return default
+
+    if isinstance(value, enum_class):
+        return value
+
+    try:
+        return enum_class[replace_symbol(value.strip(), "_").upper()]
+    except AttributeError:
+        if validate:
+            raise TypeError(f"value must be a {enum_class} or a str: actual={type(value)}")
+    except KeyError:
+        if validate:
+            raise ValueError(
+                "invalid value found: expected={}, actual={}".format(
+                    "/".join(item.name for item in enum_class), value
+                )
+            )
+
+    return value
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/_table_format.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_table_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..785a3b4a49a5bd5331caf857bf64f1aa46c11965
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_table_format.py
@@ -0,0 +1,353 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import enum
+from typing import List, Optional, Sequence
+
+from .writer import (
+    AbstractTableWriter,
+    AsciiDocTableWriter,
+    BoldUnicodeTableWriter,
+    BorderlessTableWriter,
+    CssTableWriter,
+    CsvTableWriter,
+    ElasticsearchWriter,
+    ExcelXlsTableWriter,
+    ExcelXlsxTableWriter,
+    HtmlTableWriter,
+    JavaScriptTableWriter,
+    JsonLinesTableWriter,
+    JsonTableWriter,
+    LatexMatrixWriter,
+    LatexTableWriter,
+    LtsvTableWriter,
+    MarkdownTableWriter,
+    MediaWikiTableWriter,
+    NullTableWriter,
+    NumpyTableWriter,
+    PandasDataFramePickleWriter,
+    PandasDataFrameWriter,
+    PythonCodeTableWriter,
+    RstCsvTableWriter,
+    RstGridTableWriter,
+    RstSimpleTableWriter,
+    SpaceAlignedTableWriter,
+    SqliteTableWriter,
+    TomlTableWriter,
+    TsvTableWriter,
+    UnicodeTableWriter,
+    YamlTableWriter,
+)
+
+
+class FormatAttr:
+    """
+    Bitmaps to represent table attributes.
+    """
+
+    NONE = 1 << 1
+
+    #: Can create a file with the format.
+    FILE = 1 << 2
+
+    #: Table format that can be represented as text.
+    TEXT = 1 << 3
+
+    #: Table format that can be represented as a binary file.
+    BIN = 1 << 4
+
+    #: Can create source code (variable definitions) in
+    #: one of the programming languages.
+    SOURCECODE = 1 << 5
+
+    #: Can call an API of an external service.
+    API = 1 << 6
+
+    SECONDARY_EXT = 1 << 10
+    SECONDARY_NAME = 1 << 11
+
+
+@enum.unique
+class TableFormat(enum.Enum):
+    """
+    Enum to represent table format attributes.
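+
+    Example:
+        A quick lookup sketch (format names as returned by
+        ``TableWriterFactory.get_format_names``)::
+
+            >>> from pytablewriter import TableFormat
+            >>> TableFormat.from_name("markdown").names
+            ['markdown', 'md']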
+ """ + + ASCIIDOC = ( + [AsciiDocTableWriter.FORMAT_NAME, "adoc"], + AsciiDocTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["adoc", "asciidoc", "asc"], + ) + CSV = ([CsvTableWriter.FORMAT_NAME], CsvTableWriter, FormatAttr.FILE | FormatAttr.TEXT, ["csv"]) + CSS = ( + [CssTableWriter.FORMAT_NAME], + CssTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["css"], + ) + ELASTICSEARCH = ( + [ElasticsearchWriter.FORMAT_NAME], # type: ignore + ElasticsearchWriter, + FormatAttr.API, + [], + ) + EXCEL_XLSX = ( + [ExcelXlsxTableWriter.FORMAT_NAME], + ExcelXlsxTableWriter, + FormatAttr.FILE | FormatAttr.BIN, + ["xlsx"], + ) + EXCEL_XLS = ( + [ExcelXlsTableWriter.FORMAT_NAME], + ExcelXlsTableWriter, + FormatAttr.FILE | FormatAttr.BIN | FormatAttr.SECONDARY_NAME, + ["xls"], + ) + HTML = ( + [HtmlTableWriter.FORMAT_NAME, "htm"], + HtmlTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["html", "htm"], + ) + JAVASCRIPT = ( + [JavaScriptTableWriter.FORMAT_NAME, "js"], + JavaScriptTableWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE, + ["js"], + ) + JSON = ( + [JsonTableWriter.FORMAT_NAME], + JsonTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["json"], + ) + JSON_LINES = ( + [JsonLinesTableWriter.FORMAT_NAME, "jsonl", "ldjson", "ndjson"], + JsonLinesTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["jsonl", "ldjson", "ndjson"], + ) + LATEX_MATRIX = ( + [LatexMatrixWriter.FORMAT_NAME], + LatexMatrixWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["tex"], + ) + LATEX_TABLE = ( + [LatexTableWriter.FORMAT_NAME], + LatexTableWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SECONDARY_EXT, + ["tex"], + ) + LTSV = ( + [LtsvTableWriter.FORMAT_NAME], + LtsvTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["ltsv"], + ) + MARKDOWN = ( + [MarkdownTableWriter.FORMAT_NAME, "md"], + MarkdownTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["md"], + ) + MEDIAWIKI = ( + [MediaWikiTableWriter.FORMAT_NAME], # type: ignore + MediaWikiTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + [], + ) + NULL = ( + [NullTableWriter.FORMAT_NAME], # type: ignore + NullTableWriter, + FormatAttr.NONE, + [], + ) + NUMPY = ( + [NumpyTableWriter.FORMAT_NAME], + NumpyTableWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE | FormatAttr.SECONDARY_EXT, + ["py"], + ) + PANDAS = ( + [PandasDataFrameWriter.FORMAT_NAME], + PandasDataFrameWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE | FormatAttr.SECONDARY_EXT, + ["py"], + ) + PANDAS_PICKLE = ( + [PandasDataFramePickleWriter.FORMAT_NAME], # type: ignore + PandasDataFramePickleWriter, + FormatAttr.FILE | FormatAttr.BIN, + [], + ) + PYTHON = ( + [PythonCodeTableWriter.FORMAT_NAME, "py"], + PythonCodeTableWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE, + ["py"], + ) + RST_CSV_TABLE = ( + [RstCsvTableWriter.FORMAT_NAME, "rst_csv"], + RstCsvTableWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SECONDARY_EXT, + ["rst"], + ) + RST_GRID_TABLE = ( + [RstGridTableWriter.FORMAT_NAME, "rst_grid", "rst"], + RstGridTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["rst"], + ) + RST_SIMPLE_TABLE = ( + [RstSimpleTableWriter.FORMAT_NAME, "rst_simple"], + RstSimpleTableWriter, + FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SECONDARY_EXT, + ["rst"], + ) + SPACE_ALIGNED = ( + [SpaceAlignedTableWriter.FORMAT_NAME, "ssv"], # type: ignore + SpaceAlignedTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + [], + ) + SQLITE = ( + [SqliteTableWriter.FORMAT_NAME], + SqliteTableWriter, + FormatAttr.FILE | 
FormatAttr.BIN, + ["sqlite", "sqlite3"], + ) + TOML = ( + [TomlTableWriter.FORMAT_NAME], + TomlTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["toml"], + ) + TSV = ([TsvTableWriter.FORMAT_NAME], TsvTableWriter, FormatAttr.FILE | FormatAttr.TEXT, ["tsv"]) + UNICODE = ( + [UnicodeTableWriter.FORMAT_NAME], # type: ignore + UnicodeTableWriter, + FormatAttr.TEXT, + [], + ) + YAML = ( + [YamlTableWriter.FORMAT_NAME], + YamlTableWriter, + FormatAttr.FILE | FormatAttr.TEXT, + ["yml"], + ) + BOLD_UNICODE = ( + [BoldUnicodeTableWriter.FORMAT_NAME], # type: ignore + BoldUnicodeTableWriter, + FormatAttr.TEXT, + [], + ) + BORDERLESS = ( + [BorderlessTableWriter.FORMAT_NAME], # type: ignore + BorderlessTableWriter, + FormatAttr.TEXT, + [], + ) + + @property + def names(self) -> List[str]: + """ + List[str]: Names associated with the table format. + """ + + return self.__names + + @property + def writer_class(self) -> AbstractTableWriter: + """ + Type[AbstractTableWriter]: Table writer class object associated with the table format. + """ + + return self.__writer_class + + @property + def format_attribute(self) -> int: + """ + FormatAttr: Table attributes bitmap. + """ + + return self.__format_attribute + + @property + def file_extensions(self) -> List[str]: + """ + List[str]: File extensions associated with the table format. + """ + + return self.__file_extensions + + def __init__( + self, + names: Sequence[str], + writer_class: AbstractTableWriter, + format_attribute: int, + file_extensions: Sequence[str], + ) -> None: + self.__names = list(names) + self.__writer_class = writer_class + self.__format_attribute = format_attribute + self.__file_extensions = list(file_extensions) + + @classmethod + def find_all_attr(cls, format_attribute: int) -> List["TableFormat"]: + """Searching table formats that have specific attributes. + + Args: + format_attribute (FormatAttr): + Table format attributes to look for. + + Returns: + List[TableFormat]: Table formats that matched the attribute. + """ + + return [ + table_format + for table_format in TableFormat + if table_format.format_attribute & format_attribute + ] + + @classmethod + def from_name(cls, format_name: str) -> Optional["TableFormat"]: + """Get a table format from a format name. + + Args: + format_name (str): Table format specifier. + + Returns: + Optional[TableFormat]: A table format enum value corresponding to the ``format_name``. + """ + + format_name = format_name.casefold().strip() + + for table_format in TableFormat: + if format_name in table_format.names: + return table_format + + return None + + @classmethod + def from_file_extension(cls, file_extension: str) -> Optional["TableFormat"]: + """Get a table format from a file extension. + + Args: + file_extension (str): File extension. + + Returns: + Optional[TableFormat]: + A table format enum value corresponding to the ``file_extension``. 
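+
+        Example:
+            A minimal sketch (the leading dot and letter case are ignored)::
+
+                >>> TableFormat.from_file_extension(".MD") is TableFormat.MARKDOWN
+                True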
+ """ + + ext = file_extension.lower().strip().lstrip(".") + + for table_format in TableFormat: + if ext in table_format.file_extensions: + return table_format + + return None diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/_typing.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..ff4ec48320d495d703933b282718c6bd8035c3e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/_typing.py @@ -0,0 +1,2855 @@ +# type: ignore + +""" +source code from: python/typing/typing_extensions/src_py3/typing_extensions.py +tag: 3.10.0.2 + +Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved + +https://github.com/python/typing/blob/3.10.0.2/LICENSE +""" + +import abc +import collections +import contextlib +import sys +import typing +import collections.abc as collections_abc +import operator + +# These are used by Protocol implementation +# We use internal typing helpers here, but this significantly reduces +# code duplication. (Also this is only until Protocol is in typing.) +from typing import Generic, Callable, TypeVar, Tuple + +# After PEP 560, internal typing API was substantially reworked. +# This is especially important for Protocol class which uses internal APIs +# quite extensivelly. +PEP_560 = sys.version_info[:3] >= (3, 7, 0) + +if PEP_560: + GenericMeta = TypingMeta = type + from typing import _GenericAlias +else: + from typing import GenericMeta, TypingMeta +OLD_GENERICS = False +try: + from typing import _type_vars, _next_in_mro, _type_check +except ImportError: + OLD_GENERICS = True +try: + from typing import _subs_tree # noqa + SUBS_TREE = True +except ImportError: + SUBS_TREE = False +try: + from typing import _tp_cache +except ImportError: + def _tp_cache(x): + return x +try: + from typing import _TypingEllipsis, _TypingEmpty +except ImportError: + class _TypingEllipsis: + pass + + class _TypingEmpty: + pass + + +# The two functions below are copies of typing internal helpers. +# They are needed by _ProtocolMeta + + +def _no_slots_copy(dct): + dict_copy = dict(dct) + if '__slots__' in dict_copy: + for slot in dict_copy['__slots__']: + dict_copy.pop(slot, None) + return dict_copy + + +def _check_generic(cls, parameters): + if not cls.__parameters__: + raise TypeError("%s is not a generic class" % repr(cls)) + alen = len(parameters) + elen = len(cls.__parameters__) + if alen != elen: + raise TypeError("Too %s parameters for %s; actual %s, expected %s" % + ("many" if alen > elen else "few", repr(cls), alen, elen)) + + +if hasattr(typing, '_generic_new'): + _generic_new = typing._generic_new +else: + # Note: The '_generic_new(...)' function is used as a part of the + # process of creating a generic type and was added to the typing module + # as of Python 3.5.3. + # + # We've defined '_generic_new(...)' below to exactly match the behavior + # implemented in older versions of 'typing' bundled with Python 3.5.0 to + # 3.5.2. This helps eliminate redundancy when defining collection types + # like 'Deque' later. + # + # See https://github.com/python/typing/pull/308 for more details -- in + # particular, compare and contrast the definition of types like + # 'typing.List' before and after the merge. 
+ + def _generic_new(base_cls, cls, *args, **kwargs): + return base_cls.__new__(cls, *args, **kwargs) + +# See https://github.com/python/typing/pull/439 +if hasattr(typing, '_geqv'): + from typing import _geqv + _geqv_defined = True +else: + _geqv = None + _geqv_defined = False + +if sys.version_info[:2] >= (3, 6): + import _collections_abc + _check_methods_in_mro = _collections_abc._check_methods +else: + def _check_methods_in_mro(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + + +# Please keep __all__ alphabetized within each category. +__all__ = [ + # Super-special typing primitives. + 'ClassVar', + 'Concatenate', + 'Final', + 'ParamSpec', + 'Type', + + # ABCs (from collections.abc). + # The following are added depending on presence + # of their non-generic counterparts in stdlib: + # 'Awaitable', + # 'AsyncIterator', + # 'AsyncIterable', + # 'Coroutine', + # 'AsyncGenerator', + # 'AsyncContextManager', + # 'ChainMap', + + # Concrete collection types. + 'ContextManager', + 'Counter', + 'Deque', + 'DefaultDict', + 'OrderedDict', + 'TypedDict', + + # Structural checks, a.k.a. protocols. + 'SupportsIndex', + + # One-off things. + 'final', + 'IntVar', + 'Literal', + 'NewType', + 'overload', + 'Text', + 'TypeAlias', + 'TypeGuard', + 'TYPE_CHECKING', +] + +# Annotated relies on substitution trees of pep 560. It will not work for +# versions of typing older than 3.5.3 +HAVE_ANNOTATED = PEP_560 or SUBS_TREE + +if PEP_560: + __all__.extend(["get_args", "get_origin", "get_type_hints"]) + +if HAVE_ANNOTATED: + __all__.append("Annotated") + +# Protocols are hard to backport to the original version of typing 3.5.0 +HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0) + +if HAVE_PROTOCOLS: + __all__.extend(['Protocol', 'runtime', 'runtime_checkable']) + + +# TODO +if hasattr(typing, 'NoReturn'): + NoReturn = typing.NoReturn +elif hasattr(typing, '_FinalTypingBase'): + class _NoReturn(typing._FinalTypingBase, _root=True): + """Special type indicating functions that never return. + Example:: + + from typing import NoReturn + + def stop() -> NoReturn: + raise Exception('no way') + + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + """ + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("NoReturn cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("NoReturn cannot be used with issubclass().") + + NoReturn = _NoReturn(_root=True) +else: + class _NoReturnMeta(typing.TypingMeta): + """Metaclass for NoReturn""" + def __new__(cls, name, bases, namespace, _root=False): + return super().__new__(cls, name, bases, namespace, _root=_root) + + def __instancecheck__(self, obj): + raise TypeError("NoReturn cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("NoReturn cannot be used with issubclass().") + + class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True): + """Special type indicating functions that never return. + Example:: + + from typing import NoReturn + + def stop() -> NoReturn: + raise Exception('no way') + + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + """ + __slots__ = () + + +# Some unconstrained type variables. These are used by the container types. +# (These are not for export.) +T = typing.TypeVar('T') # Any type. 
+KT = typing.TypeVar('KT') # Key type. +VT = typing.TypeVar('VT') # Value type. +T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. +V_co = typing.TypeVar('V_co', covariant=True) # Any type covariant containers. +VT_co = typing.TypeVar('VT_co', covariant=True) # Value type covariant containers. +T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + + +if hasattr(typing, 'ClassVar'): + ClassVar = typing.ClassVar +elif hasattr(typing, '_FinalTypingBase'): + class _ClassVar(typing._FinalTypingBase, _root=True): + """Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + """ + + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + '{} accepts only single type.'.format(cls.__name__[1:])), + _root=True) + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _ClassVar): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + ClassVar = _ClassVar(_root=True) +else: + class _ClassVarMeta(typing.TypingMeta): + """Metaclass for ClassVar""" + + def __new__(cls, name, bases, namespace, tp=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if tp is not None: + self.__type__ = tp + return self + + def __instancecheck__(self, obj): + raise TypeError("ClassVar cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("ClassVar cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is not None: + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + param = typing._type_check( + item, + '{} accepts only single type.'.format(cls.__name__[1:])) + return cls(self.__name__, self.__bases__, + dict(self.__dict__), tp=param, _root=True) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(self.__name__, self.__bases__, + dict(self.__dict__), tp=self.__type__, + _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, ClassVar): + return NotImplemented + if self.__type__ is not None: + return self.__type__ 
== other.__type__ + return self is other + + class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True): + """Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + """ + + __type__ = None + +# On older versions of typing there is an internal class named "Final". +if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): + Final = typing.Final +elif sys.version_info[:2] >= (3, 7): + class _FinalForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + '{} accepts only single type'.format(self._name)) + return _GenericAlias(self, (item,)) + + Final = _FinalForm('Final', + doc="""A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties.""") +elif hasattr(typing, '_FinalTypingBase'): + class _Final(typing._FinalTypingBase, _root=True): + """A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. 
+ """ + + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + '{} accepts only single type.'.format(cls.__name__[1:])), + _root=True) + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _Final): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + Final = _Final(_root=True) +else: + class _FinalMeta(typing.TypingMeta): + """Metaclass for Final""" + + def __new__(cls, name, bases, namespace, tp=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if tp is not None: + self.__type__ = tp + return self + + def __instancecheck__(self, obj): + raise TypeError("Final cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Final cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is not None: + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + param = typing._type_check( + item, + '{} accepts only single type.'.format(cls.__name__[1:])) + return cls(self.__name__, self.__bases__, + dict(self.__dict__), tp=param, _root=True) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(self.__name__, self.__bases__, + dict(self.__dict__), tp=self.__type__, + _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, Final): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + class Final(typing.Final, metaclass=_FinalMeta, _root=True): + """A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + """ + + __type__ = None + + +if hasattr(typing, 'final'): + final = typing.final +else: + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and the decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties.
+ """ + return f + + +def IntVar(name): + return TypeVar(name) + + +if hasattr(typing, 'Literal'): + Literal = typing.Literal +elif sys.version_info[:2] >= (3, 7): + class _LiteralForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + return _GenericAlias(self, parameters) + + Literal = _LiteralForm('Literal', + doc="""A type that can be used to indicate to type checkers + that the corresponding value has a value literally equivalent + to the provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to + the value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime + checking verifying that the parameter is actually a value + instead of a type.""") +elif hasattr(typing, '_FinalTypingBase'): + class _Literal(typing._FinalTypingBase, _root=True): + """A type that can be used to indicate to type checkers that the + corresponding value has a value literally equivalent to the + provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to the + value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime checking + verifying that the parameter is actually a value instead of a type. + """ + + __slots__ = ('__values__',) + + def __init__(self, values=None, **kwds): + self.__values__ = values + + def __getitem__(self, values): + cls = type(self) + if self.__values__ is None: + if not isinstance(values, tuple): + values = (values,) + return cls(values, _root=True) + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + return self + + def __repr__(self): + r = super().__repr__() + if self.__values__ is not None: + r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__))) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__values__)) + + def __eq__(self, other): + if not isinstance(other, _Literal): + return NotImplemented + if self.__values__ is not None: + return self.__values__ == other.__values__ + return self is other + + Literal = _Literal(_root=True) +else: + class _LiteralMeta(typing.TypingMeta): + """Metaclass for Literal""" + + def __new__(cls, name, bases, namespace, values=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if values is not None: + self.__values__ = values + return self + + def __instancecheck__(self, obj): + raise TypeError("Literal cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Literal cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__values__ is not None: + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + if not isinstance(item, tuple): + item = (item,) + return cls(self.__name__, self.__bases__, + dict(self.__dict__), values=item, _root=True) + + def _eval_type(self, globalns, localns): + return self + + def __repr__(self): + r = super().__repr__() + if self.__values__ is not None: + r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__))) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__values__)) + + def __eq__(self, other): + if not isinstance(other, Literal): + return NotImplemented + if self.__values__ is not None: + return self.__values__ == other.__values__ + return self is 
other + + class Literal(typing.Final, metaclass=_LiteralMeta, _root=True): + """A type that can be used to indicate to type checkers that the + corresponding value has a value literally equivalent to the + provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to the + value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime checking + verifying that the parameter is actually a value instead of a type. + """ + + __values__ = None + + +def _overload_dummy(*args, **kwds): + """Helper for @overload to raise when called.""" + raise NotImplementedError( + "You should not call an overloaded function. " + "A series of @overload-decorated functions " + "outside a stub module should always be followed " + "by an implementation that is not @overload-ed.") + + +def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + """ + return _overload_dummy + + +# This is not a real generic class. Don't use outside annotations. +if hasattr(typing, 'Type'): + Type = typing.Type +else: + # Internal type variable used for Type[]. + CT_co = typing.TypeVar('CT_co', covariant=True, bound=type) + + class Type(typing.Generic[CT_co], extra=type): + """A special construct usable to annotate class objects. + + For example, suppose we have the following classes:: + + class User: ... # Abstract base for User classes + class BasicUser(User): ... + class ProUser(User): ... + class TeamUser(User): ... + + And a function that takes a class argument that's a subclass of + User and returns an instance of the corresponding class:: + + U = TypeVar('U', bound=User) + def new_user(user_class: Type[U]) -> U: + user = user_class() + # (Here we could write the user object to a database) + return user + joe = new_user(BasicUser) + + At this point the type checker knows that joe has type BasicUser. + """ + + __slots__ = () + + +# Various ABCs mimicking those in collections.abc. +# A few are simply re-exported for completeness. + +def _define_guard(type_name): + """ + Returns True if the given type isn't defined in typing but + is defined in collections_abc. + + Adds the type to __all__ if the collection is found in either + typing or collection_abc. + """ + if hasattr(typing, type_name): + __all__.append(type_name) + globals()[type_name] = getattr(typing, type_name) + return False + elif hasattr(collections_abc, type_name): + __all__.append(type_name) + return True + else: + return False + + +class _ExtensionsGenericMeta(GenericMeta): + def __subclasscheck__(self, subclass): + """This mimics a more modern GenericMeta.__subclasscheck__() logic + (that does not have problems with recursion) to work around interactions + between collections, typing, and typing_extensions on older + versions of Python, see https://github.com/python/typing/issues/501. 
+ """ + if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0): + if self.__origin__ is not None: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: + raise TypeError("Parameterized generics cannot be used with class " + "or instance checks") + return False + if not self.__extra__: + return super().__subclasscheck__(subclass) + res = self.__extra__.__subclasshook__(subclass) + if res is not NotImplemented: + return res + if self.__extra__ in subclass.__mro__: + return True + for scls in self.__extra__.__subclasses__(): + if isinstance(scls, GenericMeta): + continue + if issubclass(subclass, scls): + return True + return False + + +if _define_guard('Awaitable'): + class Awaitable(typing.Generic[T_co], metaclass=_ExtensionsGenericMeta, + extra=collections_abc.Awaitable): + __slots__ = () + + +if _define_guard('Coroutine'): + class Coroutine(Awaitable[V_co], typing.Generic[T_co, T_contra, V_co], + metaclass=_ExtensionsGenericMeta, + extra=collections_abc.Coroutine): + __slots__ = () + + +if _define_guard('AsyncIterable'): + class AsyncIterable(typing.Generic[T_co], + metaclass=_ExtensionsGenericMeta, + extra=collections_abc.AsyncIterable): + __slots__ = () + + +if _define_guard('AsyncIterator'): + class AsyncIterator(AsyncIterable[T_co], + metaclass=_ExtensionsGenericMeta, + extra=collections_abc.AsyncIterator): + __slots__ = () + + +if hasattr(typing, 'Deque'): + Deque = typing.Deque +elif _geqv_defined: + class Deque(collections.deque, typing.MutableSequence[T], + metaclass=_ExtensionsGenericMeta, + extra=collections.deque): + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, Deque): + return collections.deque(*args, **kwds) + return _generic_new(collections.deque, cls, *args, **kwds) +else: + class Deque(collections.deque, typing.MutableSequence[T], + metaclass=_ExtensionsGenericMeta, + extra=collections.deque): + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Deque: + return collections.deque(*args, **kwds) + return _generic_new(collections.deque, cls, *args, **kwds) + + +if hasattr(typing, 'ContextManager'): + ContextManager = typing.ContextManager +elif hasattr(contextlib, 'AbstractContextManager'): + class ContextManager(typing.Generic[T_co], + metaclass=_ExtensionsGenericMeta, + extra=contextlib.AbstractContextManager): + __slots__ = () +else: + class ContextManager(typing.Generic[T_co]): + __slots__ = () + + def __enter__(self): + return self + + @abc.abstractmethod + def __exit__(self, exc_type, exc_value, traceback): + return None + + @classmethod + def __subclasshook__(cls, C): + if cls is ContextManager: + # In Python 3.6+, it is possible to set a method to None to + # explicitly indicate that the class does not implement an ABC + # (https://bugs.python.org/issue25958), but we do not support + # that pattern here because this fallback class is only used + # in Python 3.5 and earlier. 
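+ # The structural check below therefore only tests that __enter__ and + # __exit__ appear somewhere in C's MRO; their signatures are not + # inspected.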
+ if (any("__enter__" in B.__dict__ for B in C.__mro__) and + any("__exit__" in B.__dict__ for B in C.__mro__)): + return True + return NotImplemented + + +if hasattr(typing, 'AsyncContextManager'): + AsyncContextManager = typing.AsyncContextManager + __all__.append('AsyncContextManager') +elif hasattr(contextlib, 'AbstractAsyncContextManager'): + class AsyncContextManager(typing.Generic[T_co], + metaclass=_ExtensionsGenericMeta, + extra=contextlib.AbstractAsyncContextManager): + __slots__ = () + + __all__.append('AsyncContextManager') +elif sys.version_info[:2] >= (3, 5): + exec(""" +class AsyncContextManager(typing.Generic[T_co]): + __slots__ = () + + async def __aenter__(self): + return self + + @abc.abstractmethod + async def __aexit__(self, exc_type, exc_value, traceback): + return None + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncContextManager: + return _check_methods_in_mro(C, "__aenter__", "__aexit__") + return NotImplemented + +__all__.append('AsyncContextManager') +""") + + +if hasattr(typing, 'DefaultDict'): + DefaultDict = typing.DefaultDict +elif _geqv_defined: + class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.defaultdict): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, DefaultDict): + return collections.defaultdict(*args, **kwds) + return _generic_new(collections.defaultdict, cls, *args, **kwds) +else: + class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.defaultdict): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is DefaultDict: + return collections.defaultdict(*args, **kwds) + return _generic_new(collections.defaultdict, cls, *args, **kwds) + + +if hasattr(typing, 'OrderedDict'): + OrderedDict = typing.OrderedDict +elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2): + OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) +elif _geqv_defined: + class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.OrderedDict): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, OrderedDict): + return collections.OrderedDict(*args, **kwds) + return _generic_new(collections.OrderedDict, cls, *args, **kwds) +else: + class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.OrderedDict): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is OrderedDict: + return collections.OrderedDict(*args, **kwds) + return _generic_new(collections.OrderedDict, cls, *args, **kwds) + + +if hasattr(typing, 'Counter'): + Counter = typing.Counter +elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1): + assert _geqv_defined + _TInt = typing.TypeVar('_TInt') + + class _CounterMeta(typing.GenericMeta): + """Metaclass for Counter""" + def __getitem__(self, item): + return super().__getitem__((item, int)) + + class Counter(collections.Counter, + typing.Dict[T, int], + metaclass=_CounterMeta, + extra=collections.Counter): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, Counter): + return collections.Counter(*args, **kwds) + return _generic_new(collections.Counter, cls, *args, **kwds) + +elif _geqv_defined: + class Counter(collections.Counter, + typing.Dict[T, int], + metaclass=_ExtensionsGenericMeta, extra=collections.Counter): + + __slots__ = () + + def __new__(cls, *args, 
**kwds): + if _geqv(cls, Counter): + return collections.Counter(*args, **kwds) + return _generic_new(collections.Counter, cls, *args, **kwds) + +else: + class Counter(collections.Counter, + typing.Dict[T, int], + metaclass=_ExtensionsGenericMeta, extra=collections.Counter): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Counter: + return collections.Counter(*args, **kwds) + return _generic_new(collections.Counter, cls, *args, **kwds) + + +if hasattr(typing, 'ChainMap'): + ChainMap = typing.ChainMap + __all__.append('ChainMap') +elif hasattr(collections, 'ChainMap'): + # ChainMap only exists in 3.3+ + if _geqv_defined: + class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.ChainMap): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, ChainMap): + return collections.ChainMap(*args, **kwds) + return _generic_new(collections.ChainMap, cls, *args, **kwds) + else: + class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.ChainMap): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is ChainMap: + return collections.ChainMap(*args, **kwds) + return _generic_new(collections.ChainMap, cls, *args, **kwds) + + __all__.append('ChainMap') + + +if _define_guard('AsyncGenerator'): + class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra], + metaclass=_ExtensionsGenericMeta, + extra=collections_abc.AsyncGenerator): + __slots__ = () + + +if hasattr(typing, 'NewType'): + NewType = typing.NewType +else: + def NewType(name, tp): + """NewType creates simple unique types with almost zero + runtime overhead. NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy function that simply returns its argument. Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num = UserId(5) + 1 # type: int + """ + + def new_type(x): + return x + + new_type.__name__ = name + new_type.__supertype__ = tp + return new_type + + +if hasattr(typing, 'Text'): + Text = typing.Text +else: + Text = str + + +if hasattr(typing, 'TYPE_CHECKING'): + TYPE_CHECKING = typing.TYPE_CHECKING +else: + # Constant that's True when type checking, but False here. 
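+ # (Typical use: guard imports that only static type checkers need with + # "if TYPE_CHECKING:" so they are never executed at runtime.)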
+ TYPE_CHECKING = False + + +def _gorg(cls): + """This function exists for compatibility with old typing versions.""" + assert isinstance(cls, GenericMeta) + if hasattr(cls, '_gorg'): + return cls._gorg + while cls.__origin__ is not None: + cls = cls.__origin__ + return cls + + +if OLD_GENERICS: + def _next_in_mro(cls): # noqa + """This function exists for compatibility with old typing versions.""" + next_in_mro = object + for i, c in enumerate(cls.__mro__[:-1]): + if isinstance(c, GenericMeta) and _gorg(c) is Generic: + next_in_mro = cls.__mro__[i + 1] + return next_in_mro + + +_PROTO_WHITELIST = ['Callable', 'Awaitable', + 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', + 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', + 'ContextManager', 'AsyncContextManager'] + + +def _get_protocol_attrs(cls): + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in ('Protocol', 'Generic'): + continue + annotations = getattr(base, '__annotations__', {}) + for attr in list(base.__dict__.keys()) + list(annotations.keys()): + if (not attr.startswith('_abc_') and attr not in ( + '__abstractmethods__', '__annotations__', '__weakref__', + '_is_protocol', '_is_runtime_protocol', '__dict__', + '__args__', '__slots__', + '__next_in_mro__', '__parameters__', '__origin__', + '__orig_bases__', '__extra__', '__tree_hash__', + '__doc__', '__subclasshook__', '__init__', '__new__', + '__module__', '_MutableMapping__marker', '_gorg')): + attrs.add(attr) + return attrs + + +def _is_callable_members_only(cls): + return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) + + +if hasattr(typing, 'Protocol'): + Protocol = typing.Protocol +elif HAVE_PROTOCOLS and not PEP_560: + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(GenericMeta): + """Internal metaclass for Protocol. + + This exists so Protocol classes can be generic without deriving + from Generic. + """ + if not OLD_GENERICS: + def __new__(cls, name, bases, namespace, + tvars=None, args=None, origin=None, extra=None, orig_bases=None): + # This is just a version copied from GenericMeta.__new__ that + # includes "Protocol" special treatment. (Comments removed for brevity.) + assert extra is None # Protocols should not have extra + if tvars is not None: + assert origin is not None + assert all(isinstance(t, TypeVar) for t in tvars), tvars + else: + tvars = _type_vars(bases) + gvars = None + for base in bases: + if base is Generic: + raise TypeError("Cannot inherit from plain Generic") + if (isinstance(base, GenericMeta) and + base.__origin__ in (Generic, Protocol)): + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...] or" + " Protocol[...] 
multiple times.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + raise TypeError( + "Some type variables (%s) " + "are not listed in %s[%s]" % + (", ".join(str(t) for t in tvars if t not in gvarset), + "Generic" if any(b.__origin__ is Generic + for b in bases) else "Protocol", + ", ".join(str(g) for g in gvars))) + tvars = gvars + + initial_bases = bases + if (extra is not None and type(extra) is abc.ABCMeta and + extra not in bases): + bases = (extra,) + bases + bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b + for b in bases) + if any(isinstance(b, GenericMeta) and b is not Generic for b in bases): + bases = tuple(b for b in bases if b is not Generic) + namespace.update({'__origin__': origin, '__extra__': extra}) + self = super(GenericMeta, cls).__new__(cls, name, bases, namespace, + _root=True) + super(GenericMeta, self).__setattr__('_gorg', + self if not origin else + _gorg(origin)) + self.__parameters__ = tvars + self.__args__ = tuple(... if a is _TypingEllipsis else + () if a is _TypingEmpty else + a for a in args) if args else None + self.__next_in_mro__ = _next_in_mro(self) + if orig_bases is None: + self.__orig_bases__ = initial_bases + elif origin is not None: + self._abc_registry = origin._abc_registry + self._abc_cache = origin._abc_cache + if hasattr(self, '_subs_tree'): + self.__tree_hash__ = (hash(self._subs_tree()) if origin else + super(GenericMeta, self).__hash__()) + return self + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol or + isinstance(b, _ProtocolMeta) and + b.__origin__ is Protocol + for b in cls.__bases__) + if cls._is_protocol: + for base in cls.__mro__[1:]: + if not (base in (object, Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, TypingMeta) and base._is_protocol or + isinstance(base, GenericMeta) and + base.__origin__ is Generic): + raise TypeError('Protocols can only inherit from other' + ' protocols, got %r' % base) + + cls.__init__ = _no_init + + def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + def __instancecheck__(self, instance): + # We need this method for situations where attributes are + # assigned in __init__. 
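+ # (A purely class-level lookup would miss such instance attributes, so + # the protocol branch below falls back to hasattr() checks against the + # instance itself.)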
+ if ((not getattr(self, '_is_protocol', False) or + _is_callable_members_only(self)) and + issubclass(instance.__class__, self)): + return True + if self._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(self, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(self)): + return True + return super(GenericMeta, self).__instancecheck__(instance) + + def __subclasscheck__(self, cls): + if self.__origin__ is not None: + if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: + raise TypeError("Parameterized generics cannot be used with class " + "or instance checks") + return False + if (self.__dict__.get('_is_protocol', None) and + not self.__dict__.get('_is_runtime_protocol', None)): + if sys._getframe(1).f_globals['__name__'] in ['abc', + 'functools', + 'typing']: + return False + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if (self.__dict__.get('_is_runtime_protocol', None) and + not _is_callable_members_only(self)): + if sys._getframe(1).f_globals['__name__'] in ['abc', + 'functools', + 'typing']: + return super(GenericMeta, self).__subclasscheck__(cls) + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + return super(GenericMeta, self).__subclasscheck__(cls) + + if not OLD_GENERICS: + @_tp_cache + def __getitem__(self, params): + # We also need to copy this from GenericMeta.__getitem__ to get + # special treatment of "Protocol". (Comments removed for brevity.) + if not isinstance(params, tuple): + params = (params,) + if not params and _gorg(self) is not Tuple: + raise TypeError( + "Parameter list to %s[...] cannot be empty" % self.__qualname__) + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if self in (Generic, Protocol): + if not all(isinstance(p, TypeVar) for p in params): + raise TypeError( + "Parameters to %r[...] must all be type variables" % self) + if len(set(params)) != len(params): + raise TypeError( + "Parameters to %r[...] must all be unique" % self) + tvars = params + args = params + elif self in (Tuple, Callable): + tvars = _type_vars(params) + args = params + elif self.__origin__ in (Generic, Protocol): + raise TypeError("Cannot subscript already-subscripted %s" % + repr(self)) + else: + _check_generic(self, params) + tvars = _type_vars(params) + args = params + + prepend = (self,) if self.__origin__ is None else () + return self.__class__(self.__name__, + prepend + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=tvars, + args=args, + origin=self, + extra=self.__extra__, + orig_bases=self.__orig_bases__) + + class Protocol(metaclass=_ProtocolMeta): + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto({bases}): + def meth(self) -> T: + ... 
+ """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if _gorg(cls) is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can be used only as a base class") + if OLD_GENERICS: + return _generic_new(_next_in_mro(cls), cls, *args, **kwds) + return _generic_new(cls.__next_in_mro__, cls, *args, **kwds) + if Protocol.__doc__ is not None: + Protocol.__doc__ = Protocol.__doc__.format(bases="Protocol, Generic[T]" if + OLD_GENERICS else "Protocol[T]") + + +elif PEP_560: + from typing import _type_check, _collect_type_vars # noqa + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(abc.ABCMeta): + # This metaclass is a bit unfortunate and exists only because of the lack + # of __instancehook__. + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if ((not getattr(cls, '_is_protocol', False) or + _is_callable_members_only(cls)) and + issubclass(instance.__class__, cls)): + return True + if cls._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(cls, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(cls)): + return True + return super().__instancecheck__(instance) + + class Protocol(metaclass=_ProtocolMeta): + # There is quite a lot of overlapping code with typing.Generic. + # Unfortunately it is hard to avoid this while these live in two different + # modules. The duplicated code will be removed when Protocol is moved to typing. + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if cls is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can only be used as a base class") + return super().__new__(cls) + + @_tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple): + params = (params,) + if not params and cls is not Tuple: + raise TypeError( + "Parameter list to {}[...] cannot be empty".format(cls.__qualname__)) + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if cls is Protocol: + # Generic can only be subscripted with unique type variables. + if not all(isinstance(p, TypeVar) for p in params): + i = 0 + while isinstance(params[i], TypeVar): + i += 1 + raise TypeError( + "Parameters to Protocol[...] must all be type variables." + " Parameter {} is {}".format(i + 1, params[i])) + if len(set(params)) != len(params): + raise TypeError( + "Parameters to Protocol[...] must all be unique") + else: + # Subscripting a regular Generic subclass. 
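+ # e.g. MyProto[int] for a class defined as "class MyProto(Protocol[T])"; + # _check_generic() validates the number of parameters against + # cls.__parameters__.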
+ _check_generic(cls, params) + return _GenericAlias(cls, params) + + def __init_subclass__(cls, *args, **kwargs): + tvars = [] + if '__orig_bases__' in cls.__dict__: + error = Generic in cls.__orig_bases__ + else: + error = Generic in cls.__bases__ + if error: + raise TypeError("Cannot inherit from plain Generic") + if '__orig_bases__' in cls.__dict__: + tvars = _collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, _GenericAlias) and + base.__origin__ in (Generic, Protocol)): + # for error messages + the_base = 'Generic' if base.__origin__ is Generic else 'Protocol' + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] multiple times.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError("Some type variables ({}) are" + " not listed in {}[{}]".format(s_vars, + the_base, s_args)) + tvars = gvars + cls.__parameters__ = tuple(tvars) + + # Determine if this is a protocol or a concrete subclass. + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol for b in cls.__bases__) + + # Set (or override) the protocol subclass hook. + def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not getattr(cls, '_is_runtime_protocol', False): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if not _is_callable_members_only(cls): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + # We have nothing more to do for non-protocols. + if not cls._is_protocol: + return + + # Check consistency of bases. + for base in cls.__bases__: + if not (base in (object, Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, _ProtocolMeta) and base._is_protocol): + raise TypeError('Protocols can only inherit from other' + ' protocols, got %r' % base) + cls.__init__ = _no_init + + +if hasattr(typing, 'runtime_checkable'): + runtime_checkable = typing.runtime_checkable +elif HAVE_PROTOCOLS: + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol, so that it + can be used with isinstance() and issubclass().
Raise TypeError + if applied to a non-protocol class. + + This allows a simple-minded structural check very similar to the + one-offs in collections.abc such as Hashable. + """ + if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + ' got %r' % cls) + cls._is_runtime_protocol = True + return cls + + +if HAVE_PROTOCOLS: + # Exists for backwards compatibility. + runtime = runtime_checkable + + +if hasattr(typing, 'SupportsIndex'): + SupportsIndex = typing.SupportsIndex +elif HAVE_PROTOCOLS: + @runtime_checkable + class SupportsIndex(Protocol): + __slots__ = () + + @abc.abstractmethod + def __index__(self) -> int: + pass + + +if sys.version_info >= (3, 9, 2): + # The standard library TypedDict in Python 3.8 does not store runtime information + # about which (if any) keys are optional. See https://bugs.python.org/issue38834 + # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" + # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 + TypedDict = typing.TypedDict +else: + def _check_fails(cls, other): + try: + if sys._getframe(1).f_globals['__name__'] not in ['abc', + 'functools', + 'typing']: + # Typed dicts are only for static structural subtyping. + raise TypeError('TypedDict does not support instance and class checks') + except (AttributeError, ValueError): + pass + return False + + def _dict_new(*args, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + return dict(*args, **kwargs) + + _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)' + + def _typeddict_new(*args, total=True, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + if args: + typename, args = args[0], args[1:] # allow the "_typename" keyword be passed + elif '_typename' in kwargs: + typename = kwargs.pop('_typename') + import warnings + warnings.warn("Passing '_typename' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError("TypedDict.__new__() missing 1 required positional " + "argument: '_typename'") + if args: + try: + fields, = args # allow the "_fields" keyword be passed + except ValueError: + raise TypeError('TypedDict.__new__() takes from 2 to 3 ' + 'positional arguments but {} ' + 'were given'.format(len(args) + 2)) + elif '_fields' in kwargs and len(kwargs) == 1: + fields = kwargs.pop('_fields') + import warnings + warnings.warn("Passing '_fields' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + fields = None + + if fields is None: + fields = kwargs + elif kwargs: + raise TypeError("TypedDict takes either a dict or keyword arguments," + " but not both") + + ns = {'__annotations__': dict(fields)} + try: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return _TypedDictMeta(typename, (), ns, total=total) + + _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' + ' /, *, total=True, **kwargs)') + + class _TypedDictMeta(type): + def __init__(cls, name, bases, ns, total=True): + # In Python 3.4 and 3.5 the __init__ method also needs to support the + # keyword arguments. 
+ # See https://www.python.org/dev/peps/pep-0487/#implementation-details + super(_TypedDictMeta, cls).__init__(name, bases, ns) + + def __new__(cls, name, bases, ns, total=True): + # Create new typed dict class object. + # This method is called directly when TypedDict is subclassed, + # or via _typeddict_new when TypedDict is instantiated. This way + # TypedDict supports all three syntaxes described in its docstring. + # Subclasses and instances of TypedDict return actual dictionaries + # via _dict_new. + ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns) + + annotations = {} + own_annotations = ns.get('__annotations__', {}) + own_annotation_keys = set(own_annotations.keys()) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_annotations = { + n: typing._type_check(tp, msg) for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + + for base in bases: + annotations.update(base.__dict__.get('__annotations__', {})) + required_keys.update(base.__dict__.get('__required_keys__', ())) + optional_keys.update(base.__dict__.get('__optional_keys__', ())) + + annotations.update(own_annotations) + if total: + required_keys.update(own_annotation_keys) + else: + optional_keys.update(own_annotation_keys) + + tp_dict.__annotations__ = annotations + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + if not hasattr(tp_dict, '__total__'): + tp_dict.__total__ = total + return tp_dict + + __instancecheck__ = __subclasscheck__ = _check_fails + + TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) + TypedDict.__module__ = __name__ + TypedDict.__doc__ = \ + """A simple typed name space. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, with each key + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + """ + + +# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + get_type_hints = typing.get_type_hints + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +elif PEP_560: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. + + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. 
+ """ + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return "typing_extensions.Annotated[{}, {}]".format( + typing._type_repr(self.__origin__), + ", ".join(repr(a) for a in self.__metadata__) + ) + + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __metadata__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @_tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + "Cannot subclass {}.Annotated".format(cls.__module__) + ) + + def _strip_annotations(t): + """Strips the annotations from a given type. + """ + if isinstance(t, _AnnotatedAlias): + return _strip_annotations(t.__origin__) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + res = t.copy_with(stripped_args) + res._special = t._special + return res + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary.
For classes, annotations also include + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_annotations(t) for k, t in hint.items()} + +elif HAVE_ANNOTATED: + + def _is_dunder(name): + """Returns True if name is a __dunder_variable_name__.""" + return len(name) > 4 and name.startswith('__') and name.endswith('__') + + # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality + # checks, argument expansion etc. are done on the _subs_tree. As a result we + # can't provide a get_type_hints function that strips out annotations. + + class AnnotatedMeta(typing.GenericMeta): + """Metaclass for Annotated""" + + def __new__(cls, name, bases, namespace, **kwargs): + if any(b is not object for b in bases): + raise TypeError("Cannot subclass " + str(Annotated)) + return super().__new__(cls, name, bases, namespace, **kwargs) + + @property + def __metadata__(self): + return self._subs_tree()[2] + + def _tree_repr(self, tree): + cls, origin, metadata = tree + if not isinstance(origin, tuple): + tp_repr = typing._type_repr(origin) + else: + tp_repr = origin[0]._tree_repr(origin) + metadata_reprs = ", ".join(repr(arg) for arg in metadata) + return '%s[%s, %s]' % (cls, tp_repr, metadata_reprs) + + def _subs_tree(self, tvars=None, args=None): # noqa + if self is Annotated: + return Annotated + res = super()._subs_tree(tvars=tvars, args=args) + # Flatten nested Annotated + if isinstance(res[1], tuple) and res[1][0] is Annotated: + sub_tp = res[1][1] + sub_annot = res[1][2] + return (Annotated, sub_tp, sub_annot + res[2]) + return res + + def _get_cons(self): + """Return the class used to create instances of this type.""" + if self.__origin__ is None: + raise TypeError("Cannot get the underlying type of a " + "non-specialized Annotated type.") + tree = self._subs_tree() + while isinstance(tree, tuple) and tree[0] is Annotated: + tree = tree[1] + if isinstance(tree, tuple): + return tree[0] + else: + return tree + + @_tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + if self.__origin__ is not None: # specializing an instantiated type + return super().__getitem__(params) + elif not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be instantiated " + "with at least two arguments (a type and an " + "annotation).") + else: + msg = "Annotated[t, ...]: t must be a type." + tp = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return self.__class__( + self.__name__, + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=_type_vars((tp,)), + # Metadata is a tuple so it won't be touched by _replace_args et al.
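+ # (args[0] is the annotated type; args[1] is the metadata tuple.)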
+ args=(tp, metadata), + origin=self, + ) + + def __call__(self, *args, **kwargs): + cons = self._get_cons() + result = cons(*args, **kwargs) + try: + result.__orig_class__ = self + except AttributeError: + pass + return result + + def __getattr__(self, attr): + # For simplicity we just don't relay all dunder names + if self.__origin__ is not None and not _is_dunder(attr): + return getattr(self._get_cons(), attr) + raise AttributeError(attr) + + def __setattr__(self, attr, value): + if _is_dunder(attr) or attr.startswith('_abc_'): + super().__setattr__(attr, value) + elif self.__origin__ is None: + raise AttributeError(attr) + else: + setattr(self._get_cons(), attr, value) + + def __instancecheck__(self, obj): + raise TypeError("Annotated cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Annotated cannot be used with issubclass().") + + class Annotated(metaclass=AnnotatedMeta): + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type, the remaining + arguments are kept as a tuple in the __metadata__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + +# Python 3.8 has get_origin() and get_args() but those implementations aren't +# Annotated-aware, so we can't use those, only Python 3.9 versions will do. +# Similarly, Python 3.9's implementation doesn't support ParamSpecArgs and +# ParamSpecKwargs. +if sys.version_info[:2] >= (3, 10): + get_origin = typing.get_origin + get_args = typing.get_args +elif PEP_560: + try: + # 3.9+ + from typing import _BaseGenericAlias + except ImportError: + _BaseGenericAlias = _GenericAlias + try: + # 3.9+ + from typing import GenericAlias + except ImportError: + GenericAlias = _GenericAlias + + def get_origin(tp): + """Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar + and Annotated. Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (_GenericAlias, GenericAlias, _BaseGenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is Generic: + return Generic + return None + + def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. 
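+        ``Annotated`` metadata is included in the returned tuple; e.g. (an
+        illustrative case) ``get_args(Annotated[int, "m"]) == (int, "m")``.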
+ Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (_GenericAlias, GenericAlias)): + if getattr(tp, "_special", False): + return () + res = tp.__args__ + if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return () + + +if hasattr(typing, 'TypeAlias'): + TypeAlias = typing.TypeAlias +elif sys.version_info[:2] >= (3, 9): + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeAliasForm + def TypeAlias(self, parameters): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + raise TypeError("{} is not subscriptable".format(self)) + +elif sys.version_info[:2] >= (3, 7): + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + TypeAlias = _TypeAliasForm('TypeAlias', + doc="""Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example + above.""") + +elif hasattr(typing, '_FinalTypingBase'): + class _TypeAliasMeta(typing.TypingMeta): + """Metaclass for TypeAlias""" + + def __repr__(self): + return 'typing_extensions.TypeAlias' + + class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("TypeAlias cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("TypeAlias cannot be used with issubclass().") + + def __repr__(self): + return 'typing_extensions.TypeAlias' + + TypeAlias = _TypeAliasBase(_root=True) +else: + class _TypeAliasMeta(typing.TypingMeta): + """Metaclass for TypeAlias""" + + def __instancecheck__(self, obj): + raise TypeError("TypeAlias cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("TypeAlias cannot be used with issubclass().") + + def __call__(self, *args, **kwargs): + raise TypeError("Cannot instantiate TypeAlias") + + class TypeAlias(metaclass=_TypeAliasMeta, _root=True): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. 
+ """ + __slots__ = () + + +# Python 3.10+ has PEP 612 +if hasattr(typing, 'ParamSpecArgs'): + ParamSpecArgs = typing.ParamSpecArgs + ParamSpecKwargs = typing.ParamSpecKwargs +else: + class _Immutable: + """Mixin to indicate that object should not be copied.""" + __slots__ = () + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + class ParamSpecArgs(_Immutable): + """The args for a ParamSpec object. + + Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. + + ParamSpecArgs objects have a reference back to their ParamSpec: + + P.args.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. + """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return "{}.args".format(self.__origin__.__name__) + + class ParamSpecKwargs(_Immutable): + """The kwargs for a ParamSpec object. + + Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. + + ParamSpecKwargs objects have a reference back to their ParamSpec: + + P.kwargs.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. + """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return "{}.kwargs".format(self.__origin__.__name__) + +if hasattr(typing, 'ParamSpec'): + ParamSpec = typing.ParamSpec +else: + + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class ParamSpec(list): + """Parameter specification variable. + + Usage:: + + P = ParamSpec('P') + + Parameter specification variables exist primarily for the benefit of static + type checkers. They are used to forward the parameter types of one + callable to another callable, a pattern commonly found in higher order + functions and decorators. They are only valid when used in ``Concatenate``, + or s the first argument to ``Callable``. In Python 3.10 and higher, + they are also supported in user-defined Generics at runtime. + See class Generic for more information on generic types. An + example for annotating a decorator:: + + T = TypeVar('T') + P = ParamSpec('P') + + def add_logging(f: Callable[P, T]) -> Callable[P, T]: + '''A type-safe decorator to add logging to a function.''' + def inner(*args: P.args, **kwargs: P.kwargs) -> T: + logging.info(f'{f.__name__} was called') + return f(*args, **kwargs) + return inner + + @add_logging + def add_two(x: float, y: float) -> float: + '''Add two numbers together.''' + return x + y + + Parameter specification variables defined with covariant=True or + contravariant=True can be used to declare covariant or contravariant + generic types. These keyword arguments are valid, but their actual semantics + are yet to be decided. See PEP 612 for details. + + Parameter specification variables can be introspected. e.g.: + + P.__name__ == 'T' + P.__bound__ == None + P.__covariant__ == False + P.__contravariant__ == False + + Note that only parameter specification variables defined in global scope can + be pickled. + """ + + # Trick Generic __parameters__. 
+ __class__ = TypeVar + + @property + def args(self): + return ParamSpecArgs(self) + + @property + def kwargs(self): + return ParamSpecKwargs(self) + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False): + super().__init__([self]) + self.__name__ = name + self.__covariant__ = bool(covariant) + self.__contravariant__ = bool(contravariant) + if bound: + self.__bound__ = typing._type_check(bound, 'Bound must be a type.') + else: + self.__bound__ = None + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + def __repr__(self): + if self.__covariant__: + prefix = '+' + elif self.__contravariant__: + prefix = '-' + else: + prefix = '~' + return prefix + self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + # Hack to get typing._type_check to pass. + def __call__(self, *args, **kwargs): + pass + + if not PEP_560: + # Only needed in 3.6 and lower. + def _get_type_vars(self, tvars): + if self not in tvars: + tvars.append(self) + + +# Inherits from list as a workaround for Callable checks in Python < 3.9.2. +class _ConcatenateGenericAlias(list): + + # Trick Generic into looking into this for __parameters__. + if PEP_560: + __class__ = typing._GenericAlias + elif sys.version_info[:3] == (3, 5, 2): + __class__ = typing.TypingMeta + else: + __class__ = typing._TypingBase + + # Flag in 3.8. + _special = False + # Attribute in 3.6 and earlier. + if sys.version_info[:3] == (3, 5, 2): + _gorg = typing.GenericMeta + else: + _gorg = typing.Generic + + def __init__(self, origin, args): + super().__init__(args) + self.__origin__ = origin + self.__args__ = args + + def __repr__(self): + _type_repr = typing._type_repr + return '{origin}[{args}]' \ + .format(origin=_type_repr(self.__origin__), + args=', '.join(_type_repr(arg) for arg in self.__args__)) + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + # Hack to get typing._type_check to pass in Generic. + def __call__(self, *args, **kwargs): + pass + + @property + def __parameters__(self): + return tuple(tp for tp in self.__args__ if isinstance(tp, (TypeVar, ParamSpec))) + + if not PEP_560: + # Only required in 3.6 and lower. + def _get_type_vars(self, tvars): + if self.__origin__ and self.__parameters__: + typing._get_type_vars(self.__parameters__, tvars) + + +@_tp_cache +def _concatenate_getitem(self, parameters): + if parameters == (): + raise TypeError("Cannot take a Concatenate of no types.") + if not isinstance(parameters, tuple): + parameters = (parameters,) + if not isinstance(parameters[-1], ParamSpec): + raise TypeError("The last parameter to Concatenate should be a " + "ParamSpec variable.") + msg = "Concatenate[arg, ...]: each arg must be a type." + parameters = tuple(typing._type_check(p, msg) for p in parameters) + return _ConcatenateGenericAlias(self, parameters) + + +if hasattr(typing, 'Concatenate'): + Concatenate = typing.Concatenate + _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa +elif sys.version_info[:2] >= (3, 9): + @_TypeAliasForm + def Concatenate(self, parameters): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. 
+
+        For example::
+
+           Callable[Concatenate[int, P], int]
+
+        See PEP 612 for detailed information.
+        """
+        return _concatenate_getitem(self, parameters)
+
+elif sys.version_info[:2] >= (3, 7):
+    class _ConcatenateForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            return _concatenate_getitem(self, parameters)
+
+    Concatenate = _ConcatenateForm(
+        'Concatenate',
+        doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+        higher order function which adds, removes or transforms parameters of a
+        callable.
+
+        For example::
+
+           Callable[Concatenate[int, P], int]
+
+        See PEP 612 for detailed information.
+        """)
+
+elif hasattr(typing, '_FinalTypingBase'):
+    class _ConcatenateAliasMeta(typing.TypingMeta):
+        """Metaclass for Concatenate."""
+
+        def __repr__(self):
+            return 'typing_extensions.Concatenate'
+
+    class _ConcatenateAliasBase(typing._FinalTypingBase,
+                                metaclass=_ConcatenateAliasMeta,
+                                _root=True):
+        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+        higher order function which adds, removes or transforms parameters of a
+        callable.
+
+        For example::
+
+           Callable[Concatenate[int, P], int]
+
+        See PEP 612 for detailed information.
+        """
+        __slots__ = ()
+
+        def __instancecheck__(self, obj):
+            raise TypeError("Concatenate cannot be used with isinstance().")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError("Concatenate cannot be used with issubclass().")
+
+        def __repr__(self):
+            return 'typing_extensions.Concatenate'
+
+        def __getitem__(self, parameters):
+            return _concatenate_getitem(self, parameters)
+
+    Concatenate = _ConcatenateAliasBase(_root=True)
+# For 3.5.0 - 3.5.2
+else:
+    class _ConcatenateAliasMeta(typing.TypingMeta):
+        """Metaclass for Concatenate."""
+
+        def __instancecheck__(self, obj):
+            raise TypeError("Concatenate cannot be used with isinstance().")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError("Concatenate cannot be used with issubclass().")
+
+        def __call__(self, *args, **kwargs):
+            raise TypeError("Cannot instantiate Concatenate")
+
+        def __getitem__(self, parameters):
+            return _concatenate_getitem(self, parameters)
+
+    class Concatenate(metaclass=_ConcatenateAliasMeta, _root=True):
+        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+        higher order function which adds, removes or transforms parameters of a
+        callable.
+
+        For example::
+
+           Callable[Concatenate[int, P], int]
+
+        See PEP 612 for detailed information.
+        """
+        __slots__ = ()
+
+if hasattr(typing, 'TypeGuard'):
+    TypeGuard = typing.TypeGuard
+elif sys.version_info[:2] >= (3, 9):
+    class _TypeGuardForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    @_TypeGuardForm
+    def TypeGuard(self, parameters):
+        """Special typing form used to annotate the return type of a user-defined
+        type guard function.  ``TypeGuard`` only accepts a single type argument.
+        At runtime, functions marked this way should return a boolean.
+
+        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+        type checkers to determine a more precise type of an expression within a
+        program's code flow.  Usually type narrowing is done by analyzing
+        conditional code flow and applying the narrowing to a block of code.  The
+        conditional expression here is sometimes referred to as a "type guard".
+
+        Sometimes it would be convenient to use a user-defined boolean function
+        as a type guard.
Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """ + item = typing._type_check(parameters, '{} accepts only single type.'.format(self)) + return _GenericAlias(self, (item,)) + +elif sys.version_info[:2] >= (3, 7): + class _TypeGuardForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + '{} accepts only a single type'.format(self._name)) + return _GenericAlias(self, (item,)) + + TypeGuard = _TypeGuardForm( + 'TypeGuard', + doc="""Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). 
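+
+        A complete sketch (``is_str_list`` is a hypothetical helper, adapted
+        from the PEP 647 example)::
+
+            def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
+                '''Determines whether all objects in the list are strings.'''
+                return all(isinstance(x, str) for x in val)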
+ """) +elif hasattr(typing, '_FinalTypingBase'): + class _TypeGuard(typing._FinalTypingBase, _root=True): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). 
+ """ + + __slots__ = ('__type__',) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls(typing._type_check(item, + '{} accepts only a single type.'.format(cls.__name__[1:])), + _root=True) + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _TypeGuard): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + TypeGuard = _TypeGuard(_root=True) +else: + class _TypeGuardMeta(typing.TypingMeta): + """Metaclass for TypeGuard""" + + def __new__(cls, name, bases, namespace, tp=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if tp is not None: + self.__type__ = tp + return self + + def __instancecheck__(self, obj): + raise TypeError("TypeGuard cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("TypeGuard cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is not None: + raise TypeError('{} cannot be further subscripted' + .format(cls.__name__[1:])) + + param = typing._type_check( + item, + '{} accepts only single type.'.format(cls.__name__[1:])) + return cls(self.__name__, self.__bases__, + dict(self.__dict__), tp=param, _root=True) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(self.__name__, self.__bases__, + dict(self.__dict__), tp=self.__type__, + _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += '[{}]'.format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not hasattr(other, "__type__"): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + class TypeGuard(typing.Final, metaclass=_TypeGuardMeta, _root=True): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. 
+           is the type inside ``TypeGuard``.
+
+        For example::
+
+            def is_str(val: Union[str, float]):
+                # "isinstance" type guard
+                if isinstance(val, str):
+                    # Type of ``val`` is narrowed to ``str``
+                    ...
+                else:
+                    # Else, type of ``val`` is narrowed to ``float``.
+                    ...
+
+        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+        form of ``TypeA`` (it can even be a wider form) and this may lead to
+        type-unsafe results.  The main reason is to allow for things like
+        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+        a subtype of the former, since ``List`` is invariant.  The responsibility of
+        writing type-safe type guards is left to the user.
+
+        ``TypeGuard`` also works with type variables.  For more information, see
+        PEP 647 (User-Defined Type Guards).
+        """
+        __type__ = None
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/error.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..325be5c2e4395d0898fc7d9b3b43acf011ec48b7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/error.py
@@ -0,0 +1,34 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+
+class NotSupportedError(Exception):
+    pass
+
+
+class EmptyTableNameError(Exception):
+    """
+    Exception raised when the |table_name| attribute of a table writer class
+    is null and the class does not accept a null |table_name|.
+    """
+
+
+class EmptyValueError(Exception):
+    """
+    Exception raised when the |value_matrix| attribute of a table writer class
+    is null and the class does not accept a null |value_matrix|.
+    """
+
+
+class EmptyTableDataError(Exception):
+    """
+    Exception raised when both the |headers| and |value_matrix| attributes
+    of a table writer class are null.
+    """
+
+
+class WriterNotFoundError(Exception):
+    """
+    Exception raised when an appropriate table writer is not found.
+    """
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/py.typed b/llmeval-env/lib/python3.10/site-packages/pytablewriter/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__init__.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..77203ea9ead92a316d96114872ae681f5c8e3a73
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__init__.py
@@ -0,0 +1,21 @@
+"""
+..
codeauthor:: Tsuyoshi Hombashi +""" + +from ._elasticsearch import ElasticsearchIndexNameSanitizer +from ._excel import sanitize_excel_sheet_name, validate_excel_sheet_name +from ._javascript import JavaScriptVarNameSanitizer, sanitize_js_var_name, validate_js_var_name +from ._python import PythonVarNameSanitizer, sanitize_python_var_name, validate_python_var_name + + +__all__ = ( + "ElasticsearchIndexNameSanitizer", + "JavaScriptVarNameSanitizer", + "PythonVarNameSanitizer", + "sanitize_excel_sheet_name", + "sanitize_js_var_name", + "sanitize_python_var_name", + "validate_excel_sheet_name", + "validate_js_var_name", + "validate_python_var_name", +) diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b9a20a10683e3da416680c92cfda71f2db2b601 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5720a5d9c43be654521db93e39cf7f7f5e980ca4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_elasticsearch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_elasticsearch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02975cba17753c6c9e7f0efeaec1a923369c9fac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_elasticsearch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_excel.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_excel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c26d7b75e9f317a48c340fbf334f8a1058588cce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_excel.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_interface.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aaf80fd78e1a6cd293878ccffd35f06d0358e439 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_interface.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_javascript.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_javascript.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a4bf9386d0943e5e51dbadfe3634ffb8d28b7ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_javascript.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_python.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..676814e9546570894595d409c3133c0ca3dbe56c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/__pycache__/_python.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_base.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..459df620e1e0c006d04f3bbe4acd44f12907b2de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_base.py @@ -0,0 +1,91 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import abc +import re +from typing import Pattern + +from pathvalidate.error import ErrorReason, ValidationError +from typepy import is_null_string + +from ._interface import NameSanitizer + + +def _preprocess(name: str) -> str: + return name.strip() + + +class VarNameSanitizer(NameSanitizer): + @abc.abstractproperty + def _invalid_var_name_head_re(self) -> Pattern[str]: # pragma: no cover + pass + + @abc.abstractproperty + def _invalid_var_name_re(self) -> Pattern[str]: # pragma: no cover + pass + + def validate(self) -> None: + self._validate(self._value) + + def sanitize(self, replacement_text: str = "") -> str: + var_name = self._invalid_var_name_re.sub(replacement_text, self._str) + + # delete invalid char(s) in the beginning of the variable name + is_require_remove_head = any( + [ + is_null_string(replacement_text), + self._invalid_var_name_head_re.search(replacement_text) is not None, + ] + ) + + if is_require_remove_head: + var_name = self._invalid_var_name_head_re.sub("", var_name) + else: + match = self._invalid_var_name_head_re.search(var_name) + if match is not None: + var_name = match.end() * replacement_text + self._invalid_var_name_head_re.sub( + "", var_name + ) + + if not var_name: + return "" + + try: + self._validate(var_name) + except ValidationError as e: + if e.reason == ErrorReason.RESERVED_NAME and e.reusable_name is False: + var_name += "_" + + return var_name + + def _validate(self, value: str) -> None: + self._validate_null_string(value) + + unicode_var_name = _preprocess(value) + + if self._is_reserved_keyword(unicode_var_name): + raise ValidationError( + description=f"{unicode_var_name:s} is a reserved keyword by python", + reason=ErrorReason.RESERVED_NAME, + reusable_name=False, + reserved_name=unicode_var_name, + ) + + match = self._invalid_var_name_re.search(unicode_var_name) + if match is not None: + raise ValidationError( + description="invalid char found in the variable name: '{}'".format( + re.escape(match.group()) + ), + reason=ErrorReason.INVALID_CHARACTER, + ) + + match = self._invalid_var_name_head_re.search(unicode_var_name) + if match is not None: + raise ValidationError( + description="the first character of the variable name is invalid: '{}'".format( + re.escape(match.group()) + ), + reason=ErrorReason.INVALID_CHARACTER, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_elasticsearch.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_elasticsearch.py new file mode 100644 index 0000000000000000000000000000000000000000..d722e317d618cc4b9266b7813fdc79daeaaba954 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_elasticsearch.py
@@ -0,0 +1,27 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import re
+from typing import ClassVar, List, Pattern
+
+from ._base import VarNameSanitizer
+
+
+class ElasticsearchIndexNameSanitizer(VarNameSanitizer):
+    __RE_INVALID_INDEX_NAME: ClassVar[Pattern[str]] = re.compile(
+        "[" + re.escape('\\/*?"<>|,"') + r"\s]+"
+    )
+    __RE_INVALID_INDEX_NAME_HEAD: ClassVar[Pattern[str]] = re.compile("^[_]+")
+
+    @property
+    def reserved_keywords(self) -> List[str]:
+        return []
+
+    @property
+    def _invalid_var_name_head_re(self) -> Pattern[str]:
+        return self.__RE_INVALID_INDEX_NAME_HEAD
+
+    @property
+    def _invalid_var_name_re(self) -> Pattern[str]:
+        return self.__RE_INVALID_INDEX_NAME
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_excel.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_excel.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf5e096c9a77bbda8305f214d81667b92ede8a44
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_excel.py
@@ -0,0 +1,75 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import re
+
+from pathvalidate import validate_pathtype
+from pathvalidate.error import ErrorReason, ValidationError
+
+from ._base import _preprocess
+
+
+__MAX_SHEET_NAME_LEN = 31
+
+__INVALID_EXCEL_CHARS = "[]:*?/\\"
+
+__RE_INVALID_EXCEL_SHEET_NAME = re.compile(f"[{re.escape(__INVALID_EXCEL_CHARS):s}]", re.UNICODE)
+
+
+def validate_excel_sheet_name(sheet_name: str) -> None:
+    """
+    :param str sheet_name: Excel sheet name to validate.
+    :raises pathvalidate.ValidationError (ErrorReason.INVALID_CHARACTER):
+        If the ``sheet_name`` includes invalid char(s):
+        |invalid_excel_sheet_chars|.
+    :raises pathvalidate.ValidationError (ErrorReason.INVALID_LENGTH):
+        If the ``sheet_name`` is longer than 31 characters.
+    """
+
+    validate_pathtype(sheet_name)
+
+    if len(sheet_name) > __MAX_SHEET_NAME_LEN:
+        raise ValidationError(
+            description="sheet name is too long: expected<={:d}, actual={:d}".format(
+                __MAX_SHEET_NAME_LEN, len(sheet_name)
+            ),
+            reason=ErrorReason.INVALID_LENGTH,
+        )
+
+    unicode_sheet_name = _preprocess(sheet_name)
+    match = __RE_INVALID_EXCEL_SHEET_NAME.search(unicode_sheet_name)
+    if match is not None:
+        raise ValidationError(
+            description="invalid char found in the sheet name: '{:s}'".format(
+                re.escape(match.group())
+            ),
+            reason=ErrorReason.INVALID_CHARACTER,
+        )
+
+
+def sanitize_excel_sheet_name(sheet_name: str, replacement_text: str = "") -> str:
+    """
+    Replace invalid characters for an Excel sheet name within
+    the ``sheet_name`` with the ``replacement_text``.
+    Invalid characters are as follows:
+    |invalid_excel_sheet_chars|.
+    The ``sheet_name`` is truncated to 31 characters (the maximum sheet
+    name length of Excel) from the head if the name exceeds 31 characters.
+
+    :param str sheet_name: Excel sheet name to sanitize.
+    :param str replacement_text: Replacement text.
+    :return: A replacement string.
+    :rtype: str
+    :raises ValueError: If the ``sheet_name`` is an invalid sheet name.
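+
+    :Example:
+        An illustrative call (the expected result is shown as a comment)::
+
+            sanitize_excel_sheet_name("wed/nes/day?")  # -> "wednesday"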
+ """ + + try: + unicode_sheet_name = _preprocess(sheet_name) + except AttributeError as e: + raise ValueError(e) + + modify_sheet_name = __RE_INVALID_EXCEL_SHEET_NAME.sub(replacement_text, unicode_sheet_name) + + return modify_sheet_name[:__MAX_SHEET_NAME_LEN] diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_interface.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..fefb5612f12e10e0014dedc6489b5549e93e7e1c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_interface.py @@ -0,0 +1,38 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import abc +from typing import List + +from pathvalidate import validate_pathtype + + +class NameSanitizer(metaclass=abc.ABCMeta): + @abc.abstractproperty + def reserved_keywords(self) -> List[str]: # pragma: no cover + pass + + @abc.abstractmethod + def validate(self) -> None: # pragma: no cover + pass + + @abc.abstractmethod + def sanitize(self, replacement_text: str = "") -> str: # pragma: no cover + pass + + @property + def _str(self) -> str: + return str(self._value) + + def __init__(self, value: str) -> None: + self._validate_null_string(value) + + self._value = value.strip() + + def _is_reserved_keyword(self, value: str) -> bool: + return value in self.reserved_keywords + + @staticmethod + def _validate_null_string(text: str) -> None: + validate_pathtype(text) diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_javascript.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_javascript.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe67a65b337421d2b406421c6def39d44ee9b2d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_javascript.py @@ -0,0 +1,143 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import re +from typing import List, Pattern + +from ._base import VarNameSanitizer + + +class JavaScriptVarNameSanitizer(VarNameSanitizer): + __JS_RESERVED_KEYWORDS_ES6 = [ + "break", + "case", + "catch", + "class", + "const", + "continue", + "debugger", + "default", + "delete", + "do", + "else", + "export", + "extends", + "finally", + "for", + "function", + "if", + "import", + "in", + "instanceof", + "new", + "return", + "super", + "switch", + "this", + "throw", + "try", + "typeof", + "var", + "void", + "while", + "with", + "yield", + ] + __JS_RESERVED_KEYWORDS_FUTURE = [ + "enum", + "implements", + "interface", + "let", + "package", + "private", + "protected", + "public", + "static", + "await", + "abstract", + "boolean", + "byte", + "char", + "double", + "final", + "float", + "goto", + "int", + "long", + "native", + "short", + "synchronized", + "throws", + "transient", + "volatile", + ] + __JS_BUILTIN_CONSTANTS = ["null", "true", "false"] + + __RE_INVALID_VAR_NAME = re.compile("[^a-zA-Z0-9_$]") + __RE_INVALID_VAR_NAME_HEAD = re.compile("^[^a-zA-Z$]+") + + @property + def reserved_keywords(self) -> List[str]: + return ( + self.__JS_RESERVED_KEYWORDS_ES6 + + self.__JS_RESERVED_KEYWORDS_FUTURE + + self.__JS_BUILTIN_CONSTANTS + ) + + @property + def _invalid_var_name_head_re(self) -> Pattern[str]: + return self.__RE_INVALID_VAR_NAME_HEAD + + @property + def _invalid_var_name_re(self) -> Pattern[str]: + return self.__RE_INVALID_VAR_NAME + + +def validate_js_var_name(var_name: str) -> None: + """ + :param str var_name: Name to validate. 
+    :raises pathvalidate.ValidationError (ErrorReason.INVALID_CHARACTER):
+        If the ``var_name`` is invalid as a JavaScript identifier.
+    :raises pathvalidate.ValidationError (ErrorReason.RESERVED_NAME):
+        If the ``var_name`` is equal to
+        `JavaScript reserved keywords
+        `__.
+
+    .. note::
+
+        Unicode variable names are currently not supported.
+    """
+
+    JavaScriptVarNameSanitizer(var_name).validate()
+
+
+def sanitize_js_var_name(var_name: str, replacement_text: str = "") -> str:
+    """
+    Make a valid JavaScript variable name from ``var_name``.
+
+    To make a valid name:
+
+    - Replace invalid characters for a JavaScript variable name within
+      the ``var_name`` with the ``replacement_text``
+    - Delete invalid chars from the beginning of the variable name
+    - Append an underscore (``"_"``) at the tail of the name if the
+      sanitized name is one of the JavaScript reserved names
+
+    :param str var_name: Name to sanitize.
+    :param str replacement_text: Replacement text.
+    :return: A replacement string.
+    :rtype: str
+    :raises ValueError: If ``var_name`` or ``replacement_text`` is invalid.
+
+    :Example:
+        :ref:`example-sanitize-var-name`
+
+    .. note::
+        Unicode variable names are currently not supported.
+
+    .. seealso::
+        :py:func:`.validate_js_var_name`
+    """
+
+    return JavaScriptVarNameSanitizer(var_name).sanitize(replacement_text)
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_python.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_python.py
new file mode 100644
index 0000000000000000000000000000000000000000..956567caf0230b611cfe1a659fff11486c8048e7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/sanitizer/_python.py
@@ -0,0 +1,117 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import re
+from typing import List, Pattern
+
+from ._base import VarNameSanitizer
+
+
+class PythonVarNameSanitizer(VarNameSanitizer):
+    __PYTHON_RESERVED_KEYWORDS = [
+        "and",
+        "del",
+        "from",
+        "not",
+        "while",
+        "as",
+        "elif",
+        "global",
+        "or",
+        "with",
+        "assert",
+        "else",
+        "if",
+        "pass",
+        "yield",
+        "break",
+        "except",
+        "import",
+        "print",
+        "class",
+        "exec",
+        "in",
+        "raise",
+        "continue",
+        "finally",
+        "is",
+        "return",
+        "def",
+        "for",
+        "lambda",
+        "try",
+    ]
+    __PYTHON_BUILTIN_CONSTANTS = [
+        "False",
+        "True",
+        "None",
+        "NotImplemented",
+        "Ellipsis",
+        "__debug__",
+    ]
+
+    __RE_INVALID_VAR_NAME = re.compile("[^a-zA-Z0-9_]")
+    __RE_INVALID_VAR_NAME_HEAD = re.compile("^[^a-zA-Z]+")
+
+    @property
+    def reserved_keywords(self) -> List[str]:
+        return self.__PYTHON_RESERVED_KEYWORDS + self.__PYTHON_BUILTIN_CONSTANTS
+
+    @property
+    def _invalid_var_name_head_re(self) -> Pattern[str]:
+        return self.__RE_INVALID_VAR_NAME_HEAD
+
+    @property
+    def _invalid_var_name_re(self) -> Pattern[str]:
+        return self.__RE_INVALID_VAR_NAME
+
+
+def validate_python_var_name(var_name: str) -> None:
+    """
+    :param str var_name: Name to validate.
+    :raises pathvalidate.ValidationError (ErrorReason.INVALID_CHARACTER):
+        If the ``var_name`` is invalid as a
+        `Python identifier
+        `__.
+    :raises pathvalidate.ValidationError (ErrorReason.RESERVED_NAME):
+        If the ``var_name`` is equal to
+        `Python reserved keywords
+        `__
+        or
+        `Python built-in constants
+        `__.
+
+    :Example:
+        :ref:`example-validate-var-name`
+    """
+
+    PythonVarNameSanitizer(var_name).validate()
+
+
+def sanitize_python_var_name(var_name: str, replacement_text: str = "") -> str:
+    """
+    Make a valid Python variable name from ``var_name``.
+
+    To make a valid name:
+
+    - Replace invalid characters for a Python variable name within
+      the ``var_name`` with the ``replacement_text``
+    - Delete invalid chars from the beginning of the variable name
+    - Append an underscore (``"_"``) at the tail of the name if the
+      sanitized name is one of the Python reserved names
+
+    :param str var_name: Name to sanitize.
+    :param str replacement_text: Replacement text.
+    :return: A replacement string.
+    :rtype: str
+    :raises ValueError: If ``var_name`` or ``replacement_text`` is invalid.
+
+    :Example:
+        :ref:`example-sanitize-var-name`
+
+    .. seealso::
+        :py:func:`.validate_python_var_name`
+    """
+
+    return PythonVarNameSanitizer(var_name).sanitize(replacement_text)
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/writer/_msgfy.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/writer/_msgfy.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb77f6a2ee55b1955dd25c89832f1472f9a61704
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/writer/_msgfy.py
@@ -0,0 +1,56 @@
+"""
+Import from https://github.com/thombashi/msgfy
+"""
+
+import inspect
+import os.path
+from types import FrameType
+from typing import Optional
+
+
+DEFAULT_ERROR_MESSAGE_FORMAT = "{exception}: {error_msg}"
+DEFAULT_DEBUG_MESSAGE_FORMAT = "{exception} {file_name}({line_no}) {func_name}: {error_msg}"
+
+error_message_format = DEFAULT_ERROR_MESSAGE_FORMAT
+debug_message_format = DEFAULT_DEBUG_MESSAGE_FORMAT
+
+
+def _to_message(exception_obj: Exception, format_str: str, frame: Optional[FrameType]) -> str:
+    if not isinstance(exception_obj, Exception):
+        raise ValueError("exception_obj must be an instance of a subclass of the Exception class")
+
+    if frame is None:
+        return str(exception_obj)
+
+    try:
+        return (
+            format_str.replace("{exception}", exception_obj.__class__.__name__)
+            .replace("{file_name}", os.path.basename(frame.f_code.co_filename))
+            .replace("{line_no}", str(frame.f_lineno))
+            .replace("{func_name}", frame.f_code.co_name)
+            .replace("{error_msg}", str(exception_obj))
+        )
+    except AttributeError:
+        raise ValueError("format_str must be a string")
+
+
+def to_error_message(exception_obj: Exception, format_str: Optional[str] = None) -> str:
+    if not format_str:
+        format_str = error_message_format
+
+    frame = inspect.currentframe()
+    if frame is None:
+        return str(exception_obj)
+
+    return _to_message(exception_obj, format_str, frame.f_back)
+
+
+def to_debug_message(exception_obj: Exception, format_str: Optional[str] = None) -> str:
+    if not format_str:
+        format_str = debug_message_format
+
+    frame = inspect.currentframe()
+    if frame is None:
+        return str(exception_obj)
+
+    return _to_message(exception_obj, format_str, frame.f_back)
diff --git a/llmeval-env/lib/python3.10/site-packages/pytablewriter/writer/_table_writer.py b/llmeval-env/lib/python3.10/site-packages/pytablewriter/writer/_table_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c6ed20b44f03f9a23cdac933e21f90b317699af
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pytablewriter/writer/_table_writer.py
@@ -0,0 +1,1245 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import abc
+import copy
+import math
+import warnings
+from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Union, cast
+
+import typepy
+from dataproperty import (
+    Align,
+    ColumnDataProperty,
+    DataProperty,
+    DataPropertyExtractor,
+    Format,
+    MatrixFormatting,
+    Preprocessor,
+)
+from dataproperty.typing import TransFunc
+from tabledata import TableData, convert_idx_to_alphabet, to_value_matrix
+from typepy import Typecode, extract_typepy_from_dtype
+
+from .._logger import WriterLogger
+from ..error import EmptyTableDataError, EmptyTableNameError, EmptyValueError, NotSupportedError
+from ..style import (
+    Cell,
+    CheckStyleFilterKeywordArgsFunc,
+    ColSeparatorStyleFilterFunc,
+    Style,
+    StyleFilterFunc,
+    StylerInterface,
+    ThousandSeparator,
+    fetch_theme,
+)
+from ..typehint import Integer, TypeHint
+from ._common import HEADER_ROW
+from ._interface import TableWriterInterface
+from ._msgfy import to_error_message
+
+
+if TYPE_CHECKING:
+    import pandas
+    import tablib
+
+    from .._table_format import TableFormat
+
+_ts_to_flag: Dict[ThousandSeparator, int] = {
+    ThousandSeparator.NONE: Format.NONE,
+    ThousandSeparator.COMMA: Format.THOUSAND_SEPARATOR,
+    ThousandSeparator.SPACE: Format.THOUSAND_SEPARATOR,
+    ThousandSeparator.UNDERSCORE: Format.THOUSAND_SEPARATOR,
+}
+
+
+def header_style_filter(cell: Cell, **kwargs: Any) -> Optional[Style]:
+    if cell.is_header_row():
+        return Style(align=Align.CENTER)
+
+    return None
+
+
+DEFAULT_STYLE_FILTERS: List[StyleFilterFunc] = [header_style_filter]
+
+
+class AbstractTableWriter(TableWriterInterface, metaclass=abc.ABCMeta):
+    """
+    An abstract base class of table writer classes.
+
+    Args:
+        max_precision (int): Maximum decimal places for real number values.
+
+        dequote (bool): If |True|, dequote values in :py:attr:`~.value_matrix`.
+
+    .. py:attribute:: stream
+
+        Stream to write tables.
+        You can use arbitrary streams that support the ``write`` method,
+        such as ``sys.stdout``, file streams, ``StringIO``, and so forth.
+        Defaults to ``sys.stdout``.
+
+        :Example:
+            :ref:`example-configure-stream`
+
+    .. py:attribute:: is_write_header
+        :type: bool
+
+        Write the headers of a table if the value is |True|.
+
+    .. py:attribute:: is_padding
+        :type: bool
+
+        Add padding to each item in the table if the value is |True|.
+
+    .. py:attribute:: iteration_length
+        :type: int
+
+        The number of iterations to write a table.
+        This value is used in the :py:meth:`.write_table_iter` method.
+        (defaults to ``-1``, which means the number of iterations is indefinite)
+
+    .. py:attribute:: style_filter_kwargs
+        :type: Dict[str, Any]
+
+        Extra keyword arguments for style filter functions.
+        These arguments are passed to filter functions added by
+        :py:meth:`.add_style_filter` or :py:meth:`.add_col_separator_style_filter`
+
+    .. py:attribute:: colorize_terminal
+        :type: bool
+        :value: True
+
+        [Only available for text format writers] [experimental]
+        If |True|, colorize text outputs with |Style|.
+
+    .. py:attribute:: enable_ansi_escape
+        :type: bool
+        :value: True
+
+        [Only available for text format writers]
+        If |True|, applies ANSI escape sequences to the terminal's text outputs with |Style|.
+
+    .. py:attribute:: write_callback
+
+        The value is expected to be a function.
+        The function is called each time an iteration of writing a table
+        completes (defaults to |None|).
+        For example, a callback function definition is as follows:
+
+        .. code:: python
+
+            def callback_example(iter_count: int, iter_length: int) -> None:
+                print("{:d}/{:d}".format(iter_count, iter_length))
+
+        Arguments passed to the callback are:
+
+        - first argument: the current iteration number (starts from ``1``)
+        - second argument: the total number of iterations
+    """
+
+    @property
+    def margin(self) -> int:
+        raise NotImplementedError()
+
+    @margin.setter
+    def margin(self, value: int) -> None:
+        raise NotImplementedError()
+
+    @property
+    def value_matrix(self) -> Sequence:
+        """Data of a table to be output."""
+
+        return self.__value_matrix_org
+
+    @value_matrix.setter
+    def value_matrix(self, value_matrix: Sequence) -> None:
+        self.__set_value_matrix(value_matrix)
+        self.__clear_preprocess()
+
+    @property
+    def table_format(self) -> "TableFormat":
+        """TableFormat: Get the format of the writer."""
+
+        from .._table_format import TableFormat
+
+        table_format = TableFormat.from_name(self.format_name)
+        assert table_format
+
+        return table_format
+
+    @property
+    def stream(self) -> Any:
+        return self._stream
+
+    @stream.setter
+    def stream(self, value: Any) -> None:
+        self._stream = value
+
+    @abc.abstractmethod
+    def _write_table(self, **kwargs: Any) -> None:
+        pass
+
+    def __init__(self, **kwargs: Any) -> None:
+        self._logger = WriterLogger(self)
+
+        self.table_name = kwargs.get("table_name", "")
+        self.value_matrix = kwargs.get("value_matrix", [])
+
+        self.is_write_header = kwargs.get("is_write_header", True)
+        self.is_write_header_separator_row = kwargs.get("is_write_header_separator_row", True)
+        self.is_write_value_separator_row = kwargs.get("is_write_value_separator_row", False)
+        self.is_write_opening_row = kwargs.get("is_write_opening_row", False)
+        self.is_write_closing_row = kwargs.get("is_write_closing_row", False)
+
+        self._use_default_header = False
+
+        self._dp_extractor = DataPropertyExtractor(max_precision=kwargs.get("max_precision"))
+        self._dp_extractor.min_column_width = 1
+        self._dp_extractor.strip_str_header = '"'
+        self._dp_extractor.preprocessor = Preprocessor(dequote=kwargs.get("dequote", True))
+        self._dp_extractor.type_value_map[Typecode.NONE] = ""
+        self._dp_extractor.matrix_formatting = MatrixFormatting.HEADER_ALIGNED
+        self._dp_extractor.update_strict_level_map({Typecode.BOOL: 1})
+
+        self.is_formatting_float = kwargs.get("is_formatting_float", True)
+        self.is_padding = kwargs.get("is_padding", True)
+
+        self.headers = kwargs.get("headers", [])
+        self.type_hints = kwargs.get("type_hints", [])
+        self._quoting_flags = {
+            Typecode.BOOL: False,
+            Typecode.DATETIME: True,
+            Typecode.DICTIONARY: False,
+            Typecode.INFINITY: False,
+            Typecode.INTEGER: False,
+            Typecode.IP_ADDRESS: True,
+            Typecode.LIST: False,
+            Typecode.NAN: False,
+            Typecode.NONE: False,
+            Typecode.NULL_STRING: True,
+            Typecode.REAL_NUMBER: False,
+            Typecode.STRING: True,
+        }
+
+        self._is_require_table_name = False
+        self._is_require_header = False
+
+        self.iteration_length: int = kwargs.get("iteration_length", -1)
+        self.write_callback = kwargs.get(
+            "write_callback", lambda _iter_count, _iter_length: None  # defaults to NOP callback
+        )
+        self._iter_count: Optional[int] = None
+
+        self.__default_style: Style
+        self.default_style = kwargs.get("default_style", Style())
+
+        self.__col_style_list: List[Optional[Style]] = []
+        self.column_styles = kwargs.get("column_styles", [])
+
+        self._style_filters: List[StyleFilterFunc] = copy.deepcopy(DEFAULT_STYLE_FILTERS)
+        self._enable_style_filter = True
+        self._styler = self._create_styler(self)
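+        # Extra keyword arguments forwarded to every registered style filter
+        # function (see add_style_filter).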
self.style_filter_kwargs: Dict[str, Any] = kwargs.get("style_filter_kwargs", {}) + self._check_style_filter_kwargs_funcs: List[CheckStyleFilterKeywordArgsFunc] = [] + self.__colorize_terminal = kwargs.get("colorize_terminal", True) + self.__enable_ansi_escape = kwargs.get("enable_ansi_escape", True) + + self.max_workers = kwargs.get("max_workers", 1) + + if "dataframe" in kwargs: + self.from_dataframe(kwargs["dataframe"]) + + self.__clear_preprocess() + + def _repr_html_(self) -> str: + from .text._html import HtmlTableWriter + + writer = HtmlTableWriter( + table_name=self.table_name, + headers=self.headers, + value_matrix=self.value_matrix, + column_styles=self.column_styles, + colorize_terminal=self.colorize_terminal, + enable_ansi_escape=self.enable_ansi_escape, + ) + writer._dp_extractor = self._dp_extractor + + return writer.dumps() + + @property + def value_preprocessor(self) -> Preprocessor: + return self._dp_extractor.preprocessor + + def __clear_preprocess_status(self) -> None: + self._is_complete_table_dp_preprocess = False + self._is_complete_table_property_preprocess = False + self._is_complete_header_preprocess = False + self._is_complete_value_matrix_preprocess = False + + def __clear_preprocess_data(self) -> None: + self._column_dp_list: List[ColumnDataProperty] = [] + self._table_headers: List[str] = [] + self._table_value_matrix: List[Union[List[str], Dict]] = [] + self._table_value_dp_matrix: Sequence[Sequence[DataProperty]] = [] + + @property + def headers(self) -> Sequence[str]: + """Sequence[str]: Headers of a table to be outputted.""" + + return self._dp_extractor.headers + + @headers.setter + def headers(self, value: Sequence[str]) -> None: + self._dp_extractor.headers = value + + @property + def is_formatting_float(self) -> bool: + return self._dp_extractor.is_formatting_float + + @is_formatting_float.setter + def is_formatting_float(self, value: bool) -> None: + if self._dp_extractor.is_formatting_float == value: + return + + self._dp_extractor.is_formatting_float = value + self.__clear_preprocess() + + @property + def max_workers(self) -> int: + return self._dp_extractor.max_workers + + @max_workers.setter + def max_workers(self, value: Optional[int]) -> None: + self._dp_extractor.max_workers = value + + @property + def tabledata(self) -> TableData: + """tabledata.TableData: Get tabular data of the writer.""" + + return TableData( + self.table_name, + self.headers, + self.value_matrix, + max_workers=self.max_workers, + max_precision=self._dp_extractor.max_precision, + ) + + @property + def table_name(self) -> str: + """str: Name of a table.""" + + return self._table_name + + @table_name.setter + def table_name(self, value: str) -> None: + self._table_name = value + + @property + def type_hints(self) -> List[TypeHint]: + """ + Type hints for each column of the tabular data. + Writers convert data for each column using the type hints information + before writing tables when you call ``write_xxx`` methods. 
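+
+        A minimal sketch (the values here are hypothetical; ``Integer`` is
+        importable from ``pytablewriter.typehint``)::
+
+            writer.headers = ["id", "name"]
+            writer.value_matrix = [["1", "foo"], ["2", "bar"]]
+            writer.type_hints = [Integer, "str"]  # 1st column int, 2nd str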
+
+        Acceptable values are as follows:
+
+        - |None| (automatically detect column type from values in the column)
+        - :py:class:`pytablewriter.typehint.Bool` or ``"bool"``
+        - :py:class:`pytablewriter.typehint.DateTime` or ``"datetime"``
+        - :py:class:`pytablewriter.typehint.Dictionary` or ``"dict"``
+        - :py:class:`pytablewriter.typehint.Infinity` or ``"inf"``
+        - :py:class:`pytablewriter.typehint.Integer` or ``"int"``
+        - :py:class:`pytablewriter.typehint.IpAddress` or ``"ipaddr"``
+        - :py:class:`pytablewriter.typehint.List` or ``"list"``
+        - :py:class:`pytablewriter.typehint.Nan` or ``"nan"``
+        - :py:class:`pytablewriter.typehint.NoneType` or ``"none"``
+        - :py:class:`pytablewriter.typehint.NullString` or ``"nullstr"``
+        - :py:class:`pytablewriter.typehint.RealNumber` or ``"realnumber"`` or ``"float"``
+        - :py:class:`pytablewriter.typehint.String` or ``"str"``
+
+        If a type-hint value is not |None|, the writer tries to convert each
+        value in the column to the type-hint class.
+        If the type-hint value is |None|, or if a conversion fails, the writer
+        automatically detects the column data type from the column values.
+
+        If ``type_hints`` is |None|, the writer automatically detects data types
+        for all of the columns and writes a table using the detected column types.
+
+        Defaults to |None|.
+
+        Examples:
+            - :ref:`example-type-hint-js`
+            - :ref:`example-type-hint-python`
+        """
+
+        return self._dp_extractor.column_type_hints
+
+    @type_hints.setter
+    def type_hints(self, value: Sequence[Union[str, TypeHint]]) -> None:
+        hints = list(value)
+        if self.type_hints == hints:
+            return
+
+        self.__set_type_hints(hints)
+        self.__clear_preprocess()
+
+    @property
+    def default_style(self) -> Style:
+        """Style: Default |Style| of table cells."""
+
+        return self.__default_style
+
+    @default_style.setter
+    def default_style(self, style: Optional[Style]) -> None:
+        if style is None:
+            style = Style()
+
+        if not isinstance(style, Style):
+            raise TypeError("default_style must be a Style instance")
+
+        try:
+            if self.__default_style == style:
+                return
+        except AttributeError:
+            # not yet initialized
+            pass
+
+        self.__default_style = style
+        self._dp_extractor.default_format_flags = _ts_to_flag[
+            self.__default_style.thousand_separator
+        ]
+        self.__clear_preprocess()
+
+    @property
+    def column_styles(self) -> List[Optional[Style]]:
+        """List[Optional[Style]]: |Style| for each column."""
+
+        return self.__col_style_list
+
+    @column_styles.setter
+    def column_styles(self, value: Sequence[Optional[Style]]) -> None:
+        if self.__col_style_list == value:
+            return
+
+        self.__col_style_list = list(value)
+
+        if self.__col_style_list:
+            self._dp_extractor.format_flags_list = [
+                _ts_to_flag[self._get_col_style(col_idx).thousand_separator]
+                for col_idx in range(len(self.__col_style_list))
+            ]
+        else:
+            self._dp_extractor.format_flags_list = []
+
+        self.__clear_preprocess()
+
+    @property
+    def colorize_terminal(self) -> bool:
+        return self.__colorize_terminal
+
+    @colorize_terminal.setter
+    def colorize_terminal(self, value: bool) -> None:
+        if self.__colorize_terminal == value:
+            return
+
+        self.__colorize_terminal = value
+        self.__clear_preprocess()
+
+    @property
+    def enable_ansi_escape(self) -> bool:
+        return self.__enable_ansi_escape
+
+    @enable_ansi_escape.setter
+    def enable_ansi_escape(self, value: bool) -> None:
+        if self.__enable_ansi_escape == value:
+            return
+
+        self.__enable_ansi_escape = value
+        self.__clear_preprocess()
+
+    @property
+    def _quoting_flags(self) -> Dict[Typecode, bool]:
+        return self._dp_extractor.quoting_flags
+
+    @_quoting_flags.setter
+    def _quoting_flags(self, value: Mapping[Typecode, bool]) -> None:
+        self._dp_extractor.quoting_flags = value
+        self.__clear_preprocess()
+
+    def add_style_filter(self, style_filter: StyleFilterFunc) -> None:
+        """Add a style filter function to the writer.
+
+        Args:
+            style_filter:
+                A function that is called for each table cell and returns a
+                style to apply to the cell.
+                The function must implement the following Protocol:
+
+                .. code-block:: python
+
+                    class StyleFilterFunc(Protocol):
+                        def __call__(self, cell: Cell, **kwargs: Any) -> Optional[Style]:
+                            ...
+
+                If more than one style filter function is added to the writer,
+                they are called in reverse order of addition (the last one
+                added is called first).
+                A style filter should return |None| when it does not need to
+                apply a style to the cell.
+                If every style filter returns |None|,
+                :py:attr:`~.default_style` is used.
+
+                You can pass keyword arguments to style filter functions via
+                :py:attr:`~.style_filter_kwargs`. By default, the attribute includes:
+
+                - ``writer``: the writer instance that called the style filter function
+        """
+
+        self._style_filters.insert(0, style_filter)
+        self.__clear_preprocess()
+
+    def add_col_separator_style_filter(self, style_filter: ColSeparatorStyleFilterFunc) -> None:
+        self._logger.logger.debug(
+            "add_col_separator_style_filter method is only implemented in text format writer classes"
+        )
+
+    def clear_theme(self) -> None:
+        """Remove all of the style filters."""
+
+        if not self._style_filters:
+            return
+
+        self._style_filters = copy.deepcopy(DEFAULT_STYLE_FILTERS)
+        self._check_style_filter_kwargs_funcs = []
+        self.__clear_preprocess()
+
+    def enable_style_filter(self) -> None:
+        """Enable style filters."""
+
+        if self._enable_style_filter is True:
+            return
+
+        self._enable_style_filter = True
+        self.__clear_preprocess()
+
+    def disable_style_filter(self, clear_filters: bool = False) -> None:
+        """Disable style filters.
+
+        Args:
+            clear_filters (bool):
+                If |True|, clear all of the style filters.
+                Defaults to |False|.
+        """
+
+        if clear_filters:
+            self.clear_theme()
+            return
+
+        if self._enable_style_filter is False:
+            return
+
+        self._enable_style_filter = False
+        self.__clear_preprocess()
+
+    def set_style(self, column: Union[str, int], style: Style) -> None:
+        """Set |Style| for a specific column.
+
+        Args:
+            column (|int| or |str|):
+                Column specifier: either a column index or a header name of the column.
+            style (|Style|):
+                Style value to be set to the column.
+
+        Raises:
+            ValueError: Raised when the column specifier is invalid.
+        """
+
+        column_idx = None
+
+        while len(self.headers) > len(self.__col_style_list):
+            self.__col_style_list.append(None)
+
+        if isinstance(column, int):
+            column_idx = column
+        elif isinstance(column, str):
+            try:
+                column_idx = self.headers.index(column)
+            except ValueError:
+                pass
+
+        if column_idx is not None:
+            self.__col_style_list[column_idx] = style
+            self.__clear_preprocess()
+            self._dp_extractor.format_flags_list = [
+                _ts_to_flag[self._get_col_style(col_idx).thousand_separator]
+                for col_idx in range(len(self.__col_style_list))
+            ]
+            return
+
+        raise ValueError(f"column must be an int or string: actual={column}")
+
+    def set_theme(self, theme: str, **kwargs: Any) -> None:
+        """Set style filters for a theme.
+
+        Args:
+            theme (str):
+                Name of the theme. A pytablewriter theme plugin corresponding
+                to the theme name must be installed.
+
+        Raises:
+            RuntimeError: Raised when the corresponding theme plugin is not installed.
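+
+        Example:
+            A minimal sketch, assuming the ``pytablewriter-altrow-theme``
+            plugin is installed (it provides the ``"altrow"`` theme name
+            used here):
+
+            .. code-block:: python
+
+                writer.set_theme("altrow")
+                writer.write_table()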
+ """ + + try: + fetched_theme = fetch_theme(theme.strip()) + except RuntimeError as e: + warnings.warn(f"{e}", UserWarning) + return + + if fetched_theme.style_filter: + self.add_style_filter(fetched_theme.style_filter) + + if fetched_theme.col_separator_style_filter: + self.add_col_separator_style_filter(fetched_theme.col_separator_style_filter) + + if fetched_theme.check_style_filter_kwargs: + self._check_style_filter_kwargs_funcs.append(fetched_theme.check_style_filter_kwargs) + + self.style_filter_kwargs.update(**kwargs) + + def __is_skip_close(self) -> bool: + try: + from _pytest.capture import EncodedFile + + if isinstance(self.stream, EncodedFile): + # avoid closing streams for pytest + return True + except ImportError: + pass + + try: + from _pytest.capture import CaptureIO + + if isinstance(self.stream, CaptureIO): + # avoid closing streams for pytest + return True + except ImportError: + try: + # for pytest 5.4.1 or older versions + from _pytest.compat import CaptureIO + + if isinstance(self.stream, CaptureIO): + # avoid closing streams for pytest + return True + except ImportError: + pass + + try: + from ipykernel.iostream import OutStream + + if isinstance(self.stream, OutStream): + # avoid closing streams for Jupyter Notebook + return True + except ImportError: + pass + + return False + + def close(self) -> None: + """ + Close the current |stream|. + """ + + if self.stream is None: + return + + try: + self.stream.isatty() + + if self.stream.name in ["", "", ""]: + return + except AttributeError: + pass + except ValueError: + # raised when executing an operation to a closed stream + pass + + if self.__is_skip_close(): + return + + try: + self.stream.close() + except AttributeError: + self._logger.logger.warning( + f"the stream has no close method implementation: type={type(self.stream)}" + ) + finally: + self._stream = None + + def from_tabledata(self, value: TableData, is_overwrite_table_name: bool = True) -> None: + """ + Set tabular attributes to the writer from |TableData|. + The following attributes are configured: + + - :py:attr:`~.table_name`. + - :py:attr:`~.headers`. + - :py:attr:`~.value_matrix`. + + |TableData| can be created from various data formats by + ``pytablereader``. More detailed information can be found in + https://pytablereader.rtfd.io/en/latest/ + + :param tabledata.TableData value: Input table data. + """ + + self.__clear_preprocess() + + if is_overwrite_table_name: + self.table_name = value.table_name if value.table_name else "" + + self.headers = value.headers + self.value_matrix = list(value.rows) + + if not value.has_value_dp_matrix: + return + + self._table_value_dp_matrix = value.value_dp_matrix + self._column_dp_list = self._dp_extractor.to_column_dp_list( + self._table_value_dp_matrix, self._column_dp_list + ) + self.__set_type_hints([col_dp.type_class for col_dp in self._column_dp_list]) + + self._is_complete_table_dp_preprocess = True + + def from_csv(self, csv_source: str, delimiter: str = ",") -> None: + """ + Set tabular attributes to the writer from a character-separated values (CSV) data source. + The following attributes are set to the writer by the method: + + - :py:attr:`~.headers`. + - :py:attr:`~.value_matrix`. + + :py:attr:`~.table_name` also be set if the CSV data source is a file. + In that case, :py:attr:`~.table_name` is as same as the filename. + + Args: + csv_source (str): + Input CSV data source can be designated CSV text or a CSV file path. + + delimiter (str): + Delimiter character of the CSV data source. 
+ Defaults to ``,``. + + Examples: + :ref:`example-from-csv` + + :Dependency Packages: + - `pytablereader `__ + """ + + import pytablereader as ptr + + loader = ptr.CsvTableTextLoader(csv_source, quoting_flags=self._quoting_flags) + loader.delimiter = delimiter + try: + for table_data in loader.load(): + self.from_tabledata(table_data, is_overwrite_table_name=False) + return + except ptr.DataError: + pass + + loader = ptr.CsvTableFileLoader(csv_source, quoting_flags=self._quoting_flags) + loader.delimiter = delimiter + for table_data in loader.load(): + self.from_tabledata(table_data) + + def from_dataframe( + self, + dataframe: "pandas.DataFrame", + add_index_column: bool = False, + overwrite_type_hints: bool = True, + ) -> None: + """ + Set tabular attributes to the writer from :py:class:`pandas.DataFrame`. + The following attributes are set by the method: + + - :py:attr:`~.headers` + - :py:attr:`~.value_matrix` + - :py:attr:`~.type_hints` + + Args: + dataframe(pandas.DataFrame or |str|): + Input pandas.DataFrame object or path to a DataFrame pickle. + add_index_column(bool, optional): + If |True|, add a column of ``index`` of the ``dataframe``. + Defaults to |False|. + overwrite_type_hints(bool): + If |True|, Overwrite type hints with dtypes within the DataFrame. + + Example: + :ref:`example-from-pandas-dataframe` + """ + + if typepy.String(dataframe).is_type(): + import pandas as pd + + dataframe = pd.read_pickle(dataframe) + + self.headers = list(dataframe.columns.values) + + if not self.type_hints or overwrite_type_hints: + self.type_hints = [extract_typepy_from_dtype(dtype) for dtype in dataframe.dtypes] + + if add_index_column: + self.headers = [" "] + self.headers + if self.type_hints: + self.type_hints = [Integer] + self.type_hints + self.value_matrix = [ + [index] + row + for index, row in zip(dataframe.index.tolist(), dataframe.values.tolist()) + ] + else: + self.value_matrix = dataframe.values.tolist() + + def from_series(self, series: "pandas.Series", add_index_column: bool = True) -> None: + """ + Set tabular attributes to the writer from :py:class:`pandas.Series`. + The following attributes are set by the method: + + - :py:attr:`~.headers` + - :py:attr:`~.value_matrix` + - :py:attr:`~.type_hints` + + Args: + series(pandas.Series): + Input pandas.Series object. + add_index_column(bool, optional): + If |True|, add a column of ``index`` of the ``series``. + Defaults to |True|. + """ + + if series.name: + self.headers = [series.name] + else: + self.headers = ["value"] + + self.type_hints = [extract_typepy_from_dtype(series.dtype)] + + if add_index_column: + self.headers = [""] + self.headers + if self.type_hints: + self.type_hints = [None] + self.type_hints + self.value_matrix = [ + [index] + [value] for index, value in zip(series.index.tolist(), series.tolist()) + ] + else: + self.value_matrix = [[value] for value in series.tolist()] + + def from_tablib(self, tablib_dataset: "tablib.Dataset") -> None: + """ + Set tabular attributes to the writer from :py:class:`tablib.Dataset`. + """ + + self.headers = tablib_dataset.headers + self.value_matrix = [row for row in tablib_dataset] + + def from_writer( + self, writer: "AbstractTableWriter", is_overwrite_table_name: bool = True + ) -> None: + """ + Copy attributes from another table writer class instance. + + Args: + writer (pytablewriter.writer.AbstractTableWriter): + Another table writer instance. + is_overwrite_table_name (bool, optional): + Overwrite the table name of the writer with the table name of the ``writer``. 
+                Defaults to |True|.
+        """
+
+        self.__clear_preprocess()
+
+        if is_overwrite_table_name:
+            self.table_name = str(writer.table_name)
+
+        self.headers = writer.headers
+        self.value_matrix = writer.value_matrix
+
+        self.type_hints = writer.type_hints
+        self.column_styles = writer.column_styles
+        self._style_filters = writer._style_filters
+        self.style_filter_kwargs = writer.style_filter_kwargs
+        self.margin = writer.margin
+
+        self._table_headers = writer._table_headers
+        self._table_value_dp_matrix = writer._table_value_dp_matrix
+        self._column_dp_list = writer._column_dp_list
+        self._table_value_matrix = writer._table_value_matrix
+
+        self.stream = writer.stream
+
+        self._is_complete_table_dp_preprocess = writer._is_complete_table_dp_preprocess
+        self._is_complete_table_property_preprocess = writer._is_complete_table_property_preprocess
+        self._is_complete_header_preprocess = writer._is_complete_header_preprocess
+        self._is_complete_value_matrix_preprocess = writer._is_complete_value_matrix_preprocess
+
+    def register_trans_func(self, trans_func: TransFunc) -> None:
+        self._dp_extractor.register_trans_func(trans_func)
+        self.__clear_preprocess()
+
+    def update_preprocessor(self, **kwargs: Any) -> None:
+        # TODO: documentation
+        #   is_escape_formula_injection: for CSV/Excel
+
+        if not self._dp_extractor.update_preprocessor(**kwargs):
+            return
+
+        self.__clear_preprocess()
+
+    def write_table(self, **kwargs: Any) -> None:
+        """
+        |write_table|.
+        """
+
+        with self._logger:
+            try:
+                self._verify_property()
+            except EmptyTableDataError:
+                self._logger.logger.debug("no tabular data found")
+                return
+
+            self._write_table(**kwargs)
+
+    def _write_table_iter(self, **kwargs: Any) -> None:
+        if not self.support_split_write:
+            raise NotSupportedError("the class does not support the write_table_iter method")
+
+        self._verify_style_filter_kwargs()
+        self._verify_table_name()
+        self._verify_stream()
+
+        if all(
+            [typepy.is_empty_sequence(self.headers), typepy.is_empty_sequence(self.value_matrix)]
+        ):
+            self._logger.logger.debug("no tabular data found")
+            return
+
+        self._verify_header()
+
+        self._logger.logger.debug(f"_write_table_iter: iteration-length={self.iteration_length:d}")
+
+        stash_is_write_header = self.is_write_header
+        stash_is_write_opening_row = self.is_write_opening_row
+        stash_is_write_closing_row = self.is_write_closing_row
+
+        try:
+            self.is_write_closing_row = False
+            self._iter_count = 1
+
+            for work_matrix in self.value_matrix:
+                is_final_iter = all(
+                    [self.iteration_length > 0, self._iter_count >= self.iteration_length]
+                )
+
+                if is_final_iter:
+                    self.is_write_closing_row = True
+
+                self.__set_value_matrix(work_matrix)
+                self.__clear_preprocess_status()
+
+                with self._logger:
+                    self._write_table(**kwargs)
+
+                if not is_final_iter:
+                    self._write_value_row_separator()
+
+                self.is_write_opening_row = False
+                self.is_write_header = False
+
+                self.write_callback(self._iter_count, self.iteration_length)
+
+                # update typehint for the next iteration
+                """
+                if self.type_hints is None:
+                    self.__set_type_hints([
+                        column_dp.type_class for column_dp in self._column_dp_list
+                    ])
+                """
+
+                if is_final_iter:
+                    break
+
+                self._iter_count += 1
+        finally:
+            self.is_write_header = stash_is_write_header
+            self.is_write_opening_row = stash_is_write_opening_row
+            self.is_write_closing_row = stash_is_write_closing_row
+            self._iter_count = None
+
+    def _get_padding_len(
+        self, column_dp: ColumnDataProperty, value_dp: Optional[DataProperty] = None
+    ) -> int:
+        if not self.is_padding:
+            return 0
+
+        try:
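+            # value_dp may be None here; the attribute access below then raises
+            # AttributeError and we fall back to the column's width instead.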
+ return cast(DataProperty, value_dp).get_padding_len(column_dp.ascii_char_width) + except AttributeError: + return column_dp.ascii_char_width + + def _to_header_item(self, col_dp: ColumnDataProperty, value_dp: DataProperty) -> str: + style = self._fetch_style(HEADER_ROW, col_dp, value_dp) + header = self._apply_style_to_header_item(col_dp, value_dp, style) + header = self._styler.apply_terminal_style(header, style=style) + + return header + + def _apply_style_to_header_item( + self, col_dp: ColumnDataProperty, value_dp: DataProperty, style: Style + ) -> str: + return self._styler.apply_align( + self._styler.apply(col_dp.dp_to_str(value_dp), style=style), style=style + ) + + def _to_row_item(self, row_idx: int, col_dp: ColumnDataProperty, value_dp: DataProperty) -> str: + style = self._fetch_style(row_idx, col_dp, value_dp) + value = self._apply_style_to_row_item(row_idx, col_dp, value_dp, style) + + return self._styler.apply_terminal_style(value, style=style) + + def _apply_style_to_row_item( + self, row_idx: int, col_dp: ColumnDataProperty, value_dp: DataProperty, style: Style + ) -> str: + return self._styler.apply_align( + self._styler.apply(col_dp.dp_to_str(value_dp), style=style), style=style + ) + + def _fetch_style_from_filter( + self, row_idx: int, col_dp: ColumnDataProperty, value_dp: DataProperty, default_style: Style + ) -> Style: + if not self._enable_style_filter: + return default_style + + self.style_filter_kwargs.update({"writer": self}) + + style: Optional[Style] = None + for style_filter in self._style_filters: + style = style_filter( + Cell( + row=row_idx, + col=col_dp.column_index, + value=value_dp.data, + default_style=default_style, + ), + **self.style_filter_kwargs, + ) + if style: + break + + if style is None: + style = copy.deepcopy(default_style) + + if style.align is None or (style.align == Align.AUTO and row_idx >= 0): + style.align = self.__retrieve_align_from_data(col_dp, value_dp) + + if style.padding is None: + style.padding = self._get_padding_len(col_dp, value_dp) + + return style + + def _get_col_style(self, col_idx: int) -> Style: + try: + style = self.column_styles[col_idx] + except (TypeError, IndexError, KeyError): + pass + else: + if style: + return style + + return self.default_style + + def _get_align(self, col_idx: int, default_align: Align) -> Align: + align = self._get_col_style(col_idx).align + + if align is None: + return default_align + + if align == Align.AUTO: + return default_align + + return align + + def __retrieve_align_from_data( + self, col_dp: ColumnDataProperty, value_dp: DataProperty + ) -> Align: + if col_dp.typecode == Typecode.STRING and ( + value_dp.typecode in (Typecode.INTEGER, Typecode.REAL_NUMBER) + or value_dp.typecode == Typecode.STRING + and value_dp.is_include_ansi_escape + ): + return value_dp.align + + return col_dp.align + + def _verify_property(self) -> None: + self._verify_style_filter_kwargs() + self._verify_table_name() + self._verify_stream() + + if all( + [ + typepy.is_empty_sequence(self.headers), + typepy.is_empty_sequence(self.value_matrix), + typepy.is_empty_sequence(self._table_value_dp_matrix), + ] + ): + raise EmptyTableDataError() + + self._verify_header() + try: + self._verify_value_matrix() + except EmptyValueError: + pass + + def __set_value_matrix(self, value_matrix: Sequence) -> None: + self.__value_matrix_org = value_matrix + + def __set_type_hints(self, type_hints: Sequence[Union[str, TypeHint]]) -> None: + self._dp_extractor.column_type_hints = type_hints + + def 
_verify_style_filter_kwargs(self) -> None: + for checker in self._check_style_filter_kwargs_funcs: + checker(**self.style_filter_kwargs) + + def _verify_table_name(self) -> None: + if all([self._is_require_table_name, typepy.is_null_string(self.table_name)]): + raise EmptyTableNameError( + "table_name must be a string, with at least one or more character." + ) + + def _verify_stream(self) -> None: + if self.stream is None: + raise OSError("null output stream") + + def _verify_header(self) -> None: + if self._is_require_header and not self._use_default_header: + self._validate_empty_header() + + def _validate_empty_header(self) -> None: + """ + Raises: + ValueError: If the |headers| is empty. + """ + + if typepy.is_empty_sequence(self.headers): + raise ValueError("headers expected to have one or more header names") + + def _verify_value_matrix(self) -> None: + if typepy.is_empty_sequence(self.value_matrix): + raise EmptyValueError() + + def _create_styler(self, writer: "AbstractTableWriter") -> StylerInterface: + from ..style._styler import NullStyler + + return NullStyler(writer) + + def _preprocess_table_dp(self) -> None: + if self._is_complete_table_dp_preprocess: + return + + self._logger.logger.debug("_preprocess_table_dp") + + if typepy.is_empty_sequence(self.headers) and self._use_default_header: + self.headers = [ + convert_idx_to_alphabet(col_idx) + for col_idx in range(len(self.__value_matrix_org[0])) + ] + + try: + self._table_value_dp_matrix = self._dp_extractor.to_dp_matrix( + to_value_matrix(self.headers, self.__value_matrix_org) + ) + except TypeError as e: + self._logger.logger.debug(to_error_message(e)) + self._table_value_dp_matrix = [] + + self._column_dp_list = self._dp_extractor.to_column_dp_list( + self._table_value_dp_matrix, self._column_dp_list + ) + + self._is_complete_table_dp_preprocess = True + + def _fetch_style(self, row: int, col_dp: ColumnDataProperty, value_dp: DataProperty) -> Style: + default_style = self._get_col_style(col_dp.column_index) + return self._fetch_style_from_filter(row, col_dp, value_dp, default_style) + + def _preprocess_table_property(self) -> None: + if self._is_complete_table_property_preprocess: + return + + self._logger.logger.debug("_preprocess_table_property") + + if self._iter_count == 1: + for column_dp in self._column_dp_list: + column_dp.extend_width(int(math.ceil(column_dp.ascii_char_width * 0.25))) + + header_dp_list = self._dp_extractor.to_header_dp_list() + if not header_dp_list: + return + + for column_dp in self._column_dp_list: + style = self._get_col_style(column_dp.column_index) + header_style = self._fetch_style( + HEADER_ROW, column_dp, header_dp_list[column_dp.column_index] + ) + body_width = self._styler.get_additional_char_width(style) + header_width = self._styler.get_additional_char_width(header_style) + column_dp.extend_body_width(max(body_width, header_width)) + + self._is_complete_table_property_preprocess = True + + def _preprocess_header(self) -> None: + if self._is_complete_header_preprocess: + return + + self._logger.logger.debug("_preprocess_header") + + self._table_headers = [ + self._to_header_item(col_dp, header_dp) + for col_dp, header_dp in zip( + self._column_dp_list, self._dp_extractor.to_header_dp_list() + ) + ] + + self._is_complete_header_preprocess = True + + def _preprocess_value_matrix(self) -> None: + if self._is_complete_value_matrix_preprocess: + return + + self._logger.logger.debug( + f"_preprocess_value_matrix: value-rows={len(self._table_value_dp_matrix)}" + ) + + 
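+        # Render every cell up front: each DataProperty is converted to its
+        # final string form with the column/value styles and alignment applied.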
self._table_value_matrix = [ + [ + self._to_row_item(row_idx, col_dp, value_dp) + for col_dp, value_dp in zip(self._column_dp_list, value_dp_list) + ] + for row_idx, value_dp_list in enumerate(self._table_value_dp_matrix) + ] + + self._is_complete_value_matrix_preprocess = True + + def _preprocess(self) -> None: + self._preprocess_table_dp() + self._preprocess_table_property() + self._preprocess_header() + self._preprocess_value_matrix() + + def _clear_preprocess(self) -> None: + self.__clear_preprocess() + + def __clear_preprocess(self) -> None: + self.__clear_preprocess_status() + self.__clear_preprocess_data() diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc14813eddc5efdc80a2b0c5dfd612867c1d266e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eead9b4a2ad614505a1fdbd43d26f37880fb7b8a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..175f946a839c62fbb593fa3f29bdc73ef39819b1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py new file mode 100644 index 0000000000000000000000000000000000000000..3b30d46f1e8f0935fc9fb2116118292679d8941b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py @@ -0,0 +1,565 @@ +import tempfile +import shutil +import os + +import numpy as np +from numpy import pi +from numpy.testing import (assert_array_almost_equal, + assert_equal, assert_warns, + assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning, + multilinear, exponential, unilinear, quadratic, + polynomial) + + +class TestODR: + + # Bad Data for 'x' + + def test_bad_data(self): + assert_raises(ValueError, Data, 2, 1) + assert_raises(ValueError, RealData, 2, 1) + + # Empty Data for 'x' + def empty_data_func(self, B, x): + return B[0]*x + B[1] + + def test_empty_data(self): + beta0 = [0.02, 0.0] + linear = Model(self.empty_data_func) + + empty_dat = Data([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + empty_dat = RealData([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + # Explicit Example + + def explicit_fcn(self, B, x): + ret = B[0] + 
B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) + return ret + + def explicit_fjd(self, B, x): + eBx = np.exp(B[2]*x) + ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx + return ret + + def explicit_fjb(self, B, x): + eBx = np.exp(B[2]*x) + res = np.vstack([np.ones(x.shape[-1]), + np.power(eBx-1.0, 2), + B[1]*2.0*(eBx-1.0)*eBx*x]) + return res + + def test_explicit(self): + explicit_mod = Model( + self.explicit_fcn, + fjacb=self.explicit_fjb, + fjacd=self.explicit_fjd, + meta=dict(name='Sample Explicit Model', + ref='ODRPACK UG, pg. 39'), + ) + explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], + [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, + 1213.8,1215.5,1212.]) + explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], + ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) + explicit_odr.set_job(deriv=2) + explicit_odr.set_iprint(init=0, iter=0, final=0) + + out = explicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.2646548050648876e+03, -5.4018409956678255e+01, + -8.7849712165253724e-02]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, + -8.0978217468468912e-04], + [-3.7421976890364739e-01, 1.0529686462751804e+00, + -1.9453521827942002e-03], + [-8.0978217468468912e-04, -1.9453521827942002e-03, + 1.6827336938454476e-05]]), + ) + + # Implicit Example + + def implicit_fcn(self, B, x): + return (B[2]*np.power(x[0]-B[0], 2) + + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + + B[4]*np.power(x[1]-B[1], 2) - 1.0) + + def test_implicit(self): + implicit_mod = Model( + self.implicit_fcn, + implicit=1, + meta=dict(name='Sample Implicit Model', + ref='ODRPACK UG, pg. 49'), + ) + implicit_dat = Data([ + [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, + -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], + [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, + -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], + 1, + ) + implicit_odr = ODR(implicit_dat, implicit_mod, + beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) + + out = implicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, + 0.0162299708984738, 0.0797537982976416]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, + 0.0027500347539902, 0.0034962501532468]), + ) + assert_allclose( + out.cov_beta, + np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, + 7.0263550868344446e-02, -4.7175267373474862e-02, + 5.2515575927380355e-02], + [-1.9437686411979040e+00, 2.0481509222414456e+00, + -6.1600515853057307e-02, 4.6268827806232933e-02, + -5.8822307501391467e-02], + [7.0263550868344446e-02, -6.1600515853057307e-02, + 2.8659542561579308e-03, -1.4628662260014491e-03, + 1.4528860663055824e-03], + [-4.7175267373474862e-02, 4.6268827806232933e-02, + -1.4628662260014491e-03, 1.2855592885514335e-03, + -1.2692942951415293e-03], + [5.2515575927380355e-02, -5.8822307501391467e-02, + 1.4528860663055824e-03, -1.2692942951415293e-03, + 2.0778813389755596e-03]]), + rtol=1e-6, atol=2e-6, + ) + + # Multi-variable Example + + def multi_fcn(self, B, x): + if (x < 0.0).any(): + raise OdrStop + theta = pi*B[3]/2. 
+ ctheta = np.cos(theta) + stheta = np.sin(theta) + omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) + phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) + r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + + np.power(omega*stheta, 2)), -B[4]) + ret = np.vstack([B[1] + r*np.cos(B[4]*phi), + r*np.sin(B[4]*phi)]) + return ret + + def test_multi(self): + multi_mod = Model( + self.multi_fcn, + meta=dict(name='Sample Multi-Response Model', + ref='ODRPACK UG, pg. 56'), + ) + + multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, + 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, + 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) + multi_y = np.array([ + [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, + 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, + 2.934, 2.876, 2.838, 2.798, 2.759], + [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, + 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, + 0.202, 0.182, 0.168, 0.153, 0.139], + ]) + n = len(multi_x) + multi_we = np.zeros((2, 2, n), dtype=float) + multi_ifixx = np.ones(n, dtype=int) + multi_delta = np.zeros(n, dtype=float) + + multi_we[0,0,:] = 559.6 + multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 + multi_we[1,1,:] = 8397.0 + + for i in range(n): + if multi_x[i] < 100.0: + multi_ifixx[i] = 0 + elif multi_x[i] <= 150.0: + pass # defaults are fine + elif multi_x[i] <= 1000.0: + multi_delta[i] = 25.0 + elif multi_x[i] <= 10000.0: + multi_delta[i] = 560.0 + elif multi_x[i] <= 100000.0: + multi_delta[i] = 9500.0 + else: + multi_delta[i] = 144000.0 + if multi_x[i] == 100.0 or multi_x[i] == 150.0: + multi_we[:,:,i] = 0.0 + + multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), + we=multi_we) + multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], + delta0=multi_delta, ifixx=multi_ifixx) + multi_odr.set_job(deriv=1, del_init=1) + + out = multi_odr.run() + assert_array_almost_equal( + out.beta, + np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978, + 0.5101147161764654, 0.5173902330489161]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757, + 0.0132642749596149, 0.0288529201353984]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406, + -0.0058700836512467, 0.011281212888768], + [0.0036159705923791, 0.0064793789429006, 0.0517610978353126, + -0.0051181304940204, 0.0130726943624117], + [0.0438637051470406, 0.0517610978353126, 0.5182263323095322, + -0.0563083340093696, 0.1269490939468611], + [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, + 0.0066939246261263, -0.0140184391377962], + [0.011281212888768, 0.0130726943624117, 0.1269490939468611, + -0.0140184391377962, 0.0316733013820852]]), + ) + + # Pearson's Data + # K. 
Pearson, Philosophical Magazine, 2, 559 (1901) + + def pearson_fcn(self, B, x): + return B[0] + B[1]*x + + def test_pearson(self): + p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) + p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) + p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) + p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) + + p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) + + # Reverse the data to test invariance of results + pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) + + p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) + + p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) + pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) + + out = p_odr.run() + assert_array_almost_equal( + out.beta, + np.array([5.4767400299231674, -0.4796082367610305]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.3590121690702467, 0.0706291186037444]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0854275622946333, -0.0161807025443155], + [-0.0161807025443155, 0.003306337993922]]), + ) + + rout = pr_odr.run() + assert_array_almost_equal( + rout.beta, + np.array([11.4192022410781231, -2.0850374506165474]), + ) + assert_array_almost_equal( + rout.sd_beta, + np.array([0.9820231665657161, 0.3070515616198911]), + ) + assert_array_almost_equal( + rout.cov_beta, + np.array([[0.6391799462548782, -0.1955657291119177], + [-0.1955657291119177, 0.0624888159223392]]), + ) + + # Lorentz Peak + # The data is taken from one of the undergraduate physics labs I performed. + + def lorentz(self, beta, x): + return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - + beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) + + def test_lorentz(self): + l_sy = np.array([.29]*18) + l_sx = np.array([.000972971,.000948268,.000707632,.000706679, + .000706074, .000703918,.000698955,.000456856, + .000455207,.000662717,.000654619,.000652694, + .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) + + l_dat = RealData( + [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, + 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, + 3.6562, 3.62498, 3.55525, 3.41886], + [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, + 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], + sx=l_sx, + sy=l_sy, + ) + l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) + l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) + + out = l_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.4306780846149925e+03, 1.3390509034538309e-01, + 3.7798193600109009e+00]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([7.3621186811330963e-01, 3.5068899941471650e-04, + 2.4451209281408992e-04]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[2.4714409064597873e-01, -6.9067261911110836e-05, + -3.1236953270424990e-05], + [-6.9067261911110836e-05, 5.6077531517333009e-08, + 3.6133261832722601e-08], + [-3.1236953270424990e-05, 3.6133261832722601e-08, + 2.7261220025171730e-08]]), + ) + + def test_ticket_1253(self): + def linear(c, x): + return c[0]*x+c[1] + + c = [2.0, 3.0] + x = np.linspace(0, 10) + y = linear(c, x) + + model = Model(linear) + data = Data(x, y, wd=1.0, we=1.0) + job = ODR(data, model, beta0=[1.0, 1.0]) + result = job.run() + assert_equal(result.info, 2) + + # Verify fix for gh-9140 + + def test_ifixx(self): + x1 = [-2.01, -0.99, -0.001, 1.02, 1.98] + x2 = [3.98, 1.01, 0.001, 0.998, 4.01] + fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int))) + data = Data(np.vstack((x1, x2)), y=1, 
fix=fix) + model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True) + + odr1 = ODR(data, model, beta0=np.array([1.])) + sol1 = odr1.run() + odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix) + sol2 = odr2.run() + assert_equal(sol1.beta, sol2.beta) + + # verify bugfix for #11800 in #11802 + def test_ticket_11800(self): + # parameters + beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5]) + nr_measurements = 10 + + std_dev_x = 0.01 + x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866, + -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301], + [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829, + 0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]]) + + std_dev_y = 0.05 + y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642, + 0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929], + [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536, + -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]]) + + beta_solution = np.array([ + 2.62920235756665876536e+00, -1.26608484996299608838e+02, + 1.29703572775403074502e+02, -1.88560985401185465804e+00, + 7.83834160771274923718e+01, -7.64124076838087091801e+01]) + + # model's function and Jacobians + def func(beta, x): + y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :] + y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :] + + return np.vstack((y0, y1)) + + def df_dbeta_odr(beta, x): + nr_meas = np.shape(x)[1] + zeros = np.zeros(nr_meas) + ones = np.ones(nr_meas) + + dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros]) + dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]]) + + return np.stack((dy0, dy1)) + + def df_dx_odr(beta, x): + nr_meas = np.shape(x)[1] + ones = np.ones(nr_meas) + + dy0 = np.array([beta[1] * ones, beta[2] * ones]) + dy1 = np.array([beta[4] * ones, beta[5] * ones]) + return np.stack((dy0, dy1)) + + # do measurements with errors in independent and dependent variables + x0_true = np.linspace(1, 10, nr_measurements) + x1_true = np.linspace(1, 10, nr_measurements) + x_true = np.array([x0_true, x1_true]) + + y_true = func(beta_true, x_true) + + x_meas = x_true + x_error + y_meas = y_true + y_error + + # estimate model's parameters + model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr) + + data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y) + + odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100) + #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1) + odr_obj.set_job(deriv=3) + + odr_out = odr_obj.run() + + # check results + assert_equal(odr_out.info, 1) + assert_array_almost_equal(odr_out.beta, beta_solution) + + def test_multilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 10.0 + 5.0 * x + data = Data(x, y) + odr_obj = ODR(data, multilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [10.0, 5.0]) + + def test_exponential_model(self): + x = np.linspace(0.0, 5.0) + y = -10.0 + np.exp(0.5*x) + data = Data(x, y) + odr_obj = ODR(data, exponential) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [-10.0, 0.5]) + + def test_polynomial_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3 + poly_model = polynomial(3) + data = Data(x, y) + odr_obj = ODR(data, poly_model) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0]) + + def test_unilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + data = Data(x, y) + odr_obj = 
ODR(data, unilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0]) + + def test_quadratic_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x ** 2 + 2.0 * x + 3.0 + data = Data(x, y) + odr_obj = ODR(data, quadratic) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0]) + + def test_work_ind(self): + + def func(par, x): + b0, b1 = par + return b0 + b1 * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + # do the fitting + linear_model = Model(func) + real_data = RealData(x, y, sx=x_err, sy=y_err) + odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4]) + odr_obj.set_job(fit_type=0) + out = odr_obj.run() + + sd_ind = out.work_ind['sd'] + assert_array_almost_equal(out.sd_beta, + out.work[sd_ind:sd_ind + len(out.sd_beta)]) + + @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better " + "not to run this test, see gh-13127") + def test_output_file_overwrite(self): + """ + Verify fix for gh-1892 + """ + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + data = Data(np.arange(10), 12 * np.arange(10)) + tmp_dir = tempfile.mkdtemp() + error_file_path = os.path.join(tmp_dir, "error.dat") + report_file_path = os.path.join(tmp_dir, "report.dat") + try: + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path).run() + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path, overwrite=True).run() + finally: + # remove output files for clean up + shutil.rmtree(tmp_dir) + + def test_odr_model_default_meta(self): + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + p.set_meta(name='Sample Model Meta', ref='ODRPACK') + assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'}) + + def test_work_array_del_init(self): + """ + Verify fix for gh-18739 where del_init=1 fails. + """ + def func(b, x): + return b[0] + b[1] * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + linear_model = Model(func) + # Try various shapes of the `we` array from various `sy` and `covy` + rd0 = RealData(x, y, sx=x_err, sy=y_err) + rd1 = RealData(x, y, sx=x_err, sy=0.1) + rd2 = RealData(x, y, sx=x_err, sy=[0.1]) + rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1)) + rd4 = RealData(x, y, sx=x_err, covy=[[0.01]]) + rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01)) + for rd in [rd0, rd1, rd2, rd3, rd4, rd5]: + odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4], + delta0=np.full(n_data, -0.1)) + odr_obj.set_job(fit_type=0, del_init=1) + # Just make sure that it runs without raising an exception. 
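+            # del_init=1 makes ODRPACK take the initial delta values from the
+            # user-supplied delta0 array, which is the code path fixed by gh-18739.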
+ odr_obj.run() diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate/__init__.py b/llmeval-env/lib/python3.10/site-packages/tabulate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..503df34848d710b62099e514dee3202de29c3f2a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabulate/__init__.py @@ -0,0 +1,2716 @@ +"""Pretty-print tabular data.""" + +from collections import namedtuple +from collections.abc import Iterable, Sized +from html import escape as htmlescape +from itertools import chain, zip_longest as izip_longest +from functools import reduce, partial +import io +import re +import math +import textwrap +import dataclasses + +try: + import wcwidth # optional wide-character (CJK) support +except ImportError: + wcwidth = None + + +def _is_file(f): + return isinstance(f, io.IOBase) + + +__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"] +try: + from .version import version as __version__ # noqa: F401 +except ImportError: + pass # running __init__.py as a script, AppVeyor pytests + + +# minimum extra space in headers +MIN_PADDING = 2 + +# Whether or not to preserve leading/trailing whitespace in data. +PRESERVE_WHITESPACE = False + +_DEFAULT_FLOATFMT = "g" +_DEFAULT_INTFMT = "" +_DEFAULT_MISSINGVAL = "" +# default align will be overwritten by "left", "center" or "decimal" +# depending on the formatter +_DEFAULT_ALIGN = "default" + + +# if True, enable wide-character (CJK) support +WIDE_CHARS_MODE = wcwidth is not None + +# Constant that can be used as part of passed rows to generate a separating line +# It is purposely an unprintable character, very unlikely to be used in a table +SEPARATING_LINE = "\001" + +Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) + + +DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) + + +# A table structure is supposed to be: +# +# --- lineabove --------- +# headerrow +# --- linebelowheader --- +# datarow +# --- linebetweenrows --- +# ... (more datarows) ... +# --- linebetweenrows --- +# last datarow +# --- linebelow --------- +# +# TableFormat's line* elements can be +# +# - either None, if the element is not used, +# - or a Line tuple, +# - or a function: [col_widths], [col_alignments] -> string. +# +# TableFormat's *row elements can be +# +# - either None, if the element is not used, +# - or a DataRow tuple, +# - or a function: [cell_values], [col_widths], [col_alignments] -> string. +# +# padding (an integer) is the amount of white space around data values. +# +# with_header_hide: +# +# - either None, to display all table elements unconditionally, +# - or a list of elements not to be displayed if the table has column headers. 
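+# As an illustrative sketch (not one of the formats registered below), a
+# bare-bones format that separates columns with " | " could be declared as:
+#
+#     minimal_format = TableFormat(
+#         lineabove=None,
+#         linebelowheader=Line("", "-", "-+-", ""),
+#         linebetweenrows=None,
+#         linebelow=None,
+#         headerrow=DataRow("", " | ", ""),
+#         datarow=DataRow("", " | ", ""),
+#         padding=0,
+#         with_header_hide=None,
+#     )
+#
+# and passed directly to tabulate(..., tablefmt=minimal_format).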
+#
+TableFormat = namedtuple(
+    "TableFormat",
+    [
+        "lineabove",
+        "linebelowheader",
+        "linebetweenrows",
+        "linebelow",
+        "headerrow",
+        "datarow",
+        "padding",
+        "with_header_hide",
+    ],
+)
+
+
+def _is_separating_line(row):
+    row_type = type(row)
+    is_sl = (row_type == list or row_type == str) and (
+        (len(row) >= 1 and row[0] == SEPARATING_LINE)
+        or (len(row) >= 2 and row[1] == SEPARATING_LINE)
+    )
+    return is_sl
+
+
+def _pipe_segment_with_colons(align, colwidth):
+    """Return a segment of a horizontal line with optional colons which
+    indicate column's alignment (as in `pipe` output format)."""
+    w = colwidth
+    if align in ["right", "decimal"]:
+        return ("-" * (w - 1)) + ":"
+    elif align == "center":
+        return ":" + ("-" * (w - 2)) + ":"
+    elif align == "left":
+        return ":" + ("-" * (w - 1))
+    else:
+        return "-" * w
+
+
+def _pipe_line_with_colons(colwidths, colaligns):
+    """Return a horizontal line with optional colons to indicate column's
+    alignment (as in `pipe` output format)."""
+    if not colaligns:  # e.g. printing an empty data frame (github issue #15)
+        colaligns = [""] * len(colwidths)
+    segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
+    return "|" + "|".join(segments) + "|"
+
+
+def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
+    alignment = {
+        "left": "",
+        "right": 'align="right"| ',
+        "center": 'align="center"| ',
+        "decimal": 'align="right"| ',
+    }
+    # hard-coded padding _around_ align attribute and value together
+    # rather than padding parameter which affects only the value
+    values_with_attrs = [
+        " " + alignment.get(a, "") + c + " " for c, a in zip(cell_values, colaligns)
+    ]
+    colsep = separator * 2
+    return (separator + colsep.join(values_with_attrs)).rstrip()
+
+
+def _textile_row_with_attrs(cell_values, colwidths, colaligns):
+    cell_values[0] += " "
+    alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."}
+    values = (alignment.get(a, "") + v for a, v in zip(colaligns, cell_values))
+    return "|" + "|".join(values) + "|"
+
+
+def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
+    # this table header will be suppressed if there is a header row
+    return "<table>\n<tbody>"
+
+
+def _html_row_with_attrs(celltag, unsafe, cell_values, colwidths, colaligns):
+    alignment = {
+        "left": "",
+        "right": ' style="text-align: right;"',
+        "center": ' style="text-align: center;"',
+        "decimal": ' style="text-align: right;"',
+    }
+    if unsafe:
+        values_with_attrs = [
+            "<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ""), c)
+            for c, a in zip(cell_values, colaligns)
+        ]
+    else:
+        values_with_attrs = [
+            "<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ""), htmlescape(c))
+            for c, a in zip(cell_values, colaligns)
+        ]
+    rowhtml = "<tr>{}</tr>".format("".join(values_with_attrs).rstrip())
+    if celltag == "th":  # it's a header row, create a new table header
+        rowhtml = f"<table>\n<thead>\n{rowhtml}\n</thead>\n<tbody>"
+    return rowhtml
+
+
+def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=""):
+    alignment = {
+        "left": "",
+        "right": '<style="text-align: right;">',
+        "center": '<style="text-align: center;">',
+        "decimal": '<style="text-align: right;">',
+    }
+    values_with_attrs = [
+        "{}{} {} ".format(celltag, alignment.get(a, ""), header + c + header)
+        for c, a in zip(cell_values, colaligns)
+    ]
+    return "".join(values_with_attrs) + "||"
+
+
+def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False, longtable=False):
+    alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
+    tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
+    return "\n".join(
+        [
+            ("\\begin{tabular}{" if not longtable else "\\begin{longtable}{")
+            + tabular_columns_fmt
+            + "}",
+            "\\toprule" if booktabs else "\\hline",
+        ]
+    )
+
+
+def _asciidoc_row(is_header, *args):
+    """handle header and data rows for asciidoc format"""
+
+    def make_header_line(is_header, colwidths, colaligns):
+        # generate the column specifiers
+
+        alignment = {"left": "<", "right": ">", "center": "^", "decimal": ">"}
+        # use the column widths generated by tabulate for the asciidoc column width specifiers
+        asciidoc_alignments = zip(
+            colwidths, [alignment[colalign] for colalign in colaligns]
+        )
+        asciidoc_column_specifiers = [
+            "{:d}{}".format(width, align) for width, align in asciidoc_alignments
+        ]
+        header_list = ['cols="' + (",".join(asciidoc_column_specifiers)) + '"']
+
+        # generate the list of options (currently only "header")
+        options_list = []
+
+        if is_header:
+            options_list.append("header")
+
+        if options_list:
+            header_list += ['options="' + ",".join(options_list) + '"']
+
+        # generate the list of entries in the table header field
+
+        return "[{}]\n|====".format(",".join(header_list))
+
+    if len(args) == 2:
+        # two arguments are passed if called in the context of aboveline
+        # print the table header with column widths and optional header tag
+        return make_header_line(False, *args)
+
+    elif len(args) == 3:
+        # three arguments are passed if called in the context of dataline or headerline
+        # print the table line and make the aboveline if it is a header
+
+        cell_values, colwidths, colaligns = args
+        data_line = "|" + "|".join(cell_values)
+
+        if is_header:
+            return make_header_line(True, colwidths, colaligns) + "\n" + data_line
+        else:
+            return data_line
+
+    else:
+        raise ValueError(
+            " _asciidoc_row() requires two (colwidths, colaligns) "
+            + "or three (cell_values, colwidths, colaligns) arguments) "
+        )
+
+
+LATEX_ESCAPE_RULES = {
+    r"&": r"\&",
+    r"%": r"\%",
+    r"$": r"\$",
+    r"#": r"\#",
+    r"_": r"\_",
+    r"^": r"\^{}",
+    r"{": r"\{",
+    r"}": r"\}",
+    r"~": r"\textasciitilde{}",
+    "\\": r"\textbackslash{}",
+    r"<": r"\ensuremath{<}",
+    r">": r"\ensuremath{>}",
+}
+
+
+def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
+    def escape_char(c):
+        return escrules.get(c, c)
+
+    escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
+    rowfmt = DataRow("", "&", "\\\\")
+    return _build_simple_row(escaped_values, rowfmt)
+
+
+def _rst_escape_first_column(rows, headers):
+    def escape_empty(val):
+        if isinstance(val, (str, bytes)) and not val.strip():
+            return ".."
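+            # ".." is an empty reStructuredText comment; emitting it for blank
+            # first-column cells keeps rst table parsers from misreading the row.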
+ else: + return val + + new_headers = list(headers) + new_rows = [] + if headers: + new_headers[0] = escape_empty(headers[0]) + for row in rows: + new_row = list(row) + if new_row: + new_row[0] = escape_empty(row[0]) + new_rows.append(new_row) + return new_rows, new_headers + + +_table_formats = { + "simple": TableFormat( + lineabove=Line("", "-", " ", ""), + linebelowheader=Line("", "-", " ", ""), + linebetweenrows=None, + linebelow=Line("", "-", " ", ""), + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=["lineabove", "linebelow"], + ), + "plain": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=None, + ), + "grid": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=Line("+", "-", "+", "+"), + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_grid": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_grid": TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_grid": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=Line("┣", "━", "╋", "┫"), + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_grid": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_grid": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=Line("╠", "═", "╬", "╣"), + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), + "fancy_grid": TableFormat( + lineabove=Line("╒", "═", "╤", "╕"), + linebelowheader=Line("╞", "═", "╪", "╡"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╘", "═", "╧", "╛"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "outline": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_outline": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_outline": 
TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_outline": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=None, + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_outline": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=None, + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_outline": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=None, + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), + "fancy_outline": TableFormat( + lineabove=Line("╒", "═", "╤", "╕"), + linebelowheader=Line("╞", "═", "╪", "╡"), + linebetweenrows=None, + linebelow=Line("╘", "═", "╧", "╛"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "github": TableFormat( + lineabove=Line("|", "-", "|", "|"), + linebelowheader=Line("|", "-", "|", "|"), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=["lineabove"], + ), + "pipe": TableFormat( + lineabove=_pipe_line_with_colons, + linebelowheader=_pipe_line_with_colons, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=["lineabove"], + ), + "orgtbl": TableFormat( + lineabove=None, + linebelowheader=Line("|", "-", "+", "|"), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "jira": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("||", "||", "||"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "presto": TableFormat( + lineabove=None, + linebelowheader=Line("", "-", "+", ""), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", "|", ""), + datarow=DataRow("", "|", ""), + padding=1, + with_header_hide=None, + ), + "pretty": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "-", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "psql": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("|", "-", "+", "|"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "rst": TableFormat( + lineabove=Line("", "=", " ", ""), + linebelowheader=Line("", "=", " ", ""), + linebetweenrows=None, + linebelow=Line("", "=", " ", ""), + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=None, + ), + "mediawiki": TableFormat( + lineabove=Line( + '{| 
class="wikitable" style="text-align: left;"', + "", + "", + "\n|+ \n|-", + ), + linebelowheader=Line("|-", "", "", ""), + linebetweenrows=Line("|-", "", "", ""), + linebelow=Line("|}", "", "", ""), + headerrow=partial(_mediawiki_row_with_attrs, "!"), + datarow=partial(_mediawiki_row_with_attrs, "|"), + padding=0, + with_header_hide=None, + ), + "moinmoin": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=partial(_moin_row_with_attrs, "||", header="'''"), + datarow=partial(_moin_row_with_attrs, "||"), + padding=1, + with_header_hide=None, + ), + "youtrack": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|| ", " || ", " || "), + datarow=DataRow("| ", " | ", " |"), + padding=1, + with_header_hide=None, + ), + "html": TableFormat( + lineabove=_html_begin_table_without_header, + linebelowheader="", + linebetweenrows=None, + linebelow=Line("\n
", "", "", ""), + headerrow=partial(_html_row_with_attrs, "th", False), + datarow=partial(_html_row_with_attrs, "td", False), + padding=0, + with_header_hide=["lineabove"], + ), + "unsafehtml": TableFormat( + lineabove=_html_begin_table_without_header, + linebelowheader="", + linebetweenrows=None, + linebelow=Line("\n", "", "", ""), + headerrow=partial(_html_row_with_attrs, "th", True), + datarow=partial(_html_row_with_attrs, "td", True), + padding=0, + with_header_hide=["lineabove"], + ), + "latex": TableFormat( + lineabove=_latex_line_begin_tabular, + linebelowheader=Line("\\hline", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "latex_raw": TableFormat( + lineabove=_latex_line_begin_tabular, + linebelowheader=Line("\\hline", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), + headerrow=partial(_latex_row, escrules={}), + datarow=partial(_latex_row, escrules={}), + padding=1, + with_header_hide=None, + ), + "latex_booktabs": TableFormat( + lineabove=partial(_latex_line_begin_tabular, booktabs=True), + linebelowheader=Line("\\midrule", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "latex_longtable": TableFormat( + lineabove=partial(_latex_line_begin_tabular, longtable=True), + linebelowheader=Line("\\hline\n\\endhead", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{longtable}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "tsv": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", "\t", ""), + datarow=DataRow("", "\t", ""), + padding=0, + with_header_hide=None, + ), + "textile": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|_. ", "|_.", "|"), + datarow=_textile_row_with_attrs, + padding=1, + with_header_hide=None, + ), + "asciidoc": TableFormat( + lineabove=partial(_asciidoc_row, False), + linebelowheader=None, + linebetweenrows=None, + linebelow=Line("|====", "", "", ""), + headerrow=partial(_asciidoc_row, True), + datarow=partial(_asciidoc_row, False), + padding=1, + with_header_hide=["lineabove"], + ), +} + + +tabulate_formats = list(sorted(_table_formats.keys())) + +# The table formats for which multiline cells will be folded into subsequent +# table rows. The key is the original format specified at the API. The value is +# the format that will be used to represent the original format. +multiline_formats = { + "plain": "plain", + "simple": "simple", + "grid": "grid", + "simple_grid": "simple_grid", + "rounded_grid": "rounded_grid", + "heavy_grid": "heavy_grid", + "mixed_grid": "mixed_grid", + "double_grid": "double_grid", + "fancy_grid": "fancy_grid", + "pipe": "pipe", + "orgtbl": "orgtbl", + "jira": "jira", + "presto": "presto", + "pretty": "pretty", + "psql": "psql", + "rst": "rst", +} + +# TODO: Add multiline support for the remaining table formats: +# - mediawiki: Replace \n with
+# - moinmoin: TBD
+# - youtrack: TBD
+# - html: Replace \n with <br>
+# - latex*: Use "makecell" package: In header, replace X\nY with
+#   \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
+# - tsv: TBD
+# - textile: Replace \n with <br/>
(must be well-formed XML) + +_multiline_codes = re.compile(r"\r|\n|\r\n") +_multiline_codes_bytes = re.compile(b"\r|\n|\r\n") + +# Handle ANSI escape sequences for both control sequence introducer (CSI) and +# operating system command (OSC). Both of these begin with 0x1b (or octal 033), +# which will be shown below as ESC. +# +# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48: +# +# CSI: ESC followed by the '[' character (0x5b) +# Parameter Bytes: 0..n bytes in the range 0x30-0x3f +# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f +# Final Byte: a single byte in the range 0x40-0x7e +# +# Also include the terminal hyperlink sequences as described here: +# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda +# +# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST +# +# Example: \x1b]8;;https://example.com\x5ctext to show\x1b]8;;\x5c +# +# Where: +# OSC: ESC followed by the ']' character (0x5d) +# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123) +# URI: the actual URI with protocol scheme (e.g. https://, file://, ftp://) +# ST: ESC followed by the '\' character (0x5c) +_esc = r"\x1b" +_csi = rf"{_esc}\[" +_osc = rf"{_esc}\]" +_st = rf"{_esc}\\" + +_ansi_escape_pat = rf""" + ( + # terminal colors, etc + {_csi} # CSI + [\x30-\x3f]* # parameter bytes + [\x20-\x2f]* # intermediate bytes + [\x40-\x7e] # final byte + | + # terminal hyperlinks + {_osc}8; # OSC opening + (\w+=\w+:?)* # key=value params list (submatch 2) + ; # delimiter + ([^{_esc}]+) # URI - anything but ESC (submatch 3) + {_st} # ST + ([^{_esc}]+) # link text - anything but ESC (submatch 4) + {_osc}8;;{_st} # "closing" OSC sequence + ) +""" +_ansi_codes = re.compile(_ansi_escape_pat, re.VERBOSE) +_ansi_codes_bytes = re.compile(_ansi_escape_pat.encode("utf8"), re.VERBOSE) +_ansi_color_reset_code = "\033[0m" + +_float_with_thousands_separators = re.compile( + r"^(([+-]?[0-9]{1,3})(?:,([0-9]{3}))*)?(?(1)\.[0-9]*|\.[0-9]+)?$" +) + + +def simple_separated_format(separator): + """Construct a simple TableFormat with columns separated by a separator. 
+ + >>> tsv = simple_separated_format("\\t") ; \ + tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23' + True + + """ + return TableFormat( + None, + None, + None, + None, + headerrow=DataRow("", separator, ""), + datarow=DataRow("", separator, ""), + padding=0, + with_header_hide=None, + ) + + +def _isnumber_with_thousands_separator(string): + """ + >>> _isnumber_with_thousands_separator(".") + False + >>> _isnumber_with_thousands_separator("1") + True + >>> _isnumber_with_thousands_separator("1.") + True + >>> _isnumber_with_thousands_separator(".1") + True + >>> _isnumber_with_thousands_separator("1000") + False + >>> _isnumber_with_thousands_separator("1,000") + True + >>> _isnumber_with_thousands_separator("1,0000") + False + >>> _isnumber_with_thousands_separator("1,000.1234") + True + >>> _isnumber_with_thousands_separator(b"1,000.1234") + True + >>> _isnumber_with_thousands_separator("+1,000.1234") + True + >>> _isnumber_with_thousands_separator("-1,000.1234") + True + """ + try: + string = string.decode() + except (UnicodeDecodeError, AttributeError): + pass + + return bool(re.match(_float_with_thousands_separators, string)) + + +def _isconvertible(conv, string): + try: + conv(string) + return True + except (ValueError, TypeError): + return False + + +def _isnumber(string): + """ + >>> _isnumber("123.45") + True + >>> _isnumber("123") + True + >>> _isnumber("spam") + False + >>> _isnumber("123e45678") + False + >>> _isnumber("inf") + True + """ + if not _isconvertible(float, string): + return False + elif isinstance(string, (str, bytes)) and ( + math.isinf(float(string)) or math.isnan(float(string)) + ): + return string.lower() in ["inf", "-inf", "nan"] + return True + + +def _isint(string, inttype=int): + """ + >>> _isint("123") + True + >>> _isint("123.45") + False + """ + return ( + type(string) is inttype + or isinstance(string, (bytes, str)) + and _isconvertible(inttype, string) + ) + + +def _isbool(string): + """ + >>> _isbool(True) + True + >>> _isbool("False") + True + >>> _isbool(1) + False + """ + return type(string) is bool or ( + isinstance(string, (bytes, str)) and string in ("True", "False") + ) + + +def _type(string, has_invisible=True, numparse=True): + """The least generic type (type(None), int, float, str, unicode). + + >>> _type(None) is type(None) + True + >>> _type("foo") is type("") + True + >>> _type("1") is type(1) + True + >>> _type('\x1b[31m42\x1b[0m') is type(42) + True + >>> _type('\x1b[31m42\x1b[0m') is type(42) + True + + """ + + if has_invisible and isinstance(string, (str, bytes)): + string = _strip_ansi(string) + + if string is None: + return type(None) + elif hasattr(string, "isoformat"): # datetime.datetime, date, and time + return str + elif _isbool(string): + return bool + elif _isint(string) and numparse: + return int + elif _isnumber(string) and numparse: + return float + elif isinstance(string, bytes): + return bytes + else: + return str + + +def _afterpoint(string): + """Symbols after a decimal point, -1 if the string lacks the decimal point. 
+ + >>> _afterpoint("123.45") + 2 + >>> _afterpoint("1001") + -1 + >>> _afterpoint("eggs") + -1 + >>> _afterpoint("123e45") + 2 + >>> _afterpoint("123,456.78") + 2 + + """ + if _isnumber(string) or _isnumber_with_thousands_separator(string): + if _isint(string): + return -1 + else: + pos = string.rfind(".") + pos = string.lower().rfind("e") if pos < 0 else pos + if pos >= 0: + return len(string) - pos - 1 + else: + return -1 # no point + else: + return -1 # not a number + + +def _padleft(width, s): + """Flush right. + + >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430' + True + + """ + fmt = "{0:>%ds}" % width + return fmt.format(s) + + +def _padright(width, s): + """Flush left. + + >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 ' + True + + """ + fmt = "{0:<%ds}" % width + return fmt.format(s) + + +def _padboth(width, s): + """Center string. + + >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 ' + True + + """ + fmt = "{0:^%ds}" % width + return fmt.format(s) + + +def _padnone(ignore_width, s): + return s + + +def _strip_ansi(s): + r"""Remove ANSI escape sequences, both CSI (color codes, etc) and OSC hyperlinks. + + CSI sequences are simply removed from the output, while OSC hyperlinks are replaced + with the link text. Note: it may be desirable to show the URI instead but this is not + supported. + + >>> repr(_strip_ansi('\x1B]8;;https://example.com\x1B\\This is a link\x1B]8;;\x1B\\')) + "'This is a link'" + + >>> repr(_strip_ansi('\x1b[31mred\x1b[0m text')) + "'red text'" + + """ + if isinstance(s, str): + return _ansi_codes.sub(r"\4", s) + else: # a bytestring + return _ansi_codes_bytes.sub(r"\4", s) + + +def _visible_width(s): + """Visible width of a printed string. ANSI color codes are removed. 
+ + >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") + (5, 5) + + """ + # optional wide-character support + if wcwidth is not None and WIDE_CHARS_MODE: + len_fn = wcwidth.wcswidth + else: + len_fn = len + if isinstance(s, (str, bytes)): + return len_fn(_strip_ansi(s)) + else: + return len_fn(str(s)) + + +def _is_multiline(s): + if isinstance(s, str): + return bool(re.search(_multiline_codes, s)) + else: # a bytestring + return bool(re.search(_multiline_codes_bytes, s)) + + +def _multiline_width(multiline_s, line_width_fn=len): + """Visible width of a potentially multiline content.""" + return max(map(line_width_fn, re.split("[\r\n]", multiline_s))) + + +def _choose_width_fn(has_invisible, enable_widechars, is_multiline): + """Return a function to calculate visible cell width.""" + if has_invisible: + line_width_fn = _visible_width + elif enable_widechars: # optional wide-character support if available + line_width_fn = wcwidth.wcswidth + else: + line_width_fn = len + if is_multiline: + width_fn = lambda s: _multiline_width(s, line_width_fn) # noqa + else: + width_fn = line_width_fn + return width_fn + + +def _align_column_choose_padfn(strings, alignment, has_invisible): + if alignment == "right": + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padleft + elif alignment == "center": + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padboth + elif alignment == "decimal": + if has_invisible: + decimals = [_afterpoint(_strip_ansi(s)) for s in strings] + else: + decimals = [_afterpoint(s) for s in strings] + maxdecimals = max(decimals) + strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] + padfn = _padleft + elif not alignment: + padfn = _padnone + else: + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padright + return strings, padfn + + +def _align_column_choose_width_fn(has_invisible, enable_widechars, is_multiline): + if has_invisible: + line_width_fn = _visible_width + elif enable_widechars: # optional wide-character support if available + line_width_fn = wcwidth.wcswidth + else: + line_width_fn = len + if is_multiline: + width_fn = lambda s: _align_column_multiline_width(s, line_width_fn) # noqa + else: + width_fn = line_width_fn + return width_fn + + +def _align_column_multiline_width(multiline_s, line_width_fn=len): + """Visible width of a potentially multiline content.""" + return list(map(line_width_fn, re.split("[\r\n]", multiline_s))) + + +def _flat_list(nested_list): + ret = [] + for item in nested_list: + if isinstance(item, list): + for subitem in item: + ret.append(subitem) + else: + ret.append(item) + return ret + + +def _align_column( + strings, + alignment, + minwidth=0, + has_invisible=True, + enable_widechars=False, + is_multiline=False, +): + """[string] -> [padded_string]""" + strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible) + width_fn = _align_column_choose_width_fn( + has_invisible, enable_widechars, is_multiline + ) + + s_widths = list(map(width_fn, strings)) + maxwidth = max(max(_flat_list(s_widths)), minwidth) + # TODO: refactor column alignment in single-line and multiline modes + if is_multiline: + if not enable_widechars and not has_invisible: + padded_strings = [ + "\n".join([padfn(maxwidth, s) for s in ms.splitlines()]) + for ms in strings + ] + else: + # enable wide-character width corrections + s_lens = [[len(s) for s in re.split("[\r\n]", ms)] for ms in strings] + visible_widths = [ + 
[maxwidth - (w - l) for w, l in zip(mw, ml)] + for mw, ml in zip(s_widths, s_lens) + ] + # wcswidth and _visible_width don't count invisible characters; + # padfn doesn't need to apply another correction + padded_strings = [ + "\n".join([padfn(w, s) for s, w in zip((ms.splitlines() or ms), mw)]) + for ms, mw in zip(strings, visible_widths) + ] + else: # single-line cell values + if not enable_widechars and not has_invisible: + padded_strings = [padfn(maxwidth, s) for s in strings] + else: + # enable wide-character width corrections + s_lens = list(map(len, strings)) + visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)] + # wcswidth and _visible_width don't count invisible characters; + # padfn doesn't need to apply another correction + padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)] + return padded_strings + + +def _more_generic(type1, type2): + types = { + type(None): 0, + bool: 1, + int: 2, + float: 3, + bytes: 4, + str: 5, + } + invtypes = { + 5: str, + 4: bytes, + 3: float, + 2: int, + 1: bool, + 0: type(None), + } + moregeneric = max(types.get(type1, 5), types.get(type2, 5)) + return invtypes[moregeneric] + + +def _column_type(strings, has_invisible=True, numparse=True): + """The least generic type all column values are convertible to. + + >>> _column_type([True, False]) is bool + True + >>> _column_type(["1", "2"]) is int + True + >>> _column_type(["1", "2.3"]) is float + True + >>> _column_type(["1", "2.3", "four"]) is str + True + >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is str + True + >>> _column_type([None, "brux"]) is str + True + >>> _column_type([1, 2, None]) is int + True + >>> import datetime as dt + >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is str + True + + """ + types = [_type(s, has_invisible, numparse) for s in strings] + return reduce(_more_generic, types, bool) + + +def _format(val, valtype, floatfmt, intfmt, missingval="", has_invisible=True): + """Format a value according to its type. + + Unicode is supported: + + >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \ + tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ + good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ + tabulate(tbl, headers=hrow) == good_result + True + + """ # noqa + if val is None: + return missingval + + if valtype is str: + return f"{val}" + elif valtype is int: + return format(val, intfmt) + elif valtype is bytes: + try: + return str(val, "ascii") + except (TypeError, UnicodeDecodeError): + return str(val) + elif valtype is float: + is_a_colored_number = has_invisible and isinstance(val, (str, bytes)) + if is_a_colored_number: + raw_val = _strip_ansi(val) + formatted_val = format(float(raw_val), floatfmt) + return val.replace(raw_val, formatted_val) + else: + return format(float(val), floatfmt) + else: + return f"{val}" + + +def _align_header( + header, alignment, width, visible_width, is_multiline=False, width_fn=None +): + "Pad string header to width chars given known visible_width of the header." 
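+    # A sketch of the invisible-width correction below (illustrative values):
+    # a header such as "\x1b[31mhi\x1b[0m" has len() == 11 but a visible width
+    # of 2, so `width` is widened by the 9 invisible ANSI characters before
+    # padding; the padded header then *looks* exactly `width` columns wide.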
+ if is_multiline: + header_lines = re.split(_multiline_codes, header) + padded_lines = [ + _align_header(h, alignment, width, width_fn(h)) for h in header_lines + ] + return "\n".join(padded_lines) + # else: not multiline + ninvisible = len(header) - visible_width + width += ninvisible + if alignment == "left": + return _padright(width, header) + elif alignment == "center": + return _padboth(width, header) + elif not alignment: + return f"{header}" + else: + return _padleft(width, header) + + +def _remove_separating_lines(rows): + if type(rows) == list: + separating_lines = [] + sans_rows = [] + for index, row in enumerate(rows): + if _is_separating_line(row): + separating_lines.append(index) + else: + sans_rows.append(row) + return sans_rows, separating_lines + else: + return rows, None + + +def _reinsert_separating_lines(rows, separating_lines): + if separating_lines: + for index in separating_lines: + rows.insert(index, SEPARATING_LINE) + + +def _prepend_row_index(rows, index): + """Add a left-most index column.""" + if index is None or index is False: + return rows + if isinstance(index, Sized) and len(index) != len(rows): + raise ValueError( + "index must be as long as the number of data rows: " + + "len(index)={} len(rows)={}".format(len(index), len(rows)) + ) + sans_rows, separating_lines = _remove_separating_lines(rows) + new_rows = [] + index_iter = iter(index) + for row in sans_rows: + index_v = next(index_iter) + new_rows.append([index_v] + list(row)) + rows = new_rows + _reinsert_separating_lines(rows, separating_lines) + return rows + + +def _bool(val): + "A wrapper around standard bool() which doesn't throw on NumPy arrays" + try: + return bool(val) + except ValueError: # val is likely to be a numpy array with many elements + return False + + +def _normalize_tabular_data(tabular_data, headers, showindex="default"): + """Transform a supported data type to a list of lists, and a list of headers. + + Supported tabular data types: + + * list-of-lists or another iterable of iterables + + * list of named tuples (usually used with headers="keys") + + * list of dicts (usually used with headers="keys") + + * list of OrderedDicts (usually used with headers="keys") + + * list of dataclasses (Python 3.7+ only, usually used with headers="keys") + + * 2D NumPy arrays + + * NumPy record arrays (usually used with headers="keys") + + * dict of iterables (usually used with headers="keys") + + * pandas.DataFrame (usually used with headers="keys") + + The first row can be used as headers if headers="firstrow", + column indices can be used as headers if headers="keys". + + If showindex="default", show row indices of the pandas.DataFrame. + If showindex="always", show row indices for all types of data. + If showindex="never", don't show row indices for all types of data. + If showindex is an iterable, show its values as row indices. + + """ + + try: + bool(headers) + is_headers2bool_broken = False # noqa + except ValueError: # numpy.ndarray, pandas.core.index.Index, ... + is_headers2bool_broken = True # noqa + headers = list(headers) + + index = None + if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): + # dict-like and pandas.DataFrame? 
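+        # An illustrative note (not exhaustive): the checks below tell the two
+        # apart by how `.values` behaves, e.g.
+        #   {"a": [1, 2], "b": [3, 4]}  -> .values is a bound method, so the
+        #                                  dict branch transposes columns to rows
+        #   pandas.DataFrame(...)       -> .values is a plain array attribute and
+        #                                  the object has .index, so the
+        #                                  DataFrame branch is taken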
+ if hasattr(tabular_data.values, "__call__"): + # likely a conventional dict + keys = tabular_data.keys() + rows = list( + izip_longest(*tabular_data.values()) + ) # columns have to be transposed + elif hasattr(tabular_data, "index"): + # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) + keys = list(tabular_data) + if ( + showindex in ["default", "always", True] + and tabular_data.index.name is not None + ): + if isinstance(tabular_data.index.name, list): + keys[:0] = tabular_data.index.name + else: + keys[:0] = [tabular_data.index.name] + vals = tabular_data.values # values matrix doesn't need to be transposed + # for DataFrames add an index per default + index = list(tabular_data.index) + rows = [list(row) for row in vals] + else: + raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") + + if headers == "keys": + headers = list(map(str, keys)) # headers should be strings + + else: # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses + rows = list(tabular_data) + + if headers == "keys" and not rows: + # an empty table (issue #81) + headers = [] + elif ( + headers == "keys" + and hasattr(tabular_data, "dtype") + and getattr(tabular_data.dtype, "names") + ): + # numpy record array + headers = tabular_data.dtype.names + elif ( + headers == "keys" + and len(rows) > 0 + and isinstance(rows[0], tuple) + and hasattr(rows[0], "_fields") + ): + # namedtuple + headers = list(map(str, rows[0]._fields)) + elif len(rows) > 0 and hasattr(rows[0], "keys") and hasattr(rows[0], "values"): + # dict-like object + uniq_keys = set() # implements hashed lookup + keys = [] # storage for set + if headers == "firstrow": + firstdict = rows[0] if len(rows) > 0 else {} + keys.extend(firstdict.keys()) + uniq_keys.update(keys) + rows = rows[1:] + for row in rows: + for k in row.keys(): + # Save unique items in input order + if k not in uniq_keys: + keys.append(k) + uniq_keys.add(k) + if headers == "keys": + headers = keys + elif isinstance(headers, dict): + # a dict of headers for a list of dicts + headers = [headers.get(k, k) for k in keys] + headers = list(map(str, headers)) + elif headers == "firstrow": + if len(rows) > 0: + headers = [firstdict.get(k, k) for k in keys] + headers = list(map(str, headers)) + else: + headers = [] + elif headers: + raise ValueError( + "headers for a list of dicts is not a dict or a keyword" + ) + rows = [[row.get(k) for k in keys] for row in rows] + + elif ( + headers == "keys" + and hasattr(tabular_data, "description") + and hasattr(tabular_data, "fetchone") + and hasattr(tabular_data, "rowcount") + ): + # Python Database API cursor object (PEP 0249) + # print tabulate(cursor, headers='keys') + headers = [column[0] for column in tabular_data.description] + + elif ( + dataclasses is not None + and len(rows) > 0 + and dataclasses.is_dataclass(rows[0]) + ): + # Python 3.7+'s dataclass + field_names = [field.name for field in dataclasses.fields(rows[0])] + if headers == "keys": + headers = field_names + rows = [[getattr(row, f) for f in field_names] for row in rows] + + elif headers == "keys" and len(rows) > 0: + # keys are column indices + headers = list(map(str, range(len(rows[0])))) + + # take headers from the first row if necessary + if headers == "firstrow" and len(rows) > 0: + if index is not None: + headers = [index[0]] + list(rows[0]) + index = index[1:] + else: + headers = rows[0] + headers = list(map(str, headers)) # headers should be strings + rows = rows[1:] + elif headers == 
"firstrow": + headers = [] + + headers = list(map(str, headers)) + # rows = list(map(list, rows)) + rows = list(map(lambda r: r if _is_separating_line(r) else list(r), rows)) + + # add or remove an index column + showindex_is_a_str = type(showindex) in [str, bytes] + if showindex == "default" and index is not None: + rows = _prepend_row_index(rows, index) + elif isinstance(showindex, Sized) and not showindex_is_a_str: + rows = _prepend_row_index(rows, list(showindex)) + elif isinstance(showindex, Iterable) and not showindex_is_a_str: + rows = _prepend_row_index(rows, showindex) + elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str): + if index is None: + index = list(range(len(rows))) + rows = _prepend_row_index(rows, index) + elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str): + pass + + # pad with empty headers for initial columns if necessary + if headers and len(rows) > 0: + nhs = len(headers) + ncols = len(rows[0]) + if nhs < ncols: + headers = [""] * (ncols - nhs) + headers + + return rows, headers + + +def _wrap_text_to_colwidths(list_of_lists, colwidths, numparses=True): + numparses = _expand_iterable(numparses, len(list_of_lists[0]), True) + + result = [] + + for row in list_of_lists: + new_row = [] + for cell, width, numparse in zip(row, colwidths, numparses): + if _isnumber(cell) and numparse: + new_row.append(cell) + continue + + if width is not None: + wrapper = _CustomTextWrap(width=width) + # Cast based on our internal type handling + # Any future custom formatting of types (such as datetimes) + # may need to be more explicit than just `str` of the object + casted_cell = ( + str(cell) if _isnumber(cell) else _type(cell, numparse)(cell) + ) + wrapped = wrapper.wrap(casted_cell) + new_row.append("\n".join(wrapped)) + else: + new_row.append(cell) + result.append(new_row) + + return result + + +def _to_str(s, encoding="utf8", errors="ignore"): + """ + A type safe wrapper for converting a bytestring to str. This is essentially just + a wrapper around .decode() intended for use with things like map(), but with some + specific behavior: + + 1. if the given parameter is not a bytestring, it is returned unmodified + 2. decode() is called for the given parameter and assumes utf8 encoding, but the + default error behavior is changed from 'strict' to 'ignore' + + >>> repr(_to_str(b'foo')) + "'foo'" + + >>> repr(_to_str('foo')) + "'foo'" + + >>> repr(_to_str(42)) + "'42'" + + """ + if isinstance(s, bytes): + return s.decode(encoding=encoding, errors=errors) + return str(s) + + +def tabulate( + tabular_data, + headers=(), + tablefmt="simple", + floatfmt=_DEFAULT_FLOATFMT, + intfmt=_DEFAULT_INTFMT, + numalign=_DEFAULT_ALIGN, + stralign=_DEFAULT_ALIGN, + missingval=_DEFAULT_MISSINGVAL, + showindex="default", + disable_numparse=False, + colalign=None, + maxcolwidths=None, + rowalign=None, + maxheadercolwidths=None, +): + """Format a fixed width table for pretty printing. + + >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) + --- --------- + 1 2.34 + -56 8.999 + 2 10001 + --- --------- + + The first required argument (`tabular_data`) can be a + list-of-lists (or another iterable of iterables), a list of named + tuples, a dictionary of iterables, an iterable of dictionaries, + an iterable of dataclasses (Python 3.7+), a two-dimensional NumPy array, + NumPy record array, or a Pandas' dataframe. 
+ + + Table headers + ------------- + + To print nice column headers, supply the second argument (`headers`): + + - `headers` can be an explicit list of column headers + - if `headers="firstrow"`, then the first row of data is used + - if `headers="keys"`, then dictionary keys or column indices are used + + Otherwise a headerless table is produced. + + If the number of headers is less than the number of columns, they + are supposed to be names of the last columns. This is consistent + with the plain-text format of R and Pandas' dataframes. + + >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], + ... headers="firstrow")) + sex age + ----- ----- ----- + Alice F 24 + Bob M 19 + + By default, pandas.DataFrame data have an additional column called + row index. To add a similar column to all other types of data, + use `showindex="always"` or `showindex=True`. To suppress row indices + for all types of data, pass `showindex="never" or `showindex=False`. + To add a custom row index column, pass `showindex=some_iterable`. + + >>> print(tabulate([["F",24],["M",19]], showindex="always")) + - - -- + 0 F 24 + 1 M 19 + - - -- + + + Column alignment + ---------------- + + `tabulate` tries to detect column types automatically, and aligns + the values properly. By default it aligns decimal points of the + numbers (or flushes integer numbers to the right), and flushes + everything else to the left. Possible column alignments + (`numalign`, `stralign`) are: "right", "center", "left", "decimal" + (only for `numalign`), and None (to disable alignment). + + + Table formats + ------------- + + `intfmt` is a format specification used for columns which + contain numeric data without a decimal point. This can also be + a list or tuple of format strings, one per column. + + `floatfmt` is a format specification used for columns which + contain numeric data with a decimal point. This can also be + a list or tuple of format strings, one per column. + + `None` values are replaced with a `missingval` string (like + `floatfmt`, this can also be a list of values for different + columns): + + >>> print(tabulate([["spam", 1, None], + ... ["eggs", 42, 3.14], + ... ["other", None, 2.7]], missingval="?")) + ----- -- ---- + spam 1 ? + eggs 42 3.14 + other ? 2.7 + ----- -- ---- + + Various plain-text table formats (`tablefmt`) are supported: + 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', + 'latex', 'latex_raw', 'latex_booktabs', 'latex_longtable' and tsv. + Variable `tabulate_formats`contains the list of currently supported formats. + + "plain" format doesn't use any pseudographics to draw tables, + it separates columns with a double space: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "plain")) + strings numbers + spam 41.9999 + eggs 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) + spam 41.9999 + eggs 451 + + "simple" format is like Pandoc simple_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple")) + strings numbers + --------- --------- + spam 41.9999 + eggs 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) + ---- -------- + spam 41.9999 + eggs 451 + ---- -------- + + "grid" is similar to tables produced by Emacs table.el package or + Pandoc grid_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "grid")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + +-----------+-----------+ + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) + +------+----------+ + | spam | 41.9999 | + +------+----------+ + | eggs | 451 | + +------+----------+ + + "simple_grid" draws a grid using single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple_grid")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_grid" draws a grid using single-line box-drawing + characters with rounded corners: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_grid")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_grid" draws a grid using bold (thick) single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_grid")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_grid" draws a grid using a mix of light (thin) and heavy (thick) lines + box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_grid")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_grid" draws a grid using double-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_grid")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ╠═══════════╬═══════════╣ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_grid" draws a grid using a mix of single and + double-line box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "fancy_grid")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + + "outline" is the same as the "grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "outline")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="outline")) + +------+----------+ + | spam | 41.9999 | + | eggs | 451 | + +------+----------+ + + "simple_outline" is the same as the "simple_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "simple_outline")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_outline" is the same as the "rounded_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_outline")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_outline" is the same as the "heavy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_outline")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_outline" is the same as the "mixed_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_outline")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_outline" is the same as the "double_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_outline")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_outline" is the same as the "fancy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "fancy_outline")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + + "pipe" is like tables in PHP Markdown Extra extension or Pandoc + pipe_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "pipe")) + | strings | numbers | + |:----------|----------:| + | spam | 41.9999 | + | eggs | 451 | + + "presto" is like tables produce by the Presto CLI: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "presto")) + strings | numbers + -----------+----------- + spam | 41.9999 + eggs | 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) + |:-----|---------:| + | spam | 41.9999 | + | eggs | 451 | + + "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They + are slightly different from "pipe" format by not using colons to + define column alignment, and using a "+" sign to indicate line + intersections: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "orgtbl")) + | strings | numbers | + |-----------+-----------| + | spam | 41.9999 | + | eggs | 451 | + + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) + | spam | 41.9999 | + | eggs | 451 | + + "rst" is like a simple table format from reStructuredText; please + note that reStructuredText accepts also "grid" tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "rst")) + ========= ========= + strings numbers + ========= ========= + spam 41.9999 + eggs 451 + ========= ========= + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) + ==== ======== + spam 41.9999 + eggs 451 + ==== ======== + + "mediawiki" produces a table markup used in Wikipedia and on other + MediaWiki-based sites: + + >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], + ... headers="firstrow", tablefmt="mediawiki")) + {| class="wikitable" style="text-align: left;" + |+ + |- + ! strings !! align="right"| numbers + |- + | spam || align="right"| 41.9999 + |- + | eggs || align="right"| 451 + |} + + "html" produces HTML markup as an html.escape'd str + with a ._repr_html_ method so that Jupyter Lab and Notebook display the HTML + and a .str property so that the raw HTML remains accessible + the unsafehtml table format can be used if an unescaped HTML format is required: + + >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], + ... headers="firstrow", tablefmt="html")) + + + + + + + + +
strings numbers
spam 41.9999
eggs 451
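+
+    A quick sketch of the str-subclass behaviour described above (the "html"
+    format returns a plain str subclass, so both spellings work):
+
+    >>> s = tabulate([["spam"]], tablefmt="html") ; s.str == str(s)
+    True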
+ + "latex" produces a tabular environment of LaTeX document markup: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) + \\begin{tabular}{lr} + \\hline + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\hline + \\end{tabular} + + "latex_raw" is similar to "latex", but doesn't escape special characters, + such as backslash and underscore, so LaTeX commands may embedded into + cells' values: + + >>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw")) + \\begin{tabular}{lr} + \\hline + spam$_9$ & 41.9999 \\\\ + \\emph{eggs} & 451 \\\\ + \\hline + \\end{tabular} + + "latex_booktabs" produces a tabular environment of LaTeX document markup + using the booktabs.sty package: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) + \\begin{tabular}{lr} + \\toprule + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\bottomrule + \\end{tabular} + + "latex_longtable" produces a tabular environment that can stretch along + multiple pages, using the longtable package for LaTeX. + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_longtable")) + \\begin{longtable}{lr} + \\hline + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\hline + \\end{longtable} + + + Number parsing + -------------- + By default, anything which can be parsed as a number is a number. + This ensures numbers represented as strings are aligned properly. + This can lead to weird results for particular strings such as + specific git SHAs e.g. "42992e1" will be parsed into the number + 429920 and aligned as such. + + To completely disable number parsing (and alignment), use + `disable_numparse=True`. For more fine grained control, a list column + indices is used to disable number parsing only on those columns + e.g. `disable_numparse=[0, 2]` would disable number parsing only on the + first and third columns. + + Column Widths and Auto Line Wrapping + ------------------------------------ + Tabulate will, by default, set the width of each column to the length of the + longest element in that column. However, in situations where fields are expected + to reasonably be too long to look good as a single line, tabulate can help automate + word wrapping long fields for you. 
Use the parameter `maxcolwidth` to provide a + list of maximal column widths + + >>> print(tabulate( \ + [('1', 'John Smith', \ + 'This is a rather long description that might look better if it is wrapped a bit')], \ + headers=("Issue Id", "Author", "Description"), \ + maxcolwidths=[None, None, 30], \ + tablefmt="grid" \ + )) + +------------+------------+-------------------------------+ + | Issue Id | Author | Description | + +============+============+===============================+ + | 1 | John Smith | This is a rather long | + | | | description that might look | + | | | better if it is wrapped a bit | + +------------+------------+-------------------------------+ + + Header column width can be specified in a similar way using `maxheadercolwidth` + + """ + + if tabular_data is None: + tabular_data = [] + + list_of_lists, headers = _normalize_tabular_data( + tabular_data, headers, showindex=showindex + ) + list_of_lists, separating_lines = _remove_separating_lines(list_of_lists) + + if maxcolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxcolwidths, int): # Expand scalar for all columns + maxcolwidths = _expand_iterable(maxcolwidths, num_cols, maxcolwidths) + else: # Ignore col width for any 'trailing' columns + maxcolwidths = _expand_iterable(maxcolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + list_of_lists = _wrap_text_to_colwidths( + list_of_lists, maxcolwidths, numparses=numparses + ) + + if maxheadercolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxheadercolwidths, int): # Expand scalar for all columns + maxheadercolwidths = _expand_iterable( + maxheadercolwidths, num_cols, maxheadercolwidths + ) + else: # Ignore col width for any 'trailing' columns + maxheadercolwidths = _expand_iterable(maxheadercolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + headers = _wrap_text_to_colwidths( + [headers], maxheadercolwidths, numparses=numparses + )[0] + + # empty values in the first column of RST tables should be escaped (issue #82) + # "" should be escaped as "\\ " or ".." + if tablefmt == "rst": + list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers) + + # PrettyTable formatting does not use any extra padding. + # Numbers are not parsed and are treated the same as strings for alignment. + # Check if pretty is the format being used and override the defaults so it + # does not impact other formats. + min_padding = MIN_PADDING + if tablefmt == "pretty": + min_padding = 0 + disable_numparse = True + numalign = "center" if numalign == _DEFAULT_ALIGN else numalign + stralign = "center" if stralign == _DEFAULT_ALIGN else stralign + else: + numalign = "decimal" if numalign == _DEFAULT_ALIGN else numalign + stralign = "left" if stralign == _DEFAULT_ALIGN else stralign + + # optimization: look for ANSI control codes once, + # enable smart width functions only if a control code is found + # + # convert the headers and rows into a single, tab-delimited string ensuring + # that any bytestrings are decoded safely (i.e. 
errors ignored) + plain_text = "\t".join( + chain( + # headers + map(_to_str, headers), + # rows: chain the rows together into a single iterable after mapping + # the bytestring conversino to each cell value + chain.from_iterable(map(_to_str, row) for row in list_of_lists), + ) + ) + + has_invisible = _ansi_codes.search(plain_text) is not None + + enable_widechars = wcwidth is not None and WIDE_CHARS_MODE + if ( + not isinstance(tablefmt, TableFormat) + and tablefmt in multiline_formats + and _is_multiline(plain_text) + ): + tablefmt = multiline_formats.get(tablefmt, tablefmt) + is_multiline = True + else: + is_multiline = False + width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline) + + # format rows and columns, convert numeric values to strings + cols = list(izip_longest(*list_of_lists)) + numparses = _expand_numparse(disable_numparse, len(cols)) + coltypes = [_column_type(col, numparse=np) for col, np in zip(cols, numparses)] + if isinstance(floatfmt, str): # old version + float_formats = len(cols) * [ + floatfmt + ] # just duplicate the string to use in each column + else: # if floatfmt is list, tuple etc we have one per column + float_formats = list(floatfmt) + if len(float_formats) < len(cols): + float_formats.extend((len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT]) + if isinstance(intfmt, str): # old version + int_formats = len(cols) * [ + intfmt + ] # just duplicate the string to use in each column + else: # if intfmt is list, tuple etc we have one per column + int_formats = list(intfmt) + if len(int_formats) < len(cols): + int_formats.extend((len(cols) - len(int_formats)) * [_DEFAULT_INTFMT]) + if isinstance(missingval, str): + missing_vals = len(cols) * [missingval] + else: + missing_vals = list(missingval) + if len(missing_vals) < len(cols): + missing_vals.extend((len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL]) + cols = [ + [_format(v, ct, fl_fmt, int_fmt, miss_v, has_invisible) for v in c] + for c, ct, fl_fmt, int_fmt, miss_v in zip( + cols, coltypes, float_formats, int_formats, missing_vals + ) + ] + + # align columns + aligns = [numalign if ct in [int, float] else stralign for ct in coltypes] + if colalign is not None: + assert isinstance(colalign, Iterable) + for idx, align in enumerate(colalign): + aligns[idx] = align + minwidths = ( + [width_fn(h) + min_padding for h in headers] if headers else [0] * len(cols) + ) + cols = [ + _align_column(c, a, minw, has_invisible, enable_widechars, is_multiline) + for c, a, minw in zip(cols, aligns, minwidths) + ] + + if headers: + # align headers and add headers + t_cols = cols or [[""]] * len(headers) + t_aligns = aligns or [stralign] * len(headers) + minwidths = [ + max(minw, max(width_fn(cl) for cl in c)) + for minw, c in zip(minwidths, t_cols) + ] + headers = [ + _align_header(h, a, minw, width_fn(h), is_multiline, width_fn) + for h, a, minw in zip(headers, t_aligns, minwidths) + ] + rows = list(zip(*cols)) + else: + minwidths = [max(width_fn(cl) for cl in c) for c in cols] + rows = list(zip(*cols)) + + if not isinstance(tablefmt, TableFormat): + tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) + + ra_default = rowalign if isinstance(rowalign, str) else None + rowaligns = _expand_iterable(rowalign, len(rows), ra_default) + _reinsert_separating_lines(rows, separating_lines) + + return _format_table( + tablefmt, headers, rows, minwidths, aligns, is_multiline, rowaligns=rowaligns + ) + + +def _expand_numparse(disable_numparse, column_count): + """ + Return a list of bools of length 
`column_count` which indicates whether + number parsing should be used on each column. + If `disable_numparse` is a list of indices, each of those indices are False, + and everything else is True. + If `disable_numparse` is a bool, then the returned list is all the same. + """ + if isinstance(disable_numparse, Iterable): + numparses = [True] * column_count + for index in disable_numparse: + numparses[index] = False + return numparses + else: + return [not disable_numparse] * column_count + + +def _expand_iterable(original, num_desired, default): + """ + Expands the `original` argument to return a return a list of + length `num_desired`. If `original` is shorter than `num_desired`, it will + be padded with the value in `default`. + If `original` is not a list to begin with (i.e. scalar value) a list of + length `num_desired` completely populated with `default will be returned + """ + if isinstance(original, Iterable) and not isinstance(original, str): + return original + [default] * (num_desired - len(original)) + else: + return [default] * num_desired + + +def _pad_row(cells, padding): + if cells: + pad = " " * padding + padded_cells = [pad + cell + pad for cell in cells] + return padded_cells + else: + return cells + + +def _build_simple_row(padded_cells, rowfmt): + "Format row according to DataRow format without padding." + begin, sep, end = rowfmt + return (begin + sep.join(padded_cells) + end).rstrip() + + +def _build_row(padded_cells, colwidths, colaligns, rowfmt): + "Return a string which represents a row of data cells." + if not rowfmt: + return None + if hasattr(rowfmt, "__call__"): + return rowfmt(padded_cells, colwidths, colaligns) + else: + return _build_simple_row(padded_cells, rowfmt) + + +def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt, rowalign=None): + # NOTE: rowalign is ignored and exists for api compatibility with _append_multiline_row + lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt)) + return lines + + +def _align_cell_veritically(text_lines, num_lines, column_width, row_alignment): + delta_lines = num_lines - len(text_lines) + blank = [" " * column_width] + if row_alignment == "bottom": + return blank * delta_lines + text_lines + elif row_alignment == "center": + top_delta = delta_lines // 2 + bottom_delta = delta_lines - top_delta + return top_delta * blank + text_lines + bottom_delta * blank + else: + return text_lines + blank * delta_lines + + +def _append_multiline_row( + lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad, rowalign=None +): + colwidths = [w - 2 * pad for w in padded_widths] + cells_lines = [c.splitlines() for c in padded_multiline_cells] + nlines = max(map(len, cells_lines)) # number of lines in the row + # vertically pad cells where some lines are missing + # cells_lines = [ + # (cl + [" " * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths) + # ] + + cells_lines = [ + _align_cell_veritically(cl, nlines, w, rowalign) + for cl, w in zip(cells_lines, colwidths) + ] + lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)] + for ln in lines_cells: + padded_ln = _pad_row(ln, pad) + _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt) + return lines + + +def _build_line(colwidths, colaligns, linefmt): + "Return a string which represents a horizontal line." 
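+    # Illustrative sketch: a Line("+", "-", "+", "+") format with colwidths
+    # [3, 5] yields "+---+-----+", while callable formats (e.g. the LaTeX
+    # begin-tabular builders) are invoked with (colwidths, colaligns) instead.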
+ if not linefmt: + return None + if hasattr(linefmt, "__call__"): + return linefmt(colwidths, colaligns) + else: + begin, fill, sep, end = linefmt + cells = [fill * w for w in colwidths] + return _build_simple_row(cells, (begin, sep, end)) + + +def _append_line(lines, colwidths, colaligns, linefmt): + lines.append(_build_line(colwidths, colaligns, linefmt)) + return lines + + +class JupyterHTMLStr(str): + """Wrap the string with a _repr_html_ method so that Jupyter + displays the HTML table""" + + def _repr_html_(self): + return self + + @property + def str(self): + """add a .str property so that the raw string is still accessible""" + return self + + +def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline, rowaligns): + """Produce a plain-text representation of the table.""" + lines = [] + hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else [] + pad = fmt.padding + headerrow = fmt.headerrow + + padded_widths = [(w + 2 * pad) for w in colwidths] + if is_multiline: + pad_row = lambda row, _: row # noqa do it later, in _append_multiline_row + append_row = partial(_append_multiline_row, pad=pad) + else: + pad_row = _pad_row + append_row = _append_basic_row + + padded_headers = pad_row(headers, pad) + padded_rows = [pad_row(row, pad) for row in rows] + + if fmt.lineabove and "lineabove" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.lineabove) + + if padded_headers: + append_row(lines, padded_headers, padded_widths, colaligns, headerrow) + if fmt.linebelowheader and "linebelowheader" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.linebelowheader) + + if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: + # initial rows with a line below + for row, ralign in zip(padded_rows[:-1], rowaligns): + append_row( + lines, row, padded_widths, colaligns, fmt.datarow, rowalign=ralign + ) + _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows) + # the last row without a line below + append_row( + lines, + padded_rows[-1], + padded_widths, + colaligns, + fmt.datarow, + rowalign=rowaligns[-1], + ) + else: + separating_line = ( + fmt.linebetweenrows + or fmt.linebelowheader + or fmt.linebelow + or fmt.lineabove + or Line("", "", "", "") + ) + for row in padded_rows: + # test to see if either the 1st column or the 2nd column (account for showindex) has + # the SEPARATING_LINE flag + if _is_separating_line(row): + _append_line(lines, padded_widths, colaligns, separating_line) + else: + append_row(lines, row, padded_widths, colaligns, fmt.datarow) + + if fmt.linebelow and "linebelow" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.linebelow) + + if headers or rows: + output = "\n".join(lines) + if fmt.lineabove == _html_begin_table_without_header: + return JupyterHTMLStr(output) + else: + return output + else: # a completely empty table + return "" + + +class _CustomTextWrap(textwrap.TextWrapper): + """A custom implementation of CPython's textwrap.TextWrapper. This supports + both wide characters (Korea, Japanese, Chinese) - including mixed string. + For the most part, the `_handle_long_word` and `_wrap_chunks` functions were + copy pasted out of the CPython baseline, and updated with our custom length + and line appending logic. 
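+    Widths are measured with wcwidth.wcswidth when wcwidth is installed (so
+    CJK characters count as two terminal columns) and with plain len()
+    otherwise, after stripping ANSI escape sequences in both cases.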
+ """ + + def __init__(self, *args, **kwargs): + self._active_codes = [] + self.max_lines = None # For python2 compatibility + textwrap.TextWrapper.__init__(self, *args, **kwargs) + + @staticmethod + def _len(item): + """Custom len that gets console column width for wide + and non-wide characters as well as ignores color codes""" + stripped = _strip_ansi(item) + if wcwidth: + return wcwidth.wcswidth(stripped) + else: + return len(stripped) + + def _update_lines(self, lines, new_line): + """Adds a new line to the list of lines the text is being wrapped into + This function will also track any ANSI color codes in this string as well + as add any colors from previous lines order to preserve the same formatting + as a single unwrapped string. + """ + code_matches = [x for x in _ansi_codes.finditer(new_line)] + color_codes = [ + code.string[code.span()[0] : code.span()[1]] for code in code_matches + ] + + # Add color codes from earlier in the unwrapped line, and then track any new ones we add. + new_line = "".join(self._active_codes) + new_line + + for code in color_codes: + if code != _ansi_color_reset_code: + self._active_codes.append(code) + else: # A single reset code resets everything + self._active_codes = [] + + # Always ensure each line is color terminted if any colors are + # still active, otherwise colors will bleed into other cells on the console + if len(self._active_codes) > 0: + new_line = new_line + _ansi_color_reset_code + + lines.append(new_line) + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + if width < 1: + space_left = 1 + else: + space_left = width - cur_len + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + if self.break_long_words: + # Tabulate Custom: Build the string up piece-by-piece in order to + # take each charcter's width into account + chunk = reversed_chunks[-1] + i = 1 + while self._len(chunk[:i]) <= space_left: + i = i + 1 + cur_line.append(chunk[: i - 1]) + reversed_chunks[-1] = chunk[i - 1 :] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". 
+        Whitespace chunks will be removed from the beginning and end of
+        lines, but apart from that whitespace is preserved.
+        """
+        lines = []
+        if self.width <= 0:
+            raise ValueError("invalid width %r (must be > 0)" % self.width)
+        if self.max_lines is not None:
+            if self.max_lines > 1:
+                indent = self.subsequent_indent
+            else:
+                indent = self.initial_indent
+            if self._len(indent) + self._len(self.placeholder.lstrip()) > self.width:
+                raise ValueError("placeholder too large for max width")
+
+        # Arrange in reverse order so items can be efficiently popped
+        # from a stack of chunks.
+        chunks.reverse()
+
+        while chunks:
+
+            # Start the list of chunks that will make up the current line.
+            # cur_len is just the length of all the chunks in cur_line.
+            cur_line = []
+            cur_len = 0
+
+            # Figure out which static string will prefix this line.
+            if lines:
+                indent = self.subsequent_indent
+            else:
+                indent = self.initial_indent
+
+            # Maximum width for this line.
+            width = self.width - self._len(indent)
+
+            # First chunk on line is whitespace -- drop it, unless this
+            # is the very beginning of the text (ie. no lines started yet).
+            if self.drop_whitespace and chunks[-1].strip() == "" and lines:
+                del chunks[-1]
+
+            while chunks:
+                chunk_len = self._len(chunks[-1])
+
+                # Can at least squeeze this chunk onto the current line.
+                if cur_len + chunk_len <= width:
+                    cur_line.append(chunks.pop())
+                    cur_len += chunk_len
+
+                # Nope, this line is full.
+                else:
+                    break
+
+            # The current line is full, and the next chunk is too big to
+            # fit on *any* line (not just this one).
+            if chunks and self._len(chunks[-1]) > width:
+                self._handle_long_word(chunks, cur_line, cur_len, width)
+                cur_len = sum(map(self._len, cur_line))
+
+            # If the last chunk on this line is all whitespace, drop it.
+            if self.drop_whitespace and cur_line and cur_line[-1].strip() == "":
+                cur_len -= self._len(cur_line[-1])
+                del cur_line[-1]
+
+            if cur_line:
+                if (
+                    self.max_lines is None
+                    or len(lines) + 1 < self.max_lines
+                    or (
+                        not chunks
+                        or self.drop_whitespace
+                        and len(chunks) == 1
+                        and not chunks[0].strip()
+                    )
+                    and cur_len <= width
+                ):
+                    # Convert current line back to a string and store it in
+                    # list of all lines (return value).
+                    self._update_lines(lines, indent + "".join(cur_line))
+                else:
+                    while cur_line:
+                        if (
+                            cur_line[-1].strip()
+                            and cur_len + self._len(self.placeholder) <= width
+                        ):
+                            cur_line.append(self.placeholder)
+                            self._update_lines(lines, indent + "".join(cur_line))
+                            break
+                        cur_len -= self._len(cur_line[-1])
+                        del cur_line[-1]
+                    else:
+                        if lines:
+                            prev_line = lines[-1].rstrip()
+                            if (
+                                self._len(prev_line) + self._len(self.placeholder)
+                                <= self.width
+                            ):
+                                lines[-1] = prev_line + self.placeholder
+                                break
+                        self._update_lines(lines, indent + self.placeholder.lstrip())
+                        break
+
+        return lines
+
+
+def _main():
+    """\
+    Usage: tabulate [options] [FILE ...]
+
+    Pretty-print tabular data.
+    See also https://github.com/astanin/python-tabulate
+
+    FILE                      a filename of the file with tabular data;
+                              if "-" or missing, read data from stdin.
+
+    Options:
+
+    -h, --help                show this message
+    -1, --header              use the first row of data as a table header
+    -o FILE, --output FILE    print table to FILE (default: stdout)
+    -s REGEXP, --sep REGEXP   use a custom column separator (default: whitespace)
+    -F FPFMT, --float FPFMT   floating point number format (default: g)
+    -I INTFMT, --int INTFMT   integer number format (default: "")
+    -C ALIGNS, --colalign ALIGNS
+                              space-separated column alignments (e.g. "left right")
+    -f FMT, --format FMT      set output table format; supported formats:
+                              plain, simple, grid, fancy_grid, pipe, orgtbl,
+                              rst, mediawiki, html, latex, latex_raw,
+                              latex_booktabs, latex_longtable, tsv
+                              (default: simple)
+    """
+    import getopt
+    import sys
+    import textwrap
+
+    usage = textwrap.dedent(_main.__doc__)
+    try:
+        # option spec matches the options handled below: -I/--int and
+        # -C/--colalign take values, and --output takes a filename
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "h1o:s:F:I:C:f:",
+            ["help", "header", "output=", "sep=", "float=", "int=", "colalign=", "format="],
+        )
+    except getopt.GetoptError as e:
+        print(e)
+        print(usage)
+        sys.exit(2)
+    headers = []
+    floatfmt = _DEFAULT_FLOATFMT
+    intfmt = _DEFAULT_INTFMT
+    colalign = None
+    tablefmt = "simple"
+    sep = r"\s+"
+    outfile = "-"
+    for opt, value in opts:
+        if opt in ["-1", "--header"]:
+            headers = "firstrow"
+        elif opt in ["-o", "--output"]:
+            outfile = value
+        elif opt in ["-F", "--float"]:
+            floatfmt = value
+        elif opt in ["-I", "--int"]:
+            intfmt = value
+        elif opt in ["-C", "--colalign"]:
+            colalign = value.split()
+        elif opt in ["-f", "--format"]:
+            if value not in tabulate_formats:
+                print("%s is not a supported table format" % value)
+                print(usage)
+                sys.exit(3)
+            tablefmt = value
+        elif opt in ["-s", "--sep"]:
+            sep = value
+        elif opt in ["-h", "--help"]:
+            print(usage)
+            sys.exit(0)
+    files = [sys.stdin] if not args else args
+    with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
+        for f in files:
+            if f == "-":
+                f = sys.stdin
+            if _is_file(f):
+                _pprint_file(
+                    f,
+                    headers=headers,
+                    tablefmt=tablefmt,
+                    sep=sep,
+                    floatfmt=floatfmt,
+                    intfmt=intfmt,
+                    file=out,
+                    colalign=colalign,
+                )
+            else:
+                with open(f) as fobj:
+                    _pprint_file(
+                        fobj,
+                        headers=headers,
+                        tablefmt=tablefmt,
+                        sep=sep,
+                        floatfmt=floatfmt,
+                        intfmt=intfmt,
+                        file=out,
+                        colalign=colalign,
+                    )
+
+
+def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, intfmt, file, colalign):
+    rows = fobject.readlines()
+    table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
+    print(
+        tabulate(
+            table,
+            headers,
+            tablefmt,
+            floatfmt=floatfmt,
+            intfmt=intfmt,
+            colalign=colalign,
+        ),
+        file=file,
+    )
+
+
+if __name__ == "__main__":
+    _main()
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tabulate/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdbd85ac7b26b3cbe65556fec434e4f9f2d9009c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tabulate/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/tabulate/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1dad53923d329149f0e3960df442acd48df029bb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/tabulate/__pycache__/version.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate/version.py b/llmeval-env/lib/python3.10/site-packages/tabulate/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dd234cadb9062a0be1c43f22b3c0b4c10bbba5d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate/version.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '0.9.0'
+__version_tuple__ = version_tuple = (0, 9, 0)
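
A note on the JupyterHTMLStr wrapper in the tabulate sources above: _format_table returns it whenever the active format's lineabove is _html_begin_table_without_header, which is how the stock "html" format is wired in 0.9.0. A minimal sketch of the observable behavior; the exact markup is an implementation detail:

    from tabulate import tabulate

    # tablefmt="html" exercises the JupyterHTMLStr branch of _format_table
    html = tabulate([["spam", 42], ["eggs", 451]], headers=["item", "qty"], tablefmt="html")
    print(type(html).__name__)  # JupyterHTMLStr: Jupyter renders it via _repr_html_
    print(html.str)             # the .str property recovers the raw HTML string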
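Likewise, _CustomTextWrap._wrap_chunks is reached when a column is width-limited; a small sketch, assuming the maxcolwidths keyword of tabulate 0.9.0 (correct wide-character widths additionally require the optional wcwidth package, otherwise _len falls back to plain len):

    from tabulate import tabulate

    rows = [
        ["mixed", "ASCII plus 全角文字 in the same cell"],  # wide chars count as 2 columns
        ["plain", "a fairly long value that has to be wrapped"],
    ]
    # maxcolwidths caps the second column, forcing multiline cells through _CustomTextWrap
    print(tabulate(rows, headers=["kind", "value"], tablefmt="grid", maxcolwidths=[None, 18]))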
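Finally, _main backs the command-line interface documented in its docstring; a hypothetical invocation, assuming the tabulate console script this package normally installs is on PATH (data is read whitespace-separated from stdin, -1 takes the first row as the header, -f grid selects the output format):

    printf 'name qty\nspam 42\neggs 451\n' | tabulate -1 -f grid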