applied-ai-018 commited on
Commit
957eeee
·
verified ·
1 Parent(s): 668ca81

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__init__.py +139 -0
  2. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/datasets/formatting/formatting.py +653 -0
  10. llmeval-env/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py +160 -0
  11. llmeval-env/lib/python3.10/site-packages/datasets/formatting/np_formatter.py +106 -0
  12. llmeval-env/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py +122 -0
  13. llmeval-env/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py +115 -0
  14. llmeval-env/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py +115 -0
  15. llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dataset_viewer.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/cache.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/INSTALLER +1 -0
  31. llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/LICENSE +13 -0
  32. llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/METADATA +132 -0
  33. llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/RECORD +19 -0
  34. llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/WHEEL +6 -0
  35. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/COPYING +28 -0
  36. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER +1 -0
  37. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE +38 -0
  38. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA +203 -0
  39. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD +73 -0
  40. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL +5 -0
  41. llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/top_level.txt +2 -0
  42. llmeval-env/lib/python3.10/site-packages/pytablewriter/__init__.py +133 -0
  43. llmeval-env/lib/python3.10/site-packages/pytablewriter/__version__.py +6 -0
  44. llmeval-env/lib/python3.10/site-packages/pytablewriter/_converter.py +11 -0
  45. llmeval-env/lib/python3.10/site-packages/pytablewriter/_factory.py +274 -0
  46. llmeval-env/lib/python3.10/site-packages/pytablewriter/_function.py +84 -0
  47. llmeval-env/lib/python3.10/site-packages/pytablewriter/_table_format.py +353 -0
  48. llmeval-env/lib/python3.10/site-packages/pytablewriter/_typing.py +0 -0
  49. llmeval-env/lib/python3.10/site-packages/pytablewriter/error.py +34 -0
  50. llmeval-env/lib/python3.10/site-packages/pytablewriter/py.typed +0 -0
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__init__.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ruff: noqa
16
+
17
+ from typing import Dict, List, Optional, Type
18
+
19
+ from .. import config
20
+ from ..utils import logging
21
+ from .formatting import (
22
+ ArrowFormatter,
23
+ CustomFormatter,
24
+ Formatter,
25
+ PandasFormatter,
26
+ PythonFormatter,
27
+ TensorFormatter,
28
+ format_table,
29
+ query_table,
30
+ )
31
+ from .np_formatter import NumpyFormatter
32
+
33
+
34
logger = logging.get_logger(__name__)

# Main registry: format type name (None = plain python) -> Formatter class.
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# Alias name -> main format type name (each main name is also its own alias).
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# Alias name -> error to raise when the formatter's backend library is not installed.
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
39
+
40
+
41
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """
    Register a Formatter class under a main type name and optional aliases.

    Re-registering an existing name or alias is allowed, but is logged as a warning.
    """
    aliases = [] if aliases is None else aliases
    previous = _FORMAT_TYPES.get(format_type)
    if previous is not None:
        logger.warning(
            f"Overwriting format type '{format_type}' ({previous.__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    # the main type name is registered as an alias of itself
    for alias in {format_type, *aliases}:
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
62
+
63
+
64
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """
    Register the error to raise when an uninstalled formatter's type name (or any alias) is requested.
    """
    # the main type name acts as one of its own aliases
    for alias in {format_type, *(aliases if aliases is not None else [])}:
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
74
+
75
+
76
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

# Optional backends: register the formatter when the library is installed,
# otherwise register the error to raise when that format type is requested.
if config.POLARS_AVAILABLE:
    from .polars_formatter import PolarsFormatter

    _register_formatter(PolarsFormatter, "polars", aliases=["pl"])
else:
    _polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.")
    _register_unavailable_formatter(_polars_error, "polars", aliases=["pl"])

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
114
+
115
+
116
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a known alias to its main format type name; unknown names pass through unchanged."""
    return _FORMAT_TYPES_ALIASES.get(format_type, format_type)
122
+
123
+
124
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.

    A formatter is an object that extracts and formats data from pyarrow tables.
    It defines the formatting for rows, columns and batches.
    If the formatter for a given type name doesn't exist or is not available, an error is raised.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        # the format type is known but its backend library is not installed
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        # fixed: the original comprehension shadowed the builtin `type` and compared with `!= None`
        raise ValueError(
            f"Return type should be None or selected in {[fmt for fmt in _FORMAT_TYPES if fmt is not None]}, but got '{format_type}'"
        )
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc ADDED
Binary file (5.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/polars_formatter.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc ADDED
Binary file (4.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc ADDED
Binary file (3.98 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/formatting/formatting.py ADDED
@@ -0,0 +1,653 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import operator
16
+ from collections.abc import Mapping, MutableMapping
17
+ from functools import partial
18
+
19
+ # Lint as: python3
20
+ from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
21
+
22
+ import numpy as np
23
+ import pandas as pd
24
+ import pyarrow as pa
25
+ from packaging import version
26
+
27
+ from .. import config
28
+ from ..features import Features
29
+ from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
30
+ from ..table import Table
31
+ from ..utils.py_utils import no_op_if_value_is_null
32
+
33
+
34
# Generic element type used by batch/row helpers such as `_unnest`.
T = TypeVar("T")

# The three output formats a Formatter produces, parameterizing the extractor
# and formatter generics defined below.
RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")
39
+
40
+
41
+ def _is_range_contiguous(key: range) -> bool:
42
+ return key.step == 1 and key.stop >= key.start
43
+
44
+
45
+ def _raise_bad_key_type(key: Any):
46
+ raise TypeError(
47
+ f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
48
+ )
49
+
50
+
51
def _query_table_with_indices_mapping(
    table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that corresponds to the given key.
    The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
    account a shuffling or an indices selection for example.
    The indices table must contain one column named "indices" of type uint64.
    """
    if isinstance(key, int):
        # translate the logical row index through the indices mapping
        # (negative ints wrap around via the modulo)
        key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
        return _query_table(table, key)
    if isinstance(key, slice):
        # normalize the slice into a range over the mapped dataset's length;
        # falls through to the range branch below
        key = range(*key.indices(indices.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            # contiguous forward range: one slice of the indices column is enough
            return _query_table(
                table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
            )
        else:
            pass  # treat as an iterable
    if isinstance(key, str):
        # column query: select the column first, then apply the full indices mapping
        table = table.select([key])
        return _query_table(table, indices.column(0).to_pylist())
    if isinstance(key, Iterable):
        # map each requested row index through the indices table, one lookup per element
        return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])

    _raise_bad_key_type(key)
79
+
80
+
81
def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that corresponds to the given key.
    """
    if isinstance(key, int):
        # single row; negative indices wrap around via the modulo
        return table.fast_slice(key % table.num_rows, 1)
    if isinstance(key, slice):
        # normalize into a range and fall through to the range branch
        key = range(*key.indices(table.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            # contiguous forward range: a cheap slice is enough
            return table.fast_slice(key.start, key.stop - key.start)
        else:
            pass  # treat as an iterable
    if isinstance(key, str):
        # column query: keep only the requested column
        return table.table.drop([column for column in table.column_names if column != key])
    if isinstance(key, Iterable):
        key = np.fromiter(key, np.int64)
        if len(key) == 0:
            # empty selection: return an empty table with the same schema
            return table.table.slice(0, 0)
        # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
        return table.fast_gather(key % table.num_rows)

    _raise_bad_key_type(key)
104
+
105
+
106
def _is_array_with_nulls(pa_array: pa.Array) -> bool:
    """Return True when the arrow array contains at least one null value."""
    return bool(pa_array.null_count)
108
+
109
+
110
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
    """
    Base class for Arrow extractors, which pull data out of pyarrow tables.

    Subclasses must implement all three extraction granularities:
    a row, a column, and a whole batch.
    """

    def extract_row(self, pa_table: pa.Table) -> RowFormat:
        """Extract a row from `pa_table` in the target row format."""
        raise NotImplementedError

    def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
        """Extract a column from `pa_table` in the target column format."""
        raise NotImplementedError

    def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
        """Extract the whole of `pa_table` in the target batch format."""
        raise NotImplementedError
125
+
126
+
127
+ def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
128
+ """Return the first element of a batch (dict) as a row (dict)"""
129
+ return {key: array[0] for key, array in py_dict.items()}
130
+
131
+
132
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
    """Pass-through extractor: keeps data as pyarrow objects without any conversion."""

    def extract_row(self, pa_table: pa.Table) -> pa.Table:
        return pa_table

    def extract_column(self, pa_table: pa.Table) -> pa.Array:
        # assumes the queried column is at position 0 (callers pre-select it) -- see Formatter usage
        return pa_table.column(0)

    def extract_batch(self, pa_table: pa.Table) -> pa.Table:
        return pa_table
141
+
142
+
143
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
    """Extractor that converts arrow data to plain Python objects (dicts and lists)."""

    def extract_row(self, pa_table: pa.Table) -> dict:
        # convert the (single-row) table to a dict of lists, then unwrap the first element
        return _unnest(pa_table.to_pydict())

    def extract_column(self, pa_table: pa.Table) -> list:
        return pa_table.column(0).to_pylist()

    def extract_batch(self, pa_table: pa.Table) -> dict:
        return pa_table.to_pydict()
152
+
153
+
154
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
    """Extractor that converts arrow data to numpy arrays (rows and batches are dicts of arrays)."""

    def __init__(self, **np_array_kwargs):
        # NOTE(review): these kwargs are stored but not used within this class -- confirm where they apply
        self.np_array_kwargs = np_array_kwargs

    def extract_row(self, pa_table: pa.Table) -> dict:
        # extract as a batch, then take the first element of each column
        return _unnest(self.extract_batch(pa_table))

    def extract_column(self, pa_table: pa.Table) -> np.ndarray:
        return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])

    def extract_batch(self, pa_table: pa.Table) -> dict:
        return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}

    def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
        # Convert an arrow (possibly chunked) array to numpy, preferring zero-copy when safe.
        if isinstance(pa_array, pa.ChunkedArray):
            if isinstance(pa_array.type, _ArrayXDExtensionType):
                # don't call to_pylist() to preserve dtype of the fixed-size array
                zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
                array: List = [
                    row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
                ]
            else:
                # zero-copy is only safe for a suitable dtype with no nulls in any chunk
                zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
                    not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
                )
                array: List = [
                    row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
                ]
        else:
            if isinstance(pa_array.type, _ArrayXDExtensionType):
                # don't call to_pylist() to preserve dtype of the fixed-size array
                zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
                array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
            else:
                zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
                array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
        if len(array) > 0:
            # ragged shapes, object dtypes or NaN placeholders can't form a regular ndarray:
            # fall back to a 1-D array of objects
            if any(
                (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
                or (isinstance(x, float) and np.isnan(x))
                for x in array
            ):
                # NOTE(review): np.array(..., copy=False) raises in numpy >= 2.0 when a copy is
                # required; consider np.asarray if upgrading -- verify against the pinned numpy version
                return np.array(array, copy=False, dtype=object)
        return np.array(array, copy=False)
198
+
199
+
200
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
    """Extractor that converts arrow data to pandas objects (DataFrame rows/batches, Series columns)."""

    def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
        # keep only the first row, but stay in DataFrame form
        return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)

    def extract_column(self, pa_table: pa.Table) -> pd.Series:
        first_column = pa_table.column_names[0]
        return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[first_column]

    def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
        return pa_table.to_pandas(types_mapper=pandas_types_mapper)
209
+
210
+
211
class PythonFeaturesDecoder:
    """Decode Python rows/columns/batches with the dataset's Features; a no-op when features is falsy."""

    def __init__(self, features: Optional[Features]):
        self.features = features

    def decode_row(self, row: dict) -> dict:
        if self.features:
            return self.features.decode_example(row)
        return row

    def decode_column(self, column: list, column_name: str) -> list:
        if self.features:
            return self.features.decode_column(column, column_name)
        return column

    def decode_batch(self, batch: dict) -> dict:
        if self.features:
            return self.features.decode_batch(batch)
        return batch
223
+
224
+
225
class PandasFeaturesDecoder:
    """Decode pandas rows/columns/batches with the dataset's Features; a no-op when features is falsy."""

    def __init__(self, features: Optional[Features]):
        self.features = features

    def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
        # build a column-name -> decode-function mapping, only for columns that require decoding
        decode = (
            {
                column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
                for column_name, feature in self.features.items()
                if self.features._column_requires_decoding[column_name]
            }
            if self.features
            else {}
        )
        if decode:
            # apply all decoders in a single DataFrame.transform call, writing back in place
            row[list(decode.keys())] = row.transform(decode)
        return row

    def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
        # decode only when the column is a known feature that requires decoding
        decode = (
            no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
            if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
            else None
        )
        if decode:
            column = column.transform(decode)
        return column

    def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
        # a pandas batch is just a DataFrame, so batch decoding is row decoding
        return self.decode_row(batch)
255
+
256
+
257
class LazyDict(MutableMapping):
    """A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""

    def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
        self.pa_table = pa_table
        self.formatter = formatter

        # every key starts as an unformatted placeholder; `keys_to_format` tracks what is still lazy
        self.data = {key: None for key in pa_table.column_names}
        self.keys_to_format = set(self.data.keys())

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        value = self.data[key]
        if key in self.keys_to_format:
            # first access: format the value now and cache it
            value = self.format(key)
            self.data[key] = value
            self.keys_to_format.remove(key)
        return value

    def __setitem__(self, key, value):
        # an explicit assignment overrides any pending lazy formatting for that key
        if key in self.keys_to_format:
            self.keys_to_format.remove(key)
        self.data[key] = value

    def __delitem__(self, key) -> None:
        if key in self.keys_to_format:
            self.keys_to_format.remove(key)
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        return key in self.data

    def __repr__(self):
        # repr must show real values, so materialize everything first
        self._format_all()
        return repr(self.data)

    if config.PY_VERSION >= version.parse("3.9"):
        # merging with the union ("|") operator is supported in Python 3.9+

        def __or__(self, other):
            if isinstance(other, LazyDict):
                inst = self.copy()
                # the other side is fully formatted so the merged values are concrete
                other = other.copy()
                other._format_all()
                # keys provided by `other` no longer need lazy formatting on our side
                inst.keys_to_format -= other.data.keys()
                inst.data = inst.data | other.data
                return inst
            if isinstance(other, dict):
                inst = self.copy()
                inst.keys_to_format -= other.keys()
                inst.data = inst.data | other
                return inst
            return NotImplemented

        def __ror__(self, other):
            if isinstance(other, LazyDict):
                inst = self.copy()
                other = other.copy()
                other._format_all()
                inst.keys_to_format -= other.data.keys()
                # reflected operand order: `other` values lose to ours on key collisions
                inst.data = other.data | inst.data
                return inst
            if isinstance(other, dict):
                inst = self.copy()
                inst.keys_to_format -= other.keys()
                inst.data = other | inst.data
                return inst
            return NotImplemented

        def __ior__(self, other):
            if isinstance(other, LazyDict):
                other = other.copy()
                other._format_all()
                self.keys_to_format -= other.data.keys()
                self.data |= other.data
            else:
                # assumes `other` is dict-like (has .keys()) -- mirrors dict.__ior__
                self.keys_to_format -= other.keys()
                self.data |= other
            return self

    def __copy__(self):
        # Identical to `UserDict.__copy__`
        inst = self.__class__.__new__(self.__class__)
        inst.__dict__.update(self.__dict__)
        # Create a copy and avoid triggering descriptors
        inst.__dict__["data"] = self.__dict__["data"].copy()
        inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
        return inst

    def copy(self):
        import copy

        return copy.copy(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        # unsupported: a LazyDict must be backed by a table, not a plain iterable of keys
        raise NotImplementedError

    def format(self, key):
        """Produce the formatted value for `key` from the backing table; implemented by subclasses."""
        raise NotImplementedError

    def _format_all(self):
        # materialize every pending key; afterwards nothing is lazy anymore
        for key in self.keys_to_format:
            self.data[key] = self.format(key)
        self.keys_to_format.clear()
367
+
368
+
369
class LazyRow(LazyDict):
    """Lazy dict over a single-row table: formatting a key yields the scalar at row 0."""

    def format(self, key):
        formatted_column = self.formatter.format_column(self.pa_table.select([key]))
        return formatted_column[0]
372
+
373
+
374
class LazyBatch(LazyDict):
    """Lazy dict over a multi-row table: formatting a key yields the whole formatted column."""

    def format(self, key):
        single_column_table = self.pa_table.select([key])
        return self.formatter.format_column(single_column_table)
377
+
378
+
379
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
    """
    A formatter is an object that extracts and formats data from pyarrow tables.
    It defines the formatting for rows, columns and batches.
    """

    # extractor classes shared by all formatters; subclasses instantiate the one they need
    simple_arrow_extractor = SimpleArrowExtractor
    python_arrow_extractor = PythonArrowExtractor
    numpy_arrow_extractor = NumpyArrowExtractor
    pandas_arrow_extractor = PandasArrowExtractor

    def __init__(self, features: Optional[Features] = None):
        self.features = features
        self.python_features_decoder = PythonFeaturesDecoder(self.features)
        self.pandas_features_decoder = PandasFeaturesDecoder(self.features)

    def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
        # dispatch on the query granularity; an unknown query_type yields None (as before)
        handlers = {
            "row": self.format_row,
            "column": self.format_column,
            "batch": self.format_batch,
        }
        handler = handlers.get(query_type)
        if handler is not None:
            return handler(pa_table)

    def format_row(self, pa_table: pa.Table) -> RowFormat:
        raise NotImplementedError

    def format_column(self, pa_table: pa.Table) -> ColumnFormat:
        raise NotImplementedError

    def format_batch(self, pa_table: pa.Table) -> BatchFormat:
        raise NotImplementedError
411
+
412
+
413
class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
    """Base class for formatters that convert extracted data into tensor-like objects."""

    def recursive_tensorize(self, data_struct: dict):
        """Convert the nested values of `data_struct` to the target tensor type; implemented by subclasses."""
        raise NotImplementedError
416
+
417
+
418
class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
    """Formatter that keeps data in pyarrow form (no decoding, no conversion)."""

    def format_row(self, pa_table: pa.Table) -> pa.Table:
        extractor = self.simple_arrow_extractor()
        return extractor.extract_row(pa_table)

    def format_column(self, pa_table: pa.Table) -> pa.Array:
        extractor = self.simple_arrow_extractor()
        return extractor.extract_column(pa_table)

    def format_batch(self, pa_table: pa.Table) -> pa.Table:
        extractor = self.simple_arrow_extractor()
        return extractor.extract_batch(pa_table)
427
+
428
+
429
class PythonFormatter(Formatter[Mapping, list, Mapping]):
    """Formatter returning plain Python objects, optionally lazily (values formatted on first access)."""

    def __init__(self, features=None, lazy=False):
        super().__init__(features)
        self.lazy = lazy

    def format_row(self, pa_table: pa.Table) -> Mapping:
        if self.lazy:
            # defer extraction and decoding until individual keys are accessed
            return LazyRow(pa_table, self)
        extracted = self.python_arrow_extractor().extract_row(pa_table)
        return self.python_features_decoder.decode_row(extracted)

    def format_column(self, pa_table: pa.Table) -> list:
        extracted = self.python_arrow_extractor().extract_column(pa_table)
        return self.python_features_decoder.decode_column(extracted, pa_table.column_names[0])

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        if self.lazy:
            return LazyBatch(pa_table, self)
        extracted = self.python_arrow_extractor().extract_batch(pa_table)
        return self.python_features_decoder.decode_batch(extracted)
452
+
453
+
454
class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
    """Formatter returning pandas objects (DataFrame rows/batches, Series columns)."""

    def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
        extracted = self.pandas_arrow_extractor().extract_row(pa_table)
        return self.pandas_features_decoder.decode_row(extracted)

    def format_column(self, pa_table: pa.Table) -> pd.Series:
        extracted = self.pandas_arrow_extractor().extract_column(pa_table)
        return self.pandas_features_decoder.decode_column(extracted, pa_table.column_names[0])

    def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
        extracted = self.pandas_arrow_extractor().extract_batch(pa_table)
        return self.pandas_features_decoder.decode_batch(extracted)
469
+
470
+
471
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
    """
    A user-defined custom formatter function defined by a ``transform``.
    The transform must take as input a batch of data extracted for an arrow table using the python extractor,
    and return a batch.
    If the output batch is not a dict, then output_all_columns won't work.
    If the output batch has several fields, then querying a single column won't work since we don't know which field
    to return.
    """

    def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs):
        super().__init__(features=features)
        self.transform = transform

    def format_row(self, pa_table: pa.Table) -> dict:
        formatted_batch = self.format_batch(pa_table)
        try:
            return _unnest(formatted_batch)
        except Exception as exc:
            raise TypeError(
                f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
            ) from exc

    def format_column(self, pa_table: pa.Table) -> ColumnFormat:
        formatted_batch = self.format_batch(pa_table)
        if hasattr(formatted_batch, "keys"):
            if len(formatted_batch.keys()) > 1:
                raise TypeError(
                    "Tried to query a column but the custom formatting function returns too many columns. "
                    f"Only one column was expected but got columns {list(formatted_batch.keys())}."
                )
        else:
            # fixed: this error concerns a column query, not a row query
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a column, but got {formatted_batch}"
            )
        try:
            return formatted_batch[pa_table.column_names[0]]
        except Exception as exc:
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a column, but got {formatted_batch}"
            ) from exc

    def format_batch(self, pa_table: pa.Table) -> dict:
        # extract with the python extractor, decode features, then apply the user transform
        batch = self.python_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        return self.transform(batch)
517
+
518
+
519
+ def _check_valid_column_key(key: str, columns: List[str]) -> None:
520
+ if key not in columns:
521
+ raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
522
+
523
+
524
+ def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
525
+ if isinstance(key, int):
526
+ if (key < 0 and key + size < 0) or (key >= size):
527
+ raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
528
+ return
529
+ elif isinstance(key, slice):
530
+ pass
531
+ elif isinstance(key, range):
532
+ if len(key) > 0:
533
+ _check_valid_index_key(max(key), size=size)
534
+ _check_valid_index_key(min(key), size=size)
535
+ elif isinstance(key, Iterable):
536
+ if len(key) > 0:
537
+ _check_valid_index_key(int(max(key)), size=size)
538
+ _check_valid_index_key(int(min(key)), size=size)
539
+ else:
540
+ _raise_bad_key_type(key)
541
+
542
+
543
def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
    """Classify ``key`` as a "row", "column" or "batch" query.

    The ``str`` check must run before the ``Iterable`` check, since strings
    are themselves iterable but address a column, not a batch.
    """
    if isinstance(key, int):
        return "row"
    if isinstance(key, str):
        return "column"
    if isinstance(key, (slice, range, Iterable)):
        return "batch"
    _raise_bad_key_type(key)
551
+
552
+
553
def query_table(
    table: Table,
    key: Union[int, slice, range, str, Iterable],
    indices: Optional[Table] = None,
) -> pa.Table:
    """
    Query a Table to extract the subtable that correspond to the given key.

    Args:
        table (``datasets.table.Table``): The input Table to query from
        key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
            - an integer i: the subtable containing only the i-th row
            - a slice [i:j:k]: the subtable containing the rows that correspond to this slice
            - a range(i, j, k): the subtable containing the rows that correspond to this range
            - a string c: the subtable containing all the rows but only the column c
            - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
        indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
            The indices table must contain one column named "indices" of type uint64.
            This is used in case of shuffling or rows selection.

    Returns:
        ``pyarrow.Table``: the result of the query on the input table
    """
    # Coerce key-like objects (anything supporting __index__) to a plain int
    if not isinstance(key, (int, slice, range, str, Iterable)):
        try:
            key = operator.index(key)
        except TypeError:
            _raise_bad_key_type(key)

    # Validate the key against the column names or against the number of rows.
    # When an indices mapping is present, bounds are those of the mapping.
    if isinstance(key, str):
        _check_valid_column_key(key, table.column_names)
    else:
        num_rows = indices.num_rows if indices is not None else table.num_rows
        _check_valid_index_key(key, num_rows)

    # Query the main table, going through the indices mapping if there is one
    if indices is None:
        return _query_table(table, key)
    return _query_table_with_indices_mapping(table, key, indices=indices)
594
+
595
+
596
def format_table(
    table: Table,
    key: Union[int, slice, range, str, Iterable],
    formatter: Formatter,
    format_columns: Optional[list] = None,
    output_all_columns=False,
):
    """
    Format a Table depending on the key that was used and a Formatter object.

    Args:
        table (``datasets.table.Table``): The input Table to format
        key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
            the table as either a row, a column or a batch.
        formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
            PythonFormatter, NumpyFormatter, etc.
        format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the
            given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
        output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns
            that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.

    Returns:
        A row, column or batch formatted object defined by the Formatter:
        - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
        - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
        - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
        - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
        - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
    """
    pa_table = table.table if isinstance(table, Table) else table
    query_type = key_to_query_type(key)
    # columns outside format_columns fall back to plain python formatting
    python_formatter = PythonFormatter(features=formatter.features)

    # No column restriction: format everything with the requested formatter
    if format_columns is None:
        return formatter(pa_table, query_type=query_type)

    # Single-column query: pick the formatter based on whether it is a formatted column
    if query_type == "column":
        if key in format_columns:
            return formatter(pa_table, query_type)
        return python_formatter(pa_table, query_type=query_type)

    # Row/batch query: format only the requested columns...
    pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
    formatted_output = formatter(pa_table_to_format, query_type=query_type)
    if not output_all_columns:
        return formatted_output

    # ...then complete the output with the remaining columns, python-formatted
    if not isinstance(formatted_output, MutableMapping):
        raise TypeError(
            f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
        )
    pa_table_with_remaining_columns = pa_table.drop(
        col for col in pa_table.column_names if col in format_columns
    )
    remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
    formatted_output.update(remaining_columns_dict)
    return formatted_output
llmeval-env/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING, Dict, Optional
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.logging import get_logger
25
+ from ..utils.py_utils import map_nested
26
+ from .formatting import TensorFormatter
27
+
28
+
29
+ if TYPE_CHECKING:
30
+ import jax
31
+ import jaxlib
32
+
33
# Module-level logger, used for the unknown-device fallback warning below.
logger = get_logger()

# Global cache mapping device string identifiers to jaxlib Device objects.
# Kept at module level because Device objects cannot be pickled/dilled
# (see the comment in `JaxFormatter.__init__`).
DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow-extracted data into JAX arrays.

    Rows and batches are returned as mappings of column name to tensorized
    values; a single column is returned as a (possibly stacked) ``jax.Array``.
    Extra ``**jnp_array_kwargs`` are forwarded to ``jnp.array`` in ``_tensorize``.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        # default to the first available jax device when no string identifier is given
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            # unknown identifier: warn and fall back to the default device
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Return a mapping from device string identifiers to jax Device objects."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one array; otherwise return it as-is."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a jax array.

        Strings, bytes and ``None`` pass through unchanged; numpy character
        arrays become plain Python lists; PIL images are converted to numpy
        first. The array is created on the configured device.
        """
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize a (possibly nested) structure, consolidating lists when possible."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                # round-trip through numpy; [()] unwraps 0-d arrays to scalars
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply tensorization over a nested mapping (inner lists are handled by `_recursive_tensorize`)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Return the first row of ``pa_table`` as a mapping of tensorized values."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        """Return a column (decoded using the table's first column name), tensorized and consolidated."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Return the whole table as a mapping of column name to consolidated tensors."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
llmeval-env/lib/python3.10/site-packages/datasets/formatting/np_formatter.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import sys
16
+ from collections.abc import Mapping
17
+
18
+ import numpy as np
19
+ import pyarrow as pa
20
+
21
+ from .. import config
22
+ from ..utils.py_utils import map_nested
23
+ from .formatting import TensorFormatter
24
+
25
+
26
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
    """Formatter that converts Arrow-extracted data into NumPy arrays.

    Rows and batches are returned as mappings of column name to values; a
    single column is returned as an ``np.ndarray`` when it can be consolidated.
    Extra ``**np_array_kwargs`` are forwarded to ``np.asarray`` in ``_tensorize``.
    """

    def __init__(self, features=None, **np_array_kwargs):
        super().__init__(features=features)
        self.np_array_kwargs = np_array_kwargs

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype arrays; otherwise pack the list into a 1-D object array."""
        if isinstance(column, list):
            if column and all(
                isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return np.stack(column)
            else:
                # don't use np.array(column, dtype=object)
                # since it fails in certain cases
                # see https://stackoverflow.com/q/51005699
                out = np.empty(len(column), dtype=object)
                out[:] = column
                return out
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a NumPy value.

        Strings, bytes, ``None``, numpy character arrays and numpy scalars
        pass through unchanged; PIL images and everything else go through
        ``np.asarray`` (ints default to int64, floats to float32).
        """
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value
        elif isinstance(value, np.number):
            return value

        default_dtype = {}

        if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": np.int64}
        elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": np.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                return np.asarray(value, **self.np_array_kwargs)

        return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize a (possibly nested) structure, consolidating lists when possible."""
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                # round-trip through numpy; [()] unwraps 0-d arrays to scalars
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        if isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply tensorization over a nested mapping (inner lists are handled by `_recursive_tensorize`)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Return the first row of ``pa_table`` as a mapping of tensorized values."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> np.ndarray:
        """Return a column (decoded using the table's first column name), tensorized and consolidated."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Return the whole table as a mapping of column name to consolidated values."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
llmeval-env/lib/python3.10/site-packages/datasets/formatting/polars_formatter.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import sys
16
+ from collections.abc import Mapping
17
+ from functools import partial
18
+ from typing import TYPE_CHECKING, Optional
19
+
20
+ import pyarrow as pa
21
+
22
+ from .. import config
23
+ from ..features import Features
24
+ from ..features.features import decode_nested_example
25
+ from ..utils.py_utils import no_op_if_value_is_null
26
+ from .formatting import BaseArrowExtractor, TensorFormatter
27
+
28
+
29
+ if TYPE_CHECKING:
30
+ import polars as pl
31
+
32
+
33
class PolarsArrowExtractor(BaseArrowExtractor["pl.DataFrame", "pl.Series", "pl.DataFrame"]):
    """Extract pyarrow Table contents as Polars objects.

    Each method raises ``ValueError`` when polars is not available; the
    already-imported module is reused from ``sys.modules`` when possible.
    """

    def extract_row(self, pa_table: pa.Table) -> "pl.DataFrame":
        """Return the first row of ``pa_table`` as a one-row Polars DataFrame."""
        if config.POLARS_AVAILABLE:
            if "polars" not in sys.modules:
                import polars
            else:
                polars = sys.modules["polars"]

            return polars.from_arrow(pa_table.slice(length=1))
        else:
            raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")

    def extract_column(self, pa_table: pa.Table) -> "pl.Series":
        """Return the first column of ``pa_table`` as a Polars Series."""
        if config.POLARS_AVAILABLE:
            if "polars" not in sys.modules:
                import polars
            else:
                polars = sys.modules["polars"]

            return polars.from_arrow(pa_table.select([0]))[pa_table.column_names[0]]
        else:
            raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")

    def extract_batch(self, pa_table: pa.Table) -> "pl.DataFrame":
        """Return the whole table as a Polars DataFrame."""
        if config.POLARS_AVAILABLE:
            if "polars" not in sys.modules:
                import polars
            else:
                polars = sys.modules["polars"]

            return polars.from_arrow(pa_table)
        else:
            raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
66
+
67
+
68
class PolarsFeaturesDecoder:
    """Decode dataset feature values inside Polars objects.

    Only the columns that ``Features`` marks as requiring decoding are
    touched; null values are passed through by ``no_op_if_value_is_null``.
    """

    def __init__(self, features: Optional[Features]):
        self.features = features
        import polars as pl  # noqa: F401 - import pl at initialization

    def decode_row(self, row: "pl.DataFrame") -> "pl.DataFrame":
        """Apply per-column decoders to the columns that require decoding; others are untouched."""
        decode = (
            {
                column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
                for column_name, feature in self.features.items()
                if self.features._column_requires_decoding[column_name]
            }
            if self.features
            else {}
        )
        if decode:
            # NOTE(review): `map_rows` is applied with a dict of per-column decoders and
            # the result is assigned back to the decoded columns — verify that the
            # output column order matches `decode`'s key order against the polars docs.
            row[list(decode.keys())] = row.map_rows(decode)
        return row

    def decode_column(self, column: "pl.Series", column_name: str) -> "pl.Series":
        """Decode a single Series element-wise when its feature requires decoding."""
        decode = (
            no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
            if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
            else None
        )
        if decode:
            column = column.map_elements(decode)
        return column

    def decode_batch(self, batch: "pl.DataFrame") -> "pl.DataFrame":
        """Batches are DataFrames too, so decoding is identical to `decode_row`."""
        return self.decode_row(batch)
99
+
100
+
101
class PolarsFormatter(TensorFormatter[Mapping, "pl.DataFrame", Mapping]):
    """Formatter returning Polars objects.

    Rows and batches come back as ``pl.DataFrame``; a single column comes
    back as ``pl.Series``. Polars is imported eagerly at construction time.
    """

    def __init__(self, features=None, **np_array_kwargs):
        super().__init__(features=features)
        self.np_array_kwargs = np_array_kwargs
        self.polars_arrow_extractor = PolarsArrowExtractor
        self.polars_features_decoder = PolarsFeaturesDecoder(features)
        import polars as pl  # noqa: F401 - import pl at initialization

    def format_row(self, pa_table: pa.Table) -> "pl.DataFrame":
        """Return the first row of ``pa_table`` as a one-row DataFrame."""
        extracted = self.polars_arrow_extractor().extract_row(pa_table)
        return self.polars_features_decoder.decode_row(extracted)

    def format_column(self, pa_table: pa.Table) -> "pl.Series":
        """Return a column as a Series, decoded using the table's first column name."""
        extracted = self.polars_arrow_extractor().extract_column(pa_table)
        return self.polars_features_decoder.decode_column(extracted, pa_table.column_names[0])

    def format_batch(self, pa_table: pa.Table) -> "pl.DataFrame":
        """Return the whole table as a DataFrame."""
        extracted = self.polars_arrow_extractor().extract_batch(pa_table)
        return self.polars_features_decoder.decode_batch(extracted)
llmeval-env/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.py_utils import map_nested
25
+ from .formatting import TensorFormatter
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ import tensorflow as tf
30
+
31
+
32
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
    """Formatter that converts Arrow-extracted data into TensorFlow tensors.

    Rows and batches are returned as mappings of column name to tensorized
    values; a single column is returned as a (possibly stacked) ``tf.Tensor``.
    Extra ``**tf_tensor_kwargs`` are forwarded to ``tf.convert_to_tensor``.
    """

    def __init__(self, features=None, **tf_tensor_kwargs):
        super().__init__(features=features)
        self.tf_tensor_kwargs = tf_tensor_kwargs
        import tensorflow as tf  # noqa: F401 - import tf at initialization

    def _consolidate(self, column):
        """Stack a list of tensors: dense stack when shapes/dtypes match, ragged stack for 1-D tensors."""
        import tensorflow as tf

        if isinstance(column, list) and column:
            if all(
                isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return tf.stack(column)
            elif all(
                isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
                for x in column
            ):
                # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
                return tf.ragged.stack(column)

        return column

    def _tensorize(self, value):
        """Convert one leaf value to a tf tensor.

        ``None`` passes through unchanged; PIL images are converted to numpy
        first; ints default to tf.int64 and floats to tf.float32.
        """
        import tensorflow as tf

        if value is None:
            return value

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": tf.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": tf.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize a (possibly nested) structure, consolidating lists when possible."""
        import tensorflow as tf

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                # round-trip through numpy; [()] unwraps 0-d arrays to scalars
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply tensorization over a nested mapping (inner lists are handled by `_recursive_tensorize`)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Return the first row of ``pa_table`` as a mapping of tensorized values."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
        """Return a column (decoded using the table's first column name), tensorized and consolidated."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Return the whole table as a mapping of column name to consolidated tensors."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
llmeval-env/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.py_utils import map_nested
25
+ from .formatting import TensorFormatter
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ import torch
30
+
31
+
32
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow-extracted data into PyTorch tensors.

    Rows and batches are returned as mappings of column name to tensorized
    values; a single column is returned as a (possibly stacked) ``torch.Tensor``.
    Extra ``**torch_tensor_kwargs`` are forwarded to ``torch.tensor``.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor; otherwise return it as-is."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a torch tensor.

        Strings, bytes and ``None`` pass through unchanged; numpy character
        arrays become plain Python lists; PIL images are converted to numpy
        and transposed to channel-first layout; ints default to torch.int64
        and floats to torch.float32.
        """
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}

            # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
            # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
            if value.dtype in [np.uint16, np.uint32]:
                value = value.astype(np.int64)

        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
                if value.ndim == 2:
                    # add a trailing channel axis to grayscale images
                    value = value[:, :, np.newaxis]

                # channel-last (H, W, C) -> channel-first (C, H, W)
                value = value.transpose((2, 0, 1))
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize a (possibly nested) structure, consolidating lists when possible."""
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply tensorization over a nested mapping (inner lists are handled by `_recursive_tensorize`)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Return the first row of ``pa_table`` as a mapping of tensorized values."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        """Return a column (decoded using the table's first column name), tensorized and consolidated."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Return the whole table as a mapping of column name to consolidated tensors."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc ADDED
Binary file (1.97 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (585 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dataset_viewer.cpython-310.pyc ADDED
Binary file (3.22 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/cache.cpython-310.pyc ADDED
Binary file (6.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc ADDED
Binary file (3.63 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc ADDED
Binary file (697 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc ADDED
Binary file (325 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc ADDED
Binary file (5.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc ADDED
Binary file (8.96 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc ADDED
Binary file (4.03 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc ADDED
Binary file (4.03 kB). View file
 
llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/LICENSE ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2016 Andrew Svetlov and aio-libs contributors
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/METADATA ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: multidict
3
+ Version: 6.0.5
4
+ Summary: multidict implementation
5
+ Home-page: https://github.com/aio-libs/multidict
6
+ Author: Andrew Svetlov
7
+ Author-email: [email protected]
8
+ License: Apache 2
9
+ Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
10
+ Project-URL: CI: GitHub, https://github.com/aio-libs/multidict/actions
11
+ Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/multidict
12
+ Project-URL: Docs: RTD, https://multidict.aio-libs.org
13
+ Project-URL: GitHub: issues, https://github.com/aio-libs/multidict/issues
14
+ Project-URL: GitHub: repo, https://github.com/aio-libs/multidict
15
+ Classifier: Development Status :: 5 - Production/Stable
16
+ Classifier: Intended Audience :: Developers
17
+ Classifier: License :: OSI Approved :: Apache Software License
18
+ Classifier: Programming Language :: Python
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: 3.10
24
+ Classifier: Programming Language :: Python :: 3.11
25
+ Classifier: Programming Language :: Python :: 3.12
26
+ Requires-Python: >=3.7
27
+ Description-Content-Type: text/x-rst
28
+ License-File: LICENSE
29
+
30
+ =========
31
+ multidict
32
+ =========
33
+
34
+ .. image:: https://github.com/aio-libs/multidict/workflows/CI/badge.svg
35
+ :target: https://github.com/aio-libs/multidict/actions?query=workflow%3ACI
36
+ :alt: GitHub status for master branch
37
+
38
+ .. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg
39
+ :target: https://codecov.io/gh/aio-libs/multidict
40
+ :alt: Coverage metrics
41
+
42
+ .. image:: https://img.shields.io/pypi/v/multidict.svg
43
+ :target: https://pypi.org/project/multidict
44
+ :alt: PyPI
45
+
46
+ .. image:: https://readthedocs.org/projects/multidict/badge/?version=latest
47
+ :target: http://multidict.aio-libs.org/en/latest/?badge=latest
48
+ :alt: Documentation
49
+
50
+ .. image:: https://img.shields.io/pypi/pyversions/multidict.svg
51
+ :target: https://pypi.org/project/multidict
52
+ :alt: Python versions
53
+
54
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
55
+ :target: https://gitter.im/aio-libs/Lobby
56
+ :alt: Chat on Gitter
57
+
58
+ Multidict is dict-like collection of *key-value pairs* where key
59
+ might occur more than once in the container.
60
+
61
+ Introduction
62
+ ------------
63
+
64
+ *HTTP Headers* and *URL query string* require specific data structure:
65
+ *multidict*. It behaves mostly like a regular ``dict`` but it may have
66
+ several *values* for the same *key* and *preserves insertion ordering*.
67
+
68
+ The *key* is ``str`` (or ``istr`` for case-insensitive dictionaries).
69
+
70
+ ``multidict`` has four multidict classes:
71
+ ``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``
72
+ and ``CIMultiDictProxy``.
73
+
74
+ Immutable proxies (``MultiDictProxy`` and
75
+ ``CIMultiDictProxy``) provide a dynamic view for the
76
+ proxied multidict, the view reflects underlying collection changes. They
77
+ implement the ``collections.abc.Mapping`` interface.
78
+
79
+ Regular mutable (``MultiDict`` and ``CIMultiDict``) classes
80
+ implement ``collections.abc.MutableMapping`` and allows them to change
81
+ their own content.
82
+
83
+
84
+ *Case insensitive* (``CIMultiDict`` and
85
+ ``CIMultiDictProxy``) assume the *keys* are case
86
+ insensitive, e.g.::
87
+
88
+ >>> dct = CIMultiDict(key='val')
89
+ >>> 'Key' in dct
90
+ True
91
+ >>> dct['Key']
92
+ 'val'
93
+
94
+ *Keys* should be ``str`` or ``istr`` instances.
95
+
96
+ The library has optional C Extensions for speed.
97
+
98
+
99
+ License
100
+ -------
101
+
102
+ Apache 2
103
+
104
+ Library Installation
105
+ --------------------
106
+
107
+ .. code-block:: bash
108
+
109
+ $ pip install multidict
110
+
111
+ The library is Python 3 only!
112
+
113
+ PyPI contains binary wheels for Linux, Windows and MacOS. If you want to install
114
+ ``multidict`` on another operating system (or *Alpine Linux* inside a Docker) the
115
+ tarball will be used to compile the library from source. It requires a C compiler and
116
+ Python headers to be installed.
117
+
118
+ To skip the compilation, please use the `MULTIDICT_NO_EXTENSIONS` environment variable,
119
+ e.g.:
120
+
121
+ .. code-block:: bash
122
+
123
+ $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict
124
+
125
+ Please note, the pure Python (uncompiled) version is about 20-50 times slower depending on
126
+ the usage scenario!!!
127
+
128
+
129
+
130
+ Changelog
131
+ ---------
132
+ See `RTD page <http://multidict.aio-libs.org/en/latest/changes>`_.
llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ multidict-6.0.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ multidict-6.0.5.dist-info/LICENSE,sha256=k9Ealo4vDzY3PECBH_bSDhc_WMPKtYhM1mF7v9eVSSo,611
3
+ multidict-6.0.5.dist-info/METADATA,sha256=fGbYCQYEMcDtxEz2H6GLf1np9JtMhNTaLVzgAhsQYzU,4214
4
+ multidict-6.0.5.dist-info/RECORD,,
5
+ multidict-6.0.5.dist-info/WHEEL,sha256=1FEjxEYgybphwh9S0FO9IcZ0B-NIeM2ko8OzhFZeOeQ,152
6
+ multidict-6.0.5.dist-info/top_level.txt,sha256=-euDElkk5_qkmfIJ7WiqCab02ZlSFZWynejKg59qZQQ,10
7
+ multidict/__init__.py,sha256=psbRrP64CD22Wjoc_OoqG9QlkRGcaZfOFCoPmoUiMig,928
8
+ multidict/__init__.pyi,sha256=SbgC2ew1NvNXWlRKs9o0KhW4moozgMqgQ0OA4Re5JQQ,4840
9
+ multidict/__pycache__/__init__.cpython-310.pyc,,
10
+ multidict/__pycache__/_abc.cpython-310.pyc,,
11
+ multidict/__pycache__/_compat.cpython-310.pyc,,
12
+ multidict/__pycache__/_multidict_base.cpython-310.pyc,,
13
+ multidict/__pycache__/_multidict_py.cpython-310.pyc,,
14
+ multidict/_abc.py,sha256=Zvnrn4SBkrv4QTD7-ZzqNcoxw0f8KStLMPzGvBuGT2w,1190
15
+ multidict/_compat.py,sha256=tjUGdP9ooiH6c2KJrvUbPRwcvjWerKlKU6InIviwh7w,316
16
+ multidict/_multidict.cpython-310-x86_64-linux-gnu.so,sha256=BmNKiShRM0HlwDp_fjuenfzDjBlo6C0BEfpi55TRp-k,394656
17
+ multidict/_multidict_base.py,sha256=XugkE78fXBmtzDdg2Yi9TrEhDexmL-6qJbFIG0viLMg,3791
18
+ multidict/_multidict_py.py,sha256=57h4sYrRIu7EjMX4YpHVIZVrV9-q1KCW3F6rao10D3U,15050
19
+ multidict/py.typed,sha256=e9bmbH3UFxsabQrnNFPG9qxIXztwbcM6IKDYnvZwprY,15
llmeval-env/lib/python3.10/site-packages/multidict-6.0.5.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.42.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/COPYING ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2006-2008, R Oudkerk
2
+
3
+ All rights reserved.
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions
7
+ are met:
8
+
9
+ 1. Redistributions of source code must retain the above copyright
10
+ notice, this list of conditions and the following disclaimer.
11
+ 2. Redistributions in binary form must reproduce the above copyright
12
+ notice, this list of conditions and the following disclaimer in the
13
+ documentation and/or other materials provided with the distribution.
14
+ 3. Neither the name of author nor the names of any contributors may be
15
+ used to endorse or promote products derived from this software
16
+ without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
19
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
+ OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
+ SUCH DAMAGE.
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/LICENSE ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2008-2016 California Institute of Technology.
2
+ Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
3
+ All rights reserved.
4
+
5
+ This software forks the python package "multiprocessing". Licence and
6
+ copyright information for multiprocessing can be found in "COPYING".
7
+
8
+ This software is available subject to the conditions and terms laid
9
+ out below. By downloading and using this software you are agreeing
10
+ to the following conditions.
11
+
12
+ Redistribution and use in source and binary forms, with or without
13
+ modification, are permitted provided that the following conditions
14
+ are met:
15
+
16
+ - Redistributions of source code must retain the above copyright
17
+ notice, this list of conditions and the following disclaimer.
18
+
19
+ - Redistributions in binary form must reproduce the above copyright
20
+ notice, this list of conditions and the following disclaimer in the
21
+ documentation and/or other materials provided with the distribution.
22
+
23
+ - Neither the names of the copyright holders nor the names of any of
24
+ the contributors may be used to endorse or promote products derived
25
+ from this software without specific prior written permission.
26
+
27
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
34
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
35
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
36
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
37
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38
+
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/METADATA ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: multiprocess
3
+ Version: 0.70.16
4
+ Summary: better multiprocessing and multithreading in Python
5
+ Home-page: https://github.com/uqfoundation/multiprocess
6
+ Download-URL: https://pypi.org/project/multiprocess/#files
7
+ Author: Mike McKerns
8
+ Author-email: [email protected]
9
+ Maintainer: Mike McKerns
10
+ Maintainer-email: [email protected]
11
+ License: BSD-3-Clause
12
+ Project-URL: Documentation, http://multiprocess.rtfd.io
13
+ Project-URL: Source Code, https://github.com/uqfoundation/multiprocess
14
+ Project-URL: Bug Tracker, https://github.com/uqfoundation/multiprocess/issues
15
+ Platform: Linux
16
+ Platform: Windows
17
+ Platform: Mac
18
+ Classifier: Development Status :: 5 - Production/Stable
19
+ Classifier: Intended Audience :: Developers
20
+ Classifier: Intended Audience :: Science/Research
21
+ Classifier: License :: OSI Approved :: BSD License
22
+ Classifier: Programming Language :: Python :: 3
23
+ Classifier: Programming Language :: Python :: 3.8
24
+ Classifier: Programming Language :: Python :: 3.9
25
+ Classifier: Programming Language :: Python :: 3.10
26
+ Classifier: Programming Language :: Python :: 3.11
27
+ Classifier: Programming Language :: Python :: 3.12
28
+ Classifier: Programming Language :: Python :: Implementation :: CPython
29
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
30
+ Classifier: Topic :: Scientific/Engineering
31
+ Classifier: Topic :: Software Development
32
+ Requires-Python: >=3.8
33
+ License-File: LICENSE
34
+ License-File: COPYING
35
+ Requires-Dist: dill (>=0.3.8)
36
+
37
+ -----------------------------------------------------------------
38
+ multiprocess: better multiprocessing and multithreading in Python
39
+ -----------------------------------------------------------------
40
+
41
+ About Multiprocess
42
+ ==================
43
+
44
+ ``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6.
45
+
46
+ ``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing.
47
+ ``multiprocess`` is in active development, so any user feedback, bug reports, comments,
48
+ or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.
49
+
50
+
51
+ Major Features
52
+ ==============
53
+
54
+ ``multiprocess`` enables:
55
+
56
+ - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues
57
+ - objects to be shared between processes using a server process or (for simple data) shared memory
58
+
59
+ ``multiprocess`` provides:
60
+
61
+ - equivalents of all the synchronization primitives in ``threading``
62
+ - a ``Pool`` class to facilitate submitting tasks to worker processes
63
+ - enhanced serialization, using ``dill``
64
+
65
+
66
+ Current Release
67
+ ===============
68
+
69
+ The latest released version of ``multiprocess`` is available from:
70
+
71
+ https://pypi.org/project/multiprocess
72
+
73
+ ``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.
74
+
75
+
76
+ Development Version
77
+ ===================
78
+
79
+ You can get the latest development version with all the shiny new features at:
80
+
81
+ https://github.com/uqfoundation
82
+
83
+ If you have a new contribution, please submit a pull request.
84
+
85
+
86
+ Installation
87
+ ============
88
+
89
+ ``multiprocess`` can be installed with ``pip``::
90
+
91
+ $ pip install multiprocess
92
+
93
+ For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.
94
+
95
+
96
+ Requirements
97
+ ============
98
+
99
+ ``multiprocess`` requires:
100
+
101
+ - ``python`` (or ``pypy``), **>=3.8**
102
+ - ``setuptools``, **>=42**
103
+ - ``dill``, **>=0.3.8**
104
+
105
+
106
+ Basic Usage
107
+ ===========
108
+
109
+ The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
110
+ For example ::
111
+
112
+ from multiprocess import Process, Queue
113
+
114
+ def f(q):
115
+ q.put('hello world')
116
+
117
+ if __name__ == '__main__':
118
+ q = Queue()
119
+ p = Process(target=f, args=[q])
120
+ p.start()
121
+ print (q.get())
122
+ p.join()
123
+
124
+ Synchronization primitives like locks, semaphores and conditions are
125
+ available, for example ::
126
+
127
+ >>> from multiprocess import Condition
128
+ >>> c = Condition()
129
+ >>> print (c)
130
+ <Condition(<RLock(None, 0)>), 0>
131
+ >>> c.acquire()
132
+ True
133
+ >>> print (c)
134
+ <Condition(<RLock(MainProcess, 1)>), 0>
135
+
136
+ One can also use a manager to create shared objects either in shared
137
+ memory or in a server process, for example ::
138
+
139
+ >>> from multiprocess import Manager
140
+ >>> manager = Manager()
141
+ >>> l = manager.list(range(10))
142
+ >>> l.reverse()
143
+ >>> print (l)
144
+ [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
145
+ >>> print (repr(l))
146
+ <Proxy[list] object at 0x00E1B3B0>
147
+
148
+ Tasks can be offloaded to a pool of worker processes in various ways,
149
+ for example ::
150
+
151
+ >>> from multiprocess import Pool
152
+ >>> def f(x): return x*x
153
+ ...
154
+ >>> p = Pool(4)
155
+ >>> result = p.map_async(f, range(10))
156
+ >>> print (result.get(timeout=1))
157
+ [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
158
+
159
+ When ``dill`` is installed, serialization is extended to most objects,
160
+ for example ::
161
+
162
+ >>> from multiprocess import Pool
163
+ >>> p = Pool(4)
164
+ >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10)))
165
+ [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
166
+
167
+
168
+ More Information
169
+ ================
170
+
171
+ Probably the best way to get started is to look at the documentation at
172
+ http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that
173
+ demonstrate how ``multiprocess`` can be used to leverge multiple processes
174
+ to execute Python in parallel. You can run the test suite with
175
+ ``python -m multiprocess.tests``. As ``multiprocess`` conforms to the
176
+ ``multiprocessing`` interface, the examples and documentation found at
177
+ http://docs.python.org/library/multiprocessing.html also apply to
178
+ ``multiprocess`` if one will ``import multiprocessing as multiprocess``.
179
+ See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples
180
+ for a set of examples that demonstrate some basic use cases and benchmarking
181
+ for running Python code in parallel. Please feel free to submit a ticket on
182
+ github, or ask a question on stackoverflow (**@Mike McKerns**). If you would
183
+ like to share how you use ``multiprocess`` in your work, please send an email
184
+ (to **mmckerns at uqfoundation dot org**).
185
+
186
+
187
+ Citation
188
+ ========
189
+
190
+ If you use ``multiprocess`` to do research that leads to publication, we ask that you
191
+ acknowledge use of ``multiprocess`` by citing the following in your publication::
192
+
193
+ M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
194
+ "Building a framework for predictive science", Proceedings of
195
+ the 10th Python in Science Conference, 2011;
196
+ http://arxiv.org/pdf/1202.1056
197
+
198
+ Michael McKerns and Michael Aivazis,
199
+ "pathos: a framework for heterogeneous computing", 2010- ;
200
+ https://uqfoundation.github.io/project/pathos
201
+
202
+ Please see https://uqfoundation.github.io/project/pathos or
203
+ http://arxiv.org/pdf/1202.1056 for further information.
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/RECORD ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _multiprocess/__init__.py,sha256=zX5_h36TGSL0brHRtBvCL5E59ccW7yjL79i-Y399ODM,321
2
+ _multiprocess/__pycache__/__init__.cpython-310.pyc,,
3
+ multiprocess-0.70.16.dist-info/COPYING,sha256=n3_yfLkw0sMgLuB-PS1hRvTeZ20GmjPaMWbJjNuoOpU,1493
4
+ multiprocess-0.70.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
5
+ multiprocess-0.70.16.dist-info/LICENSE,sha256=6XUJedJKg2dhI98BD3PMtVtZvRFT-oGczkOr5B4tEEA,1930
6
+ multiprocess-0.70.16.dist-info/METADATA,sha256=Sv2eH2CjjyjVYaryTKqHkbJTlxlVA-SbmziCgkBJeQ0,7151
7
+ multiprocess-0.70.16.dist-info/RECORD,,
8
+ multiprocess-0.70.16.dist-info/WHEEL,sha256=KxatxaZA14OswIJTdImHhiM2tdZgU-xLZEzs-sYveVc,94
9
+ multiprocess-0.70.16.dist-info/top_level.txt,sha256=qtJc8GNdvi6suNpISX0Myln9AXJBYrNuas1MCqRPPqg,27
10
+ multiprocess/__info__.py,sha256=84TUBn1oJMNpbVvXKs0lKyfLYaZvRr-ZVh1zHM9VeCY,7997
11
+ multiprocess/__init__.py,sha256=XWUBDGorUkDW04h64xe51pUV9N5gzvSDj3tNT2ekifw,1856
12
+ multiprocess/__pycache__/__info__.cpython-310.pyc,,
13
+ multiprocess/__pycache__/__init__.cpython-310.pyc,,
14
+ multiprocess/__pycache__/connection.cpython-310.pyc,,
15
+ multiprocess/__pycache__/context.cpython-310.pyc,,
16
+ multiprocess/__pycache__/forkserver.cpython-310.pyc,,
17
+ multiprocess/__pycache__/heap.cpython-310.pyc,,
18
+ multiprocess/__pycache__/managers.cpython-310.pyc,,
19
+ multiprocess/__pycache__/pool.cpython-310.pyc,,
20
+ multiprocess/__pycache__/popen_fork.cpython-310.pyc,,
21
+ multiprocess/__pycache__/popen_forkserver.cpython-310.pyc,,
22
+ multiprocess/__pycache__/popen_spawn_posix.cpython-310.pyc,,
23
+ multiprocess/__pycache__/popen_spawn_win32.cpython-310.pyc,,
24
+ multiprocess/__pycache__/process.cpython-310.pyc,,
25
+ multiprocess/__pycache__/queues.cpython-310.pyc,,
26
+ multiprocess/__pycache__/reduction.cpython-310.pyc,,
27
+ multiprocess/__pycache__/resource_sharer.cpython-310.pyc,,
28
+ multiprocess/__pycache__/resource_tracker.cpython-310.pyc,,
29
+ multiprocess/__pycache__/shared_memory.cpython-310.pyc,,
30
+ multiprocess/__pycache__/sharedctypes.cpython-310.pyc,,
31
+ multiprocess/__pycache__/spawn.cpython-310.pyc,,
32
+ multiprocess/__pycache__/synchronize.cpython-310.pyc,,
33
+ multiprocess/__pycache__/util.cpython-310.pyc,,
34
+ multiprocess/connection.py,sha256=TO9BbLVlLVjTjr0fP7lIumBgiLwaFVnpqMBgFG6iL9s,31843
35
+ multiprocess/context.py,sha256=2fYvgfnu3B8wj8UyNndHUHgeuVDoVxlkFFKryycstaU,11610
36
+ multiprocess/dummy/__init__.py,sha256=kSekDqD_NCy0FDg7XnxZSgW-Ldg1_iRr07sNwDajKpA,3061
37
+ multiprocess/dummy/__pycache__/__init__.cpython-310.pyc,,
38
+ multiprocess/dummy/__pycache__/connection.cpython-310.pyc,,
39
+ multiprocess/dummy/connection.py,sha256=1j3Rl5_enBM-_kMO6HDmum3kPAoFE4Zs485HV5H-V6s,1598
40
+ multiprocess/forkserver.py,sha256=hiltKfLImDYJyAcezNAgMDaQznB2LtYWgwre0QroLRg,12138
41
+ multiprocess/heap.py,sha256=9rt5u5m5rkhJNfDWiCLpYDoWIt0LbElmx52yMqk7phQ,11626
42
+ multiprocess/managers.py,sha256=Y5m_aCdLE4mSCuyVrwMWg5Nh9f4OdSHDlSajyOgyGao,47562
43
+ multiprocess/pool.py,sha256=FTmtfoqkuN8Dd48f5TgdkokoxYN75xcnR78Hw-bLSng,32759
44
+ multiprocess/popen_fork.py,sha256=Nvq5vVId24UfkOQxXhxZbcXuo8d6YMc409yRXAamTd0,2374
45
+ multiprocess/popen_forkserver.py,sha256=SrEbV8Wv0Uu_UegkaW-cayXRdjTGXr560Yyy90pj-yE,2227
46
+ multiprocess/popen_spawn_posix.py,sha256=l7XSWqR5UWiUSJh35qeSElLuNfUeEYwvH5HzKRnnyqg,2029
47
+ multiprocess/popen_spawn_win32.py,sha256=A9uvlPmhO8JBzNcEU_Gmix2Q_qYJW1NXZgXPwtN5Ao0,4011
48
+ multiprocess/process.py,sha256=GIIo2NiBsX1r_m0J1TcnbdeSulGLWHElRCuYRkkdgQ4,12083
49
+ multiprocess/queues.py,sha256=sgXCXnIOVrPScqI3lwRD9t3IshqIBMEksLtouPH9Nzc,12139
50
+ multiprocess/reduction.py,sha256=NQQ6KbDhmuAyaDeWaIarTZQokGPhcFda1poNnPm5uNc,9637
51
+ multiprocess/resource_sharer.py,sha256=nEApLhMQqd8KunfaNKl3n8vdeiCGPxKrSL1Ja0nNAEk,5132
52
+ multiprocess/resource_tracker.py,sha256=_D2iX4IWRe3dOwLoLjfCnXNbDAM4pRzA8qEMTcRfutw,9056
53
+ multiprocess/shared_memory.py,sha256=UTAecHECIOHElP9Tg6yURCo4pKZiLy65TkASjEXeGus,18458
54
+ multiprocess/sharedctypes.py,sha256=d-9SKRJHRlJJC331IxEoWOUXIeY9zxCbhWejXOmzGw0,6306
55
+ multiprocess/spawn.py,sha256=cgtV66HhV_yIVzvdblc8bVdSpem16Ks0BOFu_bV5PDQ,9293
56
+ multiprocess/synchronize.py,sha256=6q1ijwWyWLWLO8uUtaYT9MKepAYKfdzWPSEZGyJFP4s,11829
57
+ multiprocess/tests/__init__.py,sha256=k00IjwhAUV_O1bp81895vN1gLnFzBM3iM-QTn5VrQnU,199087
58
+ multiprocess/tests/__main__.py,sha256=RauIRQrO0HwRq_clLqbBk4gwo5Xw3-ASLuC029XaHeA,912
59
+ multiprocess/tests/__pycache__/__init__.cpython-310.pyc,,
60
+ multiprocess/tests/__pycache__/__main__.cpython-310.pyc,,
61
+ multiprocess/tests/__pycache__/mp_fork_bomb.cpython-310.pyc,,
62
+ multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc,,
63
+ multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc,,
64
+ multiprocess/tests/__pycache__/test_multiprocessing_forkserver.cpython-310.pyc,,
65
+ multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc,,
66
+ multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc,,
67
+ multiprocess/tests/mp_fork_bomb.py,sha256=6ADOEzh1aXHZ21aOGoBPhKcgB5sj15G9tQVgSc6GrlY,448
68
+ multiprocess/tests/mp_preload.py,sha256=1-WvLFMaPoH-vZbpUaJvvZHFxTpA9tgmct2vblQy99M,365
69
+ multiprocess/tests/test_multiprocessing_fork.py,sha256=ue1SQLJFxm1oc_3F2gR_WRtt39jhaj0l_Ht6Y1MBmFo,476
70
+ multiprocess/tests/test_multiprocessing_forkserver.py,sha256=VFlUuZI60gyRbNxfHWDlgmy3zm-dPTldLWuKQZ8KObs,391
71
+ multiprocess/tests/test_multiprocessing_main_handling.py,sha256=mtmN0K-spqZCcZVNLf_HrhP186-knpY6eaoFonL1U4U,12018
72
+ multiprocess/tests/test_multiprocessing_spawn.py,sha256=2UAisJX58GZCbYuDFay_x97R9akhzzjIA4VuUUzITOY,276
73
+ multiprocess/util.py,sha256=OPI3CZ34BNwwwa7AqW-eGhnuSUsu-ozy2NRU8BYKuwg,14012
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py310-none-any
5
+
llmeval-env/lib/python3.10/site-packages/multiprocess-0.70.16.dist-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ _multiprocess
2
+ multiprocess
llmeval-env/lib/python3.10/site-packages/pytablewriter/__init__.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ from dataproperty import LineBreakHandling
6
+
7
+ from .__version__ import __author__, __copyright__, __email__, __license__, __version__
8
+ from ._factory import TableWriterFactory
9
+ from ._function import dumps_tabledata
10
+ from ._logger import set_logger
11
+ from ._table_format import FormatAttr, TableFormat
12
+ from .error import (
13
+ EmptyTableDataError,
14
+ EmptyTableNameError,
15
+ EmptyValueError,
16
+ NotSupportedError,
17
+ WriterNotFoundError,
18
+ )
19
+ from .style import Align, Format
20
+ from .typehint import (
21
+ Bool,
22
+ DateTime,
23
+ Dictionary,
24
+ Infinity,
25
+ Integer,
26
+ IpAddress,
27
+ List,
28
+ Nan,
29
+ NoneType,
30
+ NullString,
31
+ RealNumber,
32
+ String,
33
+ )
34
+ from .writer import (
35
+ AbstractTableWriter,
36
+ AsciiDocTableWriter,
37
+ BoldUnicodeTableWriter,
38
+ BorderlessTableWriter,
39
+ CssTableWriter,
40
+ CsvTableWriter,
41
+ ElasticsearchWriter,
42
+ ExcelXlsTableWriter,
43
+ ExcelXlsxTableWriter,
44
+ HtmlTableWriter,
45
+ JavaScriptTableWriter,
46
+ JsonLinesTableWriter,
47
+ JsonTableWriter,
48
+ LatexMatrixWriter,
49
+ LatexTableWriter,
50
+ LtsvTableWriter,
51
+ MarkdownTableWriter,
52
+ MediaWikiTableWriter,
53
+ NullTableWriter,
54
+ NumpyTableWriter,
55
+ PandasDataFramePickleWriter,
56
+ PandasDataFrameWriter,
57
+ PythonCodeTableWriter,
58
+ RstCsvTableWriter,
59
+ RstGridTableWriter,
60
+ RstSimpleTableWriter,
61
+ SpaceAlignedTableWriter,
62
+ SqliteTableWriter,
63
+ TomlTableWriter,
64
+ TsvTableWriter,
65
+ UnicodeTableWriter,
66
+ YamlTableWriter,
67
+ )
68
+
69
+
70
+ __all__ = (
71
+ "__author__",
72
+ "__copyright__",
73
+ "__email__",
74
+ "__license__",
75
+ "__version__",
76
+ "LineBreakHandling",
77
+ "TableWriterFactory",
78
+ "dumps_tabledata",
79
+ "set_logger",
80
+ "FormatAttr",
81
+ "TableFormat",
82
+ "Align",
83
+ "Format",
84
+ "Bool",
85
+ "DateTime",
86
+ "Dictionary",
87
+ "Infinity",
88
+ "Integer",
89
+ "IpAddress",
90
+ "List",
91
+ "Nan",
92
+ "NoneType",
93
+ "NullString",
94
+ "RealNumber",
95
+ "String",
96
+ "EmptyTableDataError",
97
+ "EmptyTableNameError",
98
+ "EmptyValueError",
99
+ "NotSupportedError",
100
+ "WriterNotFoundError",
101
+ "AbstractTableWriter",
102
+ "AsciiDocTableWriter",
103
+ "BoldUnicodeTableWriter",
104
+ "BorderlessTableWriter",
105
+ "CssTableWriter",
106
+ "CsvTableWriter",
107
+ "ElasticsearchWriter",
108
+ "ExcelXlsTableWriter",
109
+ "ExcelXlsxTableWriter",
110
+ "HtmlTableWriter",
111
+ "JavaScriptTableWriter",
112
+ "JsonLinesTableWriter",
113
+ "JsonTableWriter",
114
+ "LatexMatrixWriter",
115
+ "LatexTableWriter",
116
+ "LtsvTableWriter",
117
+ "MarkdownTableWriter",
118
+ "MediaWikiTableWriter",
119
+ "NullTableWriter",
120
+ "NumpyTableWriter",
121
+ "PandasDataFramePickleWriter",
122
+ "PandasDataFrameWriter",
123
+ "PythonCodeTableWriter",
124
+ "RstCsvTableWriter",
125
+ "RstGridTableWriter",
126
+ "RstSimpleTableWriter",
127
+ "SpaceAlignedTableWriter",
128
+ "SqliteTableWriter",
129
+ "TomlTableWriter",
130
+ "TsvTableWriter",
131
+ "UnicodeTableWriter",
132
+ "YamlTableWriter",
133
+ )
llmeval-env/lib/python3.10/site-packages/pytablewriter/__version__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ __author__ = "Tsuyoshi Hombashi"
2
+ __copyright__ = f"Copyright 2016, {__author__}"
3
+ __license__ = "MIT License"
4
+ __version__ = "1.2.0"
5
+ __maintainer__ = __author__
6
+ __email__ = "[email protected]"
llmeval-env/lib/python3.10/site-packages/pytablewriter/_converter.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ import re
6
+
7
+
8
+ def strip_quote(text: str, value: str) -> str:
9
+ re_replace = re.compile(f"[\"']{value:s}[\"']", re.MULTILINE)
10
+
11
+ return re_replace.sub(value, text)
llmeval-env/lib/python3.10/site-packages/pytablewriter/_factory.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ import os
6
+ from itertools import chain
7
+ from typing import Any, List
8
+
9
+ import typepy
10
+
11
+ from ._logger import logger
12
+ from ._table_format import FormatAttr, TableFormat
13
+ from .error import WriterNotFoundError
14
+ from .writer import AbstractTableWriter
15
+
16
+
17
class TableWriterFactory:
    """
    A factory class of table writer classes.
    """

    @classmethod
    def create_from_file_extension(cls, file_extension: str, **kwargs: Any) -> AbstractTableWriter:
        """
        Create a table writer class instance from a file extension.
        Supported file extensions are as follows:

            ================== ===================================
            Extension          Writer Class
            ================== ===================================
            ``".adoc"``        :py:class:`~.AsciiDocTableWriter`
            ``".asciidoc"``    :py:class:`~.AsciiDocTableWriter`
            ``".asc"``         :py:class:`~.AsciiDocTableWriter`
            ``".css"``         :py:class:`~.CssTableWriter`
            ``".csv"``         :py:class:`~.CsvTableWriter`
            ``".htm"``         :py:class:`~.HtmlTableWriter`
            ``".html"``        :py:class:`~.HtmlTableWriter`
            ``".js"``          :py:class:`~.JavaScriptTableWriter`
            ``".json"``        :py:class:`~.JsonTableWriter`
            ``".jsonl"``       :py:class:`~.JsonLinesTableWriter`
            ``".ltsv"``        :py:class:`~.LtsvTableWriter`
            ``".ldjson"``      :py:class:`~.JsonLinesTableWriter`
            ``".md"``          :py:class:`~.MarkdownTableWriter`
            ``".ndjson"``      :py:class:`~.JsonLinesTableWriter`
            ``".py"``          :py:class:`~.PythonCodeTableWriter`
            ``".rst"``         :py:class:`~.RstGridTableWriter`
            ``".tsv"``         :py:class:`~.TsvTableWriter`
            ``".xls"``         :py:class:`~.ExcelXlsTableWriter`
            ``".xlsx"``        :py:class:`~.ExcelXlsxTableWriter`
            ``".sqlite"``      :py:class:`~.SqliteTableWriter`
            ``".sqlite3"``     :py:class:`~.SqliteTableWriter`
            ``".tsv"``         :py:class:`~.TsvTableWriter`
            ``".toml"``        :py:class:`~.TomlTableWriter`
            ``".yml"``         :py:class:`~.YamlTableWriter`
            ================== ===================================

        :param str file_extension:
            File extension string (case insensitive).
            May also be a file name or path; in that case its extension is used.
        :param kwargs:
            Keyword arguments that pass to a writer class constructor.
        :return:
            Writer instance that coincides with the ``file_extension``.
        :rtype:
            :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
        :raises pytablewriter.WriterNotFoundError:
            |WriterNotFoundError_desc| the file extension.
        """

        # ``file_extension`` may be either a bare extension ("csv") or a
        # path/filename ("out.csv"); prefer the path's extension when present.
        ext = os.path.splitext(file_extension)[1]
        if not typepy.is_null_string(ext):
            file_extension = ext

        file_extension = file_extension.lstrip(".").lower()

        for table_format in TableFormat:
            if file_extension not in table_format.file_extensions:
                continue

            # secondary formats share an extension with a primary format and
            # must not win the lookup
            if table_format.format_attribute & FormatAttr.SECONDARY_EXT:
                continue

            logger.debug(f"create a {table_format.writer_class} instance")

            return table_format.writer_class(**kwargs)  # type: ignore

        raise WriterNotFoundError(
            "\n".join(
                [
                    f"{file_extension:s} (unknown file extension).",
                    "",
                    "acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())),
                ]
            )
        )

    @classmethod
    def create_from_format_name(cls, format_name: str, **kwargs: Any) -> AbstractTableWriter:
        """
        Create a table writer class instance from a format name.
        Supported file format names are as follows:

            ============================================= ===================================
            Format name                                   Writer Class
            ============================================= ===================================
            ``"adoc"``                                    :py:class:`~.AsciiDocTableWriter`
            ``"asciidoc"``                                :py:class:`~.AsciiDocTableWriter`
            ``"css"``                                     :py:class:`~.CssTableWriter`
            ``"csv"``                                     :py:class:`~.CsvTableWriter`
            ``"elasticsearch"``                           :py:class:`~.ElasticsearchWriter`
            ``"excel"``                                   :py:class:`~.ExcelXlsxTableWriter`
            ``"html"``/``"htm"``                          :py:class:`~.HtmlTableWriter`
            ``"javascript"``/``"js"``                     :py:class:`~.JavaScriptTableWriter`
            ``"json"``                                    :py:class:`~.JsonTableWriter`
            ``"json_lines"``                              :py:class:`~.JsonLinesTableWriter`
            ``"latex_matrix"``                            :py:class:`~.LatexMatrixWriter`
            ``"latex_table"``                             :py:class:`~.LatexTableWriter`
            ``"ldjson"``                                  :py:class:`~.JsonLinesTableWriter`
            ``"ltsv"``                                    :py:class:`~.LtsvTableWriter`
            ``"markdown"``/``"md"``                       :py:class:`~.MarkdownTableWriter`
            ``"mediawiki"``                               :py:class:`~.MediaWikiTableWriter`
            ``"null"``                                    :py:class:`~.NullTableWriter`
            ``"pandas"``                                  :py:class:`~.PandasDataFrameWriter`
            ``"py"``/``"python"``                         :py:class:`~.PythonCodeTableWriter`
            ``"rst"``/``"rst_grid"``/``"rst_grid_table"`` :py:class:`~.RstGridTableWriter`
            ``"rst_simple"``/``"rst_simple_table"``       :py:class:`~.RstSimpleTableWriter`
            ``"rst_csv"``/``"rst_csv_table"``             :py:class:`~.RstCsvTableWriter`
            ``"sqlite"``                                  :py:class:`~.SqliteTableWriter`
            ``"ssv"``                                     :py:class:`~.SpaceAlignedTableWriter`
            ``"tsv"``                                     :py:class:`~.TsvTableWriter`
            ``"toml"``                                    :py:class:`~.TomlTableWriter`
            ``"unicode"``                                 :py:class:`~.UnicodeTableWriter`
            ``"yaml"``                                    :py:class:`~.YamlTableWriter`
            ============================================= ===================================

        :param str format_name:
            Format name string (case insensitive).
        :param kwargs:
            Keyword arguments that pass to a writer class constructor.
        :return:
            Writer instance that coincides with the ``format_name``:
        :rtype:
            :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
        :raises pytablewriter.WriterNotFoundError:
            |WriterNotFoundError_desc| for the format.
        """

        format_name = format_name.casefold()

        for table_format in TableFormat:
            # secondary names duplicate another format's name and are excluded
            if format_name in table_format.names and not (
                table_format.format_attribute & FormatAttr.SECONDARY_NAME
            ):
                writer = table_format.writer_class(**kwargs)  # type: ignore
                logger.debug(f"create a {writer.FORMAT_NAME} instance")

                return writer

        raise WriterNotFoundError(
            "\n".join(
                [
                    f"{format_name} (unknown format name).",
                    "acceptable format names are: {}.".format(", ".join(cls.get_format_names())),
                ]
            )
        )

    @classmethod
    def get_format_names(cls) -> List[str]:
        """
        :return: Available format names.
        :rtype: list

        :Example:
            .. code:: python

                >>> import pytablewriter as ptw
                >>> for name in ptw.TableWriterFactory.get_format_names():
                ...     print(name)
                ...
                adoc
                asciidoc
                bold_unicode
                borderless
                css
                csv
                elasticsearch
                excel
                htm
                html
                javascript
                js
                json
                json_lines
                jsonl
                latex_matrix
                latex_table
                ldjson
                ltsv
                markdown
                md
                mediawiki
                ndjson
                null
                numpy
                pandas
                pandas_pickle
                py
                python
                rst
                rst_csv
                rst_csv_table
                rst_grid
                rst_grid_table
                rst_simple
                rst_simple_table
                space_aligned
                sqlite
                ssv
                toml
                tsv
                unicode
                yaml

        """

        # flatten every format's alias list, dedupe, and sort
        return sorted(set(chain.from_iterable(table_format.names for table_format in TableFormat)))

    @classmethod
    def get_extensions(cls) -> List[str]:
        """
        :return: Available file extensions.
        :rtype: list

        :Example:
            .. code:: python

                >>> import pytablewriter as ptw
                >>> for name in ptw.TableWriterFactory.get_extensions():
                ...     print(name)
                ...
                adoc
                asc
                asciidoc
                css
                csv
                htm
                html
                js
                json
                jsonl
                ldjson
                ltsv
                md
                ndjson
                py
                rst
                sqlite
                sqlite3
                tex
                toml
                tsv
                xls
                xlsx
                yml
        """

        return sorted(
            {
                file_extension
                for table_format in TableFormat
                for file_extension in table_format.file_extensions
            }
        )
llmeval-env/lib/python3.10/site-packages/pytablewriter/_function.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ from datetime import datetime
6
+ from enum import Enum
7
+ from typing import Any, Optional, Type
8
+
9
+ import dataproperty
10
+ from pathvalidate import replace_symbol
11
+ from tabledata._core import TableData
12
+
13
+
14
def quote_datetime_formatter(value: datetime) -> str:
    """Render *value* as a double-quoted string using dataproperty's default datetime format."""

    formatted = value.strftime(dataproperty.DefaultValue.DATETIME_FORMAT)
    return '"' + formatted + '"'
16
+
17
+
18
def dateutil_datetime_formatter(value: datetime) -> str:
    """Render *value* as a ``dateutil.parser.parse("...")`` source-code snippet."""

    timestamp = value.strftime(dataproperty.DefaultValue.DATETIME_FORMAT)
    return f'dateutil.parser.parse("{timestamp:s}")'
22
+
23
+
24
def dumps_tabledata(value: TableData, format_name: str = "rst_grid_table", **kwargs: Any) -> str:
    """Serialize tabular data to a string in the requested table format.

    :param tabledata.TableData value: Tabular data to dump.
    :param str format_name:
        Dumped format name of tabular data.
        Available formats are described in
        :py:meth:`~pytablewriter.TableWriterFactory.create_from_format_name`
    :param kwargs:
        Writer attributes; each keyword is assigned onto the writer instance
        before dumping.
    :raises TypeError: If ``value`` is empty/falsy.

    :Example:
        .. code:: python

            >>> dumps_tabledata(value)
            .. table:: sample_data

                ====== ====== ======
                attr_a attr_b attr_c
                ====== ====== ======
                     1    4.0 a
                     2    2.1 bb
                     3  120.9 ccc
                ====== ====== ======
    """

    # imported lazily to avoid a circular import with the factory module
    from ._factory import TableWriterFactory

    if not value:
        raise TypeError("value must be a tabledata.TableData instance")

    table_writer = TableWriterFactory.create_from_format_name(format_name)
    for writer_attr, attr_value in kwargs.items():
        setattr(table_writer, writer_attr, attr_value)
    table_writer.from_tabledata(value)

    return table_writer.dumps()
60
+
61
+
62
def normalize_enum(
    value: Any, enum_class: Type[Enum], validate: bool = True, default: Optional[Enum] = None
) -> Any:
    """Normalize *value* into a member of *enum_class*.

    ``None`` maps to *default*; an existing member is returned unchanged; a
    string is symbol-normalized (via ``pathvalidate.replace_symbol``),
    upper-cased, and looked up by member name.

    :param value: ``None``, an ``enum_class`` member, or a member-name string.
    :param enum_class: Target :py:class:`enum.Enum` subclass.
    :param bool validate:
        If |True|, raise on values that cannot be normalized; otherwise
        return ``value`` as-is.
    :param default: Returned when ``value`` is ``None``.
    :raises TypeError: If ``value`` is neither a member nor a string and ``validate`` is |True|.
    :raises ValueError: If the string does not name a member and ``validate`` is |True|.
    """

    if value is None:
        return default

    if isinstance(value, enum_class):
        return value

    try:
        return enum_class[replace_symbol(value.strip(), "_").upper()]
    except AttributeError as e:
        # value has no .strip(): it is not a string
        if validate:
            raise TypeError(f"value must be a {enum_class} or a str: actual={type(value)}") from e
    except KeyError as e:
        # the normalized string is not a member name of enum_class
        if validate:
            raise ValueError(
                "invalid value found: expected={}, actual={}".format(
                    "/".join(item.name for item in enum_class), value
                )
            ) from e

    # validate=False: pass the unnormalizable value through unchanged
    return value
llmeval-env/lib/python3.10/site-packages/pytablewriter/_table_format.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+ import enum
6
+ from typing import List, Optional, Sequence
7
+
8
+ from .writer import (
9
+ AbstractTableWriter,
10
+ AsciiDocTableWriter,
11
+ BoldUnicodeTableWriter,
12
+ BorderlessTableWriter,
13
+ CssTableWriter,
14
+ CsvTableWriter,
15
+ ElasticsearchWriter,
16
+ ExcelXlsTableWriter,
17
+ ExcelXlsxTableWriter,
18
+ HtmlTableWriter,
19
+ JavaScriptTableWriter,
20
+ JsonLinesTableWriter,
21
+ JsonTableWriter,
22
+ LatexMatrixWriter,
23
+ LatexTableWriter,
24
+ LtsvTableWriter,
25
+ MarkdownTableWriter,
26
+ MediaWikiTableWriter,
27
+ NullTableWriter,
28
+ NumpyTableWriter,
29
+ PandasDataFramePickleWriter,
30
+ PandasDataFrameWriter,
31
+ PythonCodeTableWriter,
32
+ RstCsvTableWriter,
33
+ RstGridTableWriter,
34
+ RstSimpleTableWriter,
35
+ SpaceAlignedTableWriter,
36
+ SqliteTableWriter,
37
+ TomlTableWriter,
38
+ TsvTableWriter,
39
+ UnicodeTableWriter,
40
+ YamlTableWriter,
41
+ )
42
+
43
+
44
class FormatAttr:
    """
    Bitmaps to represent table attributes.

    Each attribute occupies a distinct bit, so attributes can be combined
    with ``|`` and tested with ``&`` (see ``TableFormat``).
    """

    # No capability: used by formats such as the null writer.
    NONE = 1 << 1

    #: Can create a file with the format.
    FILE = 1 << 2

    #: Table format that can represent as a text.
    TEXT = 1 << 3

    #: Table format that can represent as a binary file.
    BIN = 1 << 4

    #: Can create a source code (variables definition)
    #: one of the programming language.
    SOURCECODE = 1 << 5

    #: Can call API for external service.
    API = 1 << 6

    # Marks a format whose file extension / format name duplicates another
    # format's; TableWriterFactory skips SECONDARY_* entries when resolving a
    # writer from an extension or a name, so the primary format wins.
    SECONDARY_EXT = 1 << 10
    SECONDARY_NAME = 1 << 11
69
+
70
+
71
@enum.unique
class TableFormat(enum.Enum):
    """
    Enum to represent table format attributes.

    Each member's value is a 4-tuple unpacked by ``__init__``:
    ``(names, writer_class, format_attribute, file_extensions)`` —
    alias names for the format, the writer class to instantiate,
    a ``FormatAttr`` bitmap, and associated file extensions.
    """

    ASCIIDOC = (
        [AsciiDocTableWriter.FORMAT_NAME, "adoc"],
        AsciiDocTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["adoc", "asciidoc", "asc"],
    )
    CSV = ([CsvTableWriter.FORMAT_NAME], CsvTableWriter, FormatAttr.FILE | FormatAttr.TEXT, ["csv"])
    CSS = (
        [CssTableWriter.FORMAT_NAME],
        CssTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["css"],
    )
    ELASTICSEARCH = (
        [ElasticsearchWriter.FORMAT_NAME],  # type: ignore
        ElasticsearchWriter,
        FormatAttr.API,
        [],
    )
    EXCEL_XLSX = (
        [ExcelXlsxTableWriter.FORMAT_NAME],
        ExcelXlsxTableWriter,
        FormatAttr.FILE | FormatAttr.BIN,
        ["xlsx"],
    )
    EXCEL_XLS = (
        [ExcelXlsTableWriter.FORMAT_NAME],
        ExcelXlsTableWriter,
        FormatAttr.FILE | FormatAttr.BIN | FormatAttr.SECONDARY_NAME,
        ["xls"],
    )
    HTML = (
        [HtmlTableWriter.FORMAT_NAME, "htm"],
        HtmlTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["html", "htm"],
    )
    JAVASCRIPT = (
        [JavaScriptTableWriter.FORMAT_NAME, "js"],
        JavaScriptTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE,
        ["js"],
    )
    JSON = (
        [JsonTableWriter.FORMAT_NAME],
        JsonTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["json"],
    )
    JSON_LINES = (
        [JsonLinesTableWriter.FORMAT_NAME, "jsonl", "ldjson", "ndjson"],
        JsonLinesTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["jsonl", "ldjson", "ndjson"],
    )
    LATEX_MATRIX = (
        [LatexMatrixWriter.FORMAT_NAME],
        LatexMatrixWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["tex"],
    )
    LATEX_TABLE = (
        [LatexTableWriter.FORMAT_NAME],
        LatexTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SECONDARY_EXT,
        ["tex"],
    )
    LTSV = (
        [LtsvTableWriter.FORMAT_NAME],
        LtsvTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["ltsv"],
    )
    MARKDOWN = (
        [MarkdownTableWriter.FORMAT_NAME, "md"],
        MarkdownTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["md"],
    )
    MEDIAWIKI = (
        [MediaWikiTableWriter.FORMAT_NAME],  # type: ignore
        MediaWikiTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        [],
    )
    NULL = (
        [NullTableWriter.FORMAT_NAME],  # type: ignore
        NullTableWriter,
        FormatAttr.NONE,
        [],
    )
    NUMPY = (
        [NumpyTableWriter.FORMAT_NAME],
        NumpyTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE | FormatAttr.SECONDARY_EXT,
        ["py"],
    )
    PANDAS = (
        [PandasDataFrameWriter.FORMAT_NAME],
        PandasDataFrameWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE | FormatAttr.SECONDARY_EXT,
        ["py"],
    )
    PANDAS_PICKLE = (
        [PandasDataFramePickleWriter.FORMAT_NAME],  # type: ignore
        PandasDataFramePickleWriter,
        FormatAttr.FILE | FormatAttr.BIN,
        [],
    )
    PYTHON = (
        [PythonCodeTableWriter.FORMAT_NAME, "py"],
        PythonCodeTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SOURCECODE,
        ["py"],
    )
    RST_CSV_TABLE = (
        [RstCsvTableWriter.FORMAT_NAME, "rst_csv"],
        RstCsvTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SECONDARY_EXT,
        ["rst"],
    )
    RST_GRID_TABLE = (
        [RstGridTableWriter.FORMAT_NAME, "rst_grid", "rst"],
        RstGridTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["rst"],
    )
    RST_SIMPLE_TABLE = (
        [RstSimpleTableWriter.FORMAT_NAME, "rst_simple"],
        RstSimpleTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT | FormatAttr.SECONDARY_EXT,
        ["rst"],
    )
    SPACE_ALIGNED = (
        [SpaceAlignedTableWriter.FORMAT_NAME, "ssv"],  # type: ignore
        SpaceAlignedTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        [],
    )
    SQLITE = (
        [SqliteTableWriter.FORMAT_NAME],
        SqliteTableWriter,
        FormatAttr.FILE | FormatAttr.BIN,
        ["sqlite", "sqlite3"],
    )
    TOML = (
        [TomlTableWriter.FORMAT_NAME],
        TomlTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["toml"],
    )
    TSV = ([TsvTableWriter.FORMAT_NAME], TsvTableWriter, FormatAttr.FILE | FormatAttr.TEXT, ["tsv"])
    UNICODE = (
        [UnicodeTableWriter.FORMAT_NAME],  # type: ignore
        UnicodeTableWriter,
        FormatAttr.TEXT,
        [],
    )
    YAML = (
        [YamlTableWriter.FORMAT_NAME],
        YamlTableWriter,
        FormatAttr.FILE | FormatAttr.TEXT,
        ["yml"],
    )
    BOLD_UNICODE = (
        [BoldUnicodeTableWriter.FORMAT_NAME],  # type: ignore
        BoldUnicodeTableWriter,
        FormatAttr.TEXT,
        [],
    )
    BORDERLESS = (
        [BorderlessTableWriter.FORMAT_NAME],  # type: ignore
        BorderlessTableWriter,
        FormatAttr.TEXT,
        [],
    )

    @property
    def names(self) -> List[str]:
        """
        List[str]: Names associated with the table format.
        """

        return self.__names

    @property
    def writer_class(self) -> AbstractTableWriter:
        """
        Type[AbstractTableWriter]: Table writer class object associated with the table format.
        """

        # NOTE(review): this returns the writer *class*, not an instance, so
        # the annotation arguably should be Type[AbstractTableWriter] — kept
        # as-is since ``typing.Type`` is not imported here.
        return self.__writer_class

    @property
    def format_attribute(self) -> int:
        """
        FormatAttr: Table attributes bitmap.
        """

        return self.__format_attribute

    @property
    def file_extensions(self) -> List[str]:
        """
        List[str]: File extensions associated with the table format.
        """

        return self.__file_extensions

    def __init__(
        self,
        names: Sequence[str],
        writer_class: AbstractTableWriter,
        format_attribute: int,
        file_extensions: Sequence[str],
    ) -> None:
        # Enum.__init__ receives the member's 4-tuple value unpacked; the
        # pieces are stored on private (name-mangled) attributes exposed
        # read-only through the properties above.
        self.__names = list(names)
        self.__writer_class = writer_class
        self.__format_attribute = format_attribute
        self.__file_extensions = list(file_extensions)

    @classmethod
    def find_all_attr(cls, format_attribute: int) -> List["TableFormat"]:
        """Searching table formats that have specific attributes.

        Args:
            format_attribute (FormatAttr):
                Table format attributes to look for.

        Returns:
            List[TableFormat]: Table formats that matched the attribute.
        """

        return [
            table_format
            for table_format in TableFormat
            if table_format.format_attribute & format_attribute
        ]

    @classmethod
    def from_name(cls, format_name: str) -> Optional["TableFormat"]:
        """Get a table format from a format name.

        Args:
            format_name (str): Table format specifier (case insensitive).

        Returns:
            Optional[TableFormat]: A table format enum value corresponding to the ``format_name``,
            or ``None`` when no format matches.
        """

        format_name = format_name.casefold().strip()

        for table_format in TableFormat:
            if format_name in table_format.names:
                return table_format

        return None

    @classmethod
    def from_file_extension(cls, file_extension: str) -> Optional["TableFormat"]:
        """Get a table format from a file extension.

        Args:
            file_extension (str): File extension (case insensitive, leading dot allowed).

        Returns:
            Optional[TableFormat]:
                A table format enum value corresponding to the ``file_extension``,
                or ``None`` when no format matches.
        """

        ext = file_extension.lower().strip().lstrip(".")

        for table_format in TableFormat:
            if ext in table_format.file_extensions:
                return table_format

        return None
llmeval-env/lib/python3.10/site-packages/pytablewriter/_typing.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/pytablewriter/error.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ .. codeauthor:: Tsuyoshi Hombashi <[email protected]>
3
+ """
4
+
5
+
6
class NotSupportedError(Exception):
    """
    Exception raised when a writer does not support the requested operation.
    """

    pass
8
+
9
+
10
class EmptyTableNameError(Exception):
    """
    Exception raised when the |table_name| attribute of a table writer is
    null and the class does not accept a null |table_name|.
    """
15
+
16
+
17
class EmptyValueError(Exception):
    """
    Exception raised when the |value_matrix| attribute of a table writer is
    null and the class does not accept a null |value_matrix|.
    """
22
+
23
+
24
class EmptyTableDataError(Exception):
    """
    Exception raised when both the |headers| and |value_matrix| attributes
    of a table writer are null.
    """
29
+
30
+
31
class WriterNotFoundError(Exception):
    """
    Exception raised when no appropriate table writer is found for the
    requested format name or file extension.
    """
llmeval-env/lib/python3.10/site-packages/pytablewriter/py.typed ADDED
File without changes