Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/charset_normalizer/__init__.py +46 -0
- env-llmeval/lib/python3.10/site-packages/charset_normalizer/cd.py +395 -0
- env-llmeval/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/charset_normalizer/models.py +340 -0
- env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/INSTALLER +1 -0
- env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/METADATA +441 -0
- env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/RECORD +31 -0
- env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/WHEEL +5 -0
- env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt +27 -0
- env-llmeval/lib/python3.10/site-packages/jsonlines/__init__.py +20 -0
- env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/jsonlines.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/jsonlines/jsonlines.py +665 -0
- env-llmeval/lib/python3.10/site-packages/jsonlines/py.typed +0 -0
- env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/REQUESTED +0 -0
- env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL +5 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__about__.py +6 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__init__.py +76 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__main__.py +98 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/constants.py +58 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/exceptions.py +27 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/portalocker.py +117 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/py.typed +0 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/redis.py +236 -0
- env-llmeval/lib/python3.10/site-packages/portalocker/utils.py +563 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pyx +529 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pxd +70 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py +56 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pyx +1542 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pxd +67 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_dlpack.pxi +46 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.pyx +478 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pxd +36 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd +56 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd +33 -0
env-llmeval/lib/python3.10/site-packages/charset_normalizer/__init__.py
ADDED
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+Charset-Normalizer
+~~~~~~~~~~~~~~
+The Real First Universal Charset Detector.
+A library that helps you read text from an unknown charset encoding.
+Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
+All IANA character set names for which the Python core library provides codecs are supported.
+
+Basic usage:
+    >>> from charset_normalizer import from_bytes
+    >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
+    >>> best_guess = results.best()
+    >>> str(best_guess)
+    'Bсеки човек има право на образование. Oбразованието!'
+
+Others methods and usages are available - see the full documentation
+at <https://github.com/Ousret/charset_normalizer>.
+:copyright: (c) 2021 by Ahmed TAHRI
+:license: MIT, see LICENSE for more details.
+"""
+import logging
+
+from .api import from_bytes, from_fp, from_path, is_binary
+from .legacy import detect
+from .models import CharsetMatch, CharsetMatches
+from .utils import set_logging_handler
+from .version import VERSION, __version__
+
+__all__ = (
+    "from_fp",
+    "from_path",
+    "from_bytes",
+    "is_binary",
+    "detect",
+    "CharsetMatch",
+    "CharsetMatches",
+    "__version__",
+    "VERSION",
+    "set_logging_handler",
+)
+
+# Attach a NullHandler to the top level logger by default
+# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
+
+logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
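For quick orientation, the entry point documented in the docstring above round-trips like this (a minimal sketch; the sample text is illustrative):

    from charset_normalizer import from_bytes

    payload = "Bсеки човек има право на образование.".encode("utf_8")
    best_guess = from_bytes(payload).best()  # CharsetMatches -> CharsetMatch or None

    if best_guess is not None:
        print(best_guess.encoding)  # e.g. 'utf_8'
        print(str(best_guess))      # the decoded text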
env-llmeval/lib/python3.10/site-packages/charset_normalizer/cd.py
ADDED
@@ -0,0 +1,395 @@
+import importlib
+from codecs import IncrementalDecoder
+from collections import Counter
+from functools import lru_cache
+from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
+
+from .constant import (
+    FREQUENCIES,
+    KO_NAMES,
+    LANGUAGE_SUPPORTED_COUNT,
+    TOO_SMALL_SEQUENCE,
+    ZH_NAMES,
+)
+from .md import is_suspiciously_successive_range
+from .models import CoherenceMatches
+from .utils import (
+    is_accentuated,
+    is_latin,
+    is_multi_byte_encoding,
+    is_unicode_range_secondary,
+    unicode_range,
+)
+
+
+def encoding_unicode_range(iana_name: str) -> List[str]:
+    """
+    Return associated unicode ranges in a single byte code page.
+    """
+    if is_multi_byte_encoding(iana_name):
+        raise IOError("Function not supported on multi-byte code page")
+
+    decoder = importlib.import_module(
+        "encodings.{}".format(iana_name)
+    ).IncrementalDecoder
+
+    p: IncrementalDecoder = decoder(errors="ignore")
+    seen_ranges: Dict[str, int] = {}
+    character_count: int = 0
+
+    for i in range(0x40, 0xFF):
+        chunk: str = p.decode(bytes([i]))
+
+        if chunk:
+            character_range: Optional[str] = unicode_range(chunk)
+
+            if character_range is None:
+                continue
+
+            if is_unicode_range_secondary(character_range) is False:
+                if character_range not in seen_ranges:
+                    seen_ranges[character_range] = 0
+                seen_ranges[character_range] += 1
+                character_count += 1
+
+    return sorted(
+        [
+            character_range
+            for character_range in seen_ranges
+            if seen_ranges[character_range] / character_count >= 0.15
+        ]
+    )
+
+
+def unicode_range_languages(primary_range: str) -> List[str]:
+    """
+    Return inferred languages used with a unicode range.
+    """
+    languages: List[str] = []
+
+    for language, characters in FREQUENCIES.items():
+        for character in characters:
+            if unicode_range(character) == primary_range:
+                languages.append(language)
+                break
+
+    return languages
+
+
+@lru_cache()
+def encoding_languages(iana_name: str) -> List[str]:
+    """
+    Single-byte encoding language association. Some code page are heavily linked to particular language(s).
+    This function does the correspondence.
+    """
+    unicode_ranges: List[str] = encoding_unicode_range(iana_name)
+    primary_range: Optional[str] = None
+
+    for specified_range in unicode_ranges:
+        if "Latin" not in specified_range:
+            primary_range = specified_range
+            break
+
+    if primary_range is None:
+        return ["Latin Based"]
+
+    return unicode_range_languages(primary_range)
+
+
+@lru_cache()
+def mb_encoding_languages(iana_name: str) -> List[str]:
+    """
+    Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
+    This function does the correspondence.
+    """
+    if (
+        iana_name.startswith("shift_")
+        or iana_name.startswith("iso2022_jp")
+        or iana_name.startswith("euc_j")
+        or iana_name == "cp932"
+    ):
+        return ["Japanese"]
+    if iana_name.startswith("gb") or iana_name in ZH_NAMES:
+        return ["Chinese"]
+    if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
+        return ["Korean"]
+
+    return []
+
+
+@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
+def get_target_features(language: str) -> Tuple[bool, bool]:
+    """
+    Determine main aspects from a supported language if it contains accents and if is pure Latin.
+    """
+    target_have_accents: bool = False
+    target_pure_latin: bool = True
+
+    for character in FREQUENCIES[language]:
+        if not target_have_accents and is_accentuated(character):
+            target_have_accents = True
+        if target_pure_latin and is_latin(character) is False:
+            target_pure_latin = False
+
+    return target_have_accents, target_pure_latin
+
+
+def alphabet_languages(
+    characters: List[str], ignore_non_latin: bool = False
+) -> List[str]:
+    """
+    Return associated languages associated to given characters.
+    """
+    languages: List[Tuple[str, float]] = []
+
+    source_have_accents = any(is_accentuated(character) for character in characters)
+
+    for language, language_characters in FREQUENCIES.items():
+        target_have_accents, target_pure_latin = get_target_features(language)
+
+        if ignore_non_latin and target_pure_latin is False:
+            continue
+
+        if target_have_accents is False and source_have_accents:
+            continue
+
+        character_count: int = len(language_characters)
+
+        character_match_count: int = len(
+            [c for c in language_characters if c in characters]
+        )
+
+        ratio: float = character_match_count / character_count
+
+        if ratio >= 0.2:
+            languages.append((language, ratio))
+
+    languages = sorted(languages, key=lambda x: x[1], reverse=True)
+
+    return [compatible_language[0] for compatible_language in languages]
+
+
+def characters_popularity_compare(
+    language: str, ordered_characters: List[str]
+) -> float:
+    """
+    Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
+    The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
+    Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
+    """
+    if language not in FREQUENCIES:
+        raise ValueError("{} not available".format(language))
+
+    character_approved_count: int = 0
+    FREQUENCIES_language_set = set(FREQUENCIES[language])
+
+    ordered_characters_count: int = len(ordered_characters)
+    target_language_characters_count: int = len(FREQUENCIES[language])
+
+    large_alphabet: bool = target_language_characters_count > 26
+
+    for character, character_rank in zip(
+        ordered_characters, range(0, ordered_characters_count)
+    ):
+        if character not in FREQUENCIES_language_set:
+            continue
+
+        character_rank_in_language: int = FREQUENCIES[language].index(character)
+        expected_projection_ratio: float = (
+            target_language_characters_count / ordered_characters_count
+        )
+        character_rank_projection: int = int(character_rank * expected_projection_ratio)
+
+        if (
+            large_alphabet is False
+            and abs(character_rank_projection - character_rank_in_language) > 4
+        ):
+            continue
+
+        if (
+            large_alphabet is True
+            and abs(character_rank_projection - character_rank_in_language)
+            < target_language_characters_count / 3
+        ):
+            character_approved_count += 1
+            continue
+
+        characters_before_source: List[str] = FREQUENCIES[language][
+            0:character_rank_in_language
+        ]
+        characters_after_source: List[str] = FREQUENCIES[language][
+            character_rank_in_language:
+        ]
+        characters_before: List[str] = ordered_characters[0:character_rank]
+        characters_after: List[str] = ordered_characters[character_rank:]
+
+        before_match_count: int = len(
+            set(characters_before) & set(characters_before_source)
+        )
+
+        after_match_count: int = len(
+            set(characters_after) & set(characters_after_source)
+        )
+
+        if len(characters_before_source) == 0 and before_match_count <= 4:
+            character_approved_count += 1
+            continue
+
+        if len(characters_after_source) == 0 and after_match_count <= 4:
+            character_approved_count += 1
+            continue
+
+        if (
+            before_match_count / len(characters_before_source) >= 0.4
+            or after_match_count / len(characters_after_source) >= 0.4
+        ):
+            character_approved_count += 1
+            continue
+
+    return character_approved_count / len(ordered_characters)
+
+
+def alpha_unicode_split(decoded_sequence: str) -> List[str]:
+    """
+    Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
+    Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
+    One containing the latin letters and the other hebrew.
+    """
+    layers: Dict[str, str] = {}
+
+    for character in decoded_sequence:
+        if character.isalpha() is False:
+            continue
+
+        character_range: Optional[str] = unicode_range(character)
+
+        if character_range is None:
+            continue
+
+        layer_target_range: Optional[str] = None
+
+        for discovered_range in layers:
+            if (
+                is_suspiciously_successive_range(discovered_range, character_range)
+                is False
+            ):
+                layer_target_range = discovered_range
+                break
+
+        if layer_target_range is None:
+            layer_target_range = character_range
+
+        if layer_target_range not in layers:
+            layers[layer_target_range] = character.lower()
+            continue
+
+        layers[layer_target_range] += character.lower()
+
+    return list(layers.values())
+
+
+def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
+    """
+    This function merge results previously given by the function coherence_ratio.
+    The return type is the same as coherence_ratio.
+    """
+    per_language_ratios: Dict[str, List[float]] = {}
+    for result in results:
+        for sub_result in result:
+            language, ratio = sub_result
+            if language not in per_language_ratios:
+                per_language_ratios[language] = [ratio]
+                continue
+            per_language_ratios[language].append(ratio)
+
+    merge = [
+        (
+            language,
+            round(
+                sum(per_language_ratios[language]) / len(per_language_ratios[language]),
+                4,
+            ),
+        )
+        for language in per_language_ratios
+    ]
+
+    return sorted(merge, key=lambda x: x[1], reverse=True)
+
+
+def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
+    """
+    We shall NOT return "English—" in CoherenceMatches because it is an alternative
+    of "English". This function only keeps the best match and remove the em-dash in it.
+    """
+    index_results: Dict[str, List[float]] = dict()
+
+    for result in results:
+        language, ratio = result
+        no_em_name: str = language.replace("—", "")
+
+        if no_em_name not in index_results:
+            index_results[no_em_name] = []
+
+        index_results[no_em_name].append(ratio)
+
+    if any(len(index_results[e]) > 1 for e in index_results):
+        filtered_results: CoherenceMatches = []
+
+        for language in index_results:
+            filtered_results.append((language, max(index_results[language])))
+
+        return filtered_results
+
+    return results
+
+
+@lru_cache(maxsize=2048)
+def coherence_ratio(
+    decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
+) -> CoherenceMatches:
+    """
+    Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
+    A layer = Character extraction by alphabets/ranges.
+    """
+
+    results: List[Tuple[str, float]] = []
+    ignore_non_latin: bool = False
+
+    sufficient_match_count: int = 0
+
+    lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
+    if "Latin Based" in lg_inclusion_list:
+        ignore_non_latin = True
+        lg_inclusion_list.remove("Latin Based")
+
+    for layer in alpha_unicode_split(decoded_sequence):
+        sequence_frequencies: TypeCounter[str] = Counter(layer)
+        most_common = sequence_frequencies.most_common()
+
+        character_count: int = sum(o for c, o in most_common)
+
+        if character_count <= TOO_SMALL_SEQUENCE:
+            continue
+
+        popular_character_ordered: List[str] = [c for c, o in most_common]
+
+        for language in lg_inclusion_list or alphabet_languages(
+            popular_character_ordered, ignore_non_latin
+        ):
+            ratio: float = characters_popularity_compare(
+                language, popular_character_ordered
+            )
+
+            if ratio < threshold:
+                continue
+            elif ratio >= 0.8:
+                sufficient_match_count += 1
+
+            results.append((language, round(ratio, 4)))
+
+            if sufficient_match_count >= 3:
+                break
+
+    return sorted(
+        filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
+    )
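The helpers in this file are internal, but they can be exercised directly when debugging a detection. A minimal sketch (inputs and printed values are illustrative):

    from charset_normalizer.cd import coherence_ratio, encoding_languages

    # Rank languages plausibly present in an already-decoded string;
    # the result is a list of (language, ratio) tuples, ratio descending.
    print(coherence_ratio("Образованието е право на всеки човек."))

    # Map a single-byte code page to the languages it is usually tied to.
    print(encoding_languages("cp1251"))  # e.g. Cyrillic-based languages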
env-llmeval/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (16.1 kB).
env-llmeval/lib/python3.10/site-packages/charset_normalizer/models.py
ADDED
@@ -0,0 +1,340 @@
+from encodings.aliases import aliases
+from hashlib import sha256
+from json import dumps
+from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
+
+from .constant import TOO_BIG_SEQUENCE
+from .utils import iana_name, is_multi_byte_encoding, unicode_range
+
+
+class CharsetMatch:
+    def __init__(
+        self,
+        payload: bytes,
+        guessed_encoding: str,
+        mean_mess_ratio: float,
+        has_sig_or_bom: bool,
+        languages: "CoherenceMatches",
+        decoded_payload: Optional[str] = None,
+    ):
+        self._payload: bytes = payload
+
+        self._encoding: str = guessed_encoding
+        self._mean_mess_ratio: float = mean_mess_ratio
+        self._languages: CoherenceMatches = languages
+        self._has_sig_or_bom: bool = has_sig_or_bom
+        self._unicode_ranges: Optional[List[str]] = None
+
+        self._leaves: List[CharsetMatch] = []
+        self._mean_coherence_ratio: float = 0.0
+
+        self._output_payload: Optional[bytes] = None
+        self._output_encoding: Optional[str] = None
+
+        self._string: Optional[str] = decoded_payload
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, CharsetMatch):
+            raise TypeError(
+                "__eq__ cannot be invoked on {} and {}.".format(
+                    str(other.__class__), str(self.__class__)
+                )
+            )
+        return self.encoding == other.encoding and self.fingerprint == other.fingerprint
+
+    def __lt__(self, other: object) -> bool:
+        """
+        Implemented to make sorted available upon CharsetMatches items.
+        """
+        if not isinstance(other, CharsetMatch):
+            raise ValueError
+
+        chaos_difference: float = abs(self.chaos - other.chaos)
+        coherence_difference: float = abs(self.coherence - other.coherence)
+
+        # Below 1% difference --> Use Coherence
+        if chaos_difference < 0.01 and coherence_difference > 0.02:
+            return self.coherence > other.coherence
+        elif chaos_difference < 0.01 and coherence_difference <= 0.02:
+            # When having a difficult decision, use the result that decoded as many multi-byte as possible.
+            # preserve RAM usage!
+            if len(self._payload) >= TOO_BIG_SEQUENCE:
+                return self.chaos < other.chaos
+            return self.multi_byte_usage > other.multi_byte_usage
+
+        return self.chaos < other.chaos
+
+    @property
+    def multi_byte_usage(self) -> float:
+        return 1.0 - (len(str(self)) / len(self.raw))
+
+    def __str__(self) -> str:
+        # Lazy Str Loading
+        if self._string is None:
+            self._string = str(self._payload, self._encoding, "strict")
+        return self._string
+
+    def __repr__(self) -> str:
+        return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
+
+    def add_submatch(self, other: "CharsetMatch") -> None:
+        if not isinstance(other, CharsetMatch) or other == self:
+            raise ValueError(
+                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
+                    other.__class__
+                )
+            )
+
+        other._string = None  # Unload RAM usage; dirty trick.
+        self._leaves.append(other)
+
+    @property
+    def encoding(self) -> str:
+        return self._encoding
+
+    @property
+    def encoding_aliases(self) -> List[str]:
+        """
+        Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
+        """
+        also_known_as: List[str] = []
+        for u, p in aliases.items():
+            if self.encoding == u:
+                also_known_as.append(p)
+            elif self.encoding == p:
+                also_known_as.append(u)
+        return also_known_as
+
+    @property
+    def bom(self) -> bool:
+        return self._has_sig_or_bom
+
+    @property
+    def byte_order_mark(self) -> bool:
+        return self._has_sig_or_bom
+
+    @property
+    def languages(self) -> List[str]:
+        """
+        Return the complete list of possible languages found in decoded sequence.
+        Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
+        """
+        return [e[0] for e in self._languages]
+
+    @property
+    def language(self) -> str:
+        """
+        Most probable language found in decoded sequence. If none were detected or inferred, the property will return
+        "Unknown".
+        """
+        if not self._languages:
+            # Trying to infer the language based on the given encoding
+            # Its either English or we should not pronounce ourselves in certain cases.
+            if "ascii" in self.could_be_from_charset:
+                return "English"
+
+            # doing it there to avoid circular import
+            from charset_normalizer.cd import encoding_languages, mb_encoding_languages
+
+            languages = (
+                mb_encoding_languages(self.encoding)
+                if is_multi_byte_encoding(self.encoding)
+                else encoding_languages(self.encoding)
+            )
+
+            if len(languages) == 0 or "Latin Based" in languages:
+                return "Unknown"
+
+            return languages[0]
+
+        return self._languages[0][0]
+
+    @property
+    def chaos(self) -> float:
+        return self._mean_mess_ratio
+
+    @property
+    def coherence(self) -> float:
+        if not self._languages:
+            return 0.0
+        return self._languages[0][1]
+
+    @property
+    def percent_chaos(self) -> float:
+        return round(self.chaos * 100, ndigits=3)
+
+    @property
+    def percent_coherence(self) -> float:
+        return round(self.coherence * 100, ndigits=3)
+
+    @property
+    def raw(self) -> bytes:
+        """
+        Original untouched bytes.
+        """
+        return self._payload
+
+    @property
+    def submatch(self) -> List["CharsetMatch"]:
+        return self._leaves
+
+    @property
+    def has_submatch(self) -> bool:
+        return len(self._leaves) > 0
+
+    @property
+    def alphabets(self) -> List[str]:
+        if self._unicode_ranges is not None:
+            return self._unicode_ranges
+        # list detected ranges
+        detected_ranges: List[Optional[str]] = [
+            unicode_range(char) for char in str(self)
+        ]
+        # filter and sort
+        self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
+        return self._unicode_ranges
+
+    @property
+    def could_be_from_charset(self) -> List[str]:
+        """
+        The complete list of encoding that output the exact SAME str result and therefore could be the originating
+        encoding.
+        This list does include the encoding available in property 'encoding'.
+        """
+        return [self._encoding] + [m.encoding for m in self._leaves]
+
+    def output(self, encoding: str = "utf_8") -> bytes:
+        """
+        Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
+        Any errors will be simply ignored by the encoder NOT replaced.
+        """
+        if self._output_encoding is None or self._output_encoding != encoding:
+            self._output_encoding = encoding
+            self._output_payload = str(self).encode(encoding, "replace")
+
+        return self._output_payload  # type: ignore
+
+    @property
+    def fingerprint(self) -> str:
+        """
+        Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
+        """
+        return sha256(self.output()).hexdigest()
+
+
+class CharsetMatches:
+    """
+    Container with every CharsetMatch items ordered by default from most probable to the less one.
+    Act like a list(iterable) but does not implements all related methods.
+    """
+
+    def __init__(self, results: Optional[List[CharsetMatch]] = None):
+        self._results: List[CharsetMatch] = sorted(results) if results else []
+
+    def __iter__(self) -> Iterator[CharsetMatch]:
+        yield from self._results
+
+    def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
+        """
+        Retrieve a single item either by its position or encoding name (alias may be used here).
+        Raise KeyError upon invalid index or encoding not present in results.
+        """
+        if isinstance(item, int):
+            return self._results[item]
+        if isinstance(item, str):
+            item = iana_name(item, False)
+            for result in self._results:
+                if item in result.could_be_from_charset:
+                    return result
+        raise KeyError
+
+    def __len__(self) -> int:
+        return len(self._results)
+
+    def __bool__(self) -> bool:
+        return len(self._results) > 0
+
+    def append(self, item: CharsetMatch) -> None:
+        """
+        Insert a single match. Will be inserted accordingly to preserve sort.
+        Can be inserted as a submatch.
+        """
+        if not isinstance(item, CharsetMatch):
+            raise ValueError(
+                "Cannot append instance '{}' to CharsetMatches".format(
+                    str(item.__class__)
+                )
+            )
+        # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
+        if len(item.raw) <= TOO_BIG_SEQUENCE:
+            for match in self._results:
+                if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
+                    match.add_submatch(item)
+                    return
+        self._results.append(item)
+        self._results = sorted(self._results)
+
+    def best(self) -> Optional["CharsetMatch"]:
+        """
+        Simply return the first match. Strict equivalent to matches[0].
+        """
+        if not self._results:
+            return None
+        return self._results[0]
+
+    def first(self) -> Optional["CharsetMatch"]:
+        """
+        Redundant method, call the method best(). Kept for BC reasons.
+        """
+        return self.best()
+
+
+CoherenceMatch = Tuple[str, float]
+CoherenceMatches = List[CoherenceMatch]
+
+
+class CliDetectionResult:
+    def __init__(
+        self,
+        path: str,
+        encoding: Optional[str],
+        encoding_aliases: List[str],
+        alternative_encodings: List[str],
+        language: str,
+        alphabets: List[str],
+        has_sig_or_bom: bool,
+        chaos: float,
+        coherence: float,
+        unicode_path: Optional[str],
+        is_preferred: bool,
+    ):
+        self.path: str = path
+        self.unicode_path: Optional[str] = unicode_path
+        self.encoding: Optional[str] = encoding
+        self.encoding_aliases: List[str] = encoding_aliases
+        self.alternative_encodings: List[str] = alternative_encodings
+        self.language: str = language
+        self.alphabets: List[str] = alphabets
+        self.has_sig_or_bom: bool = has_sig_or_bom
+        self.chaos: float = chaos
+        self.coherence: float = coherence
+        self.is_preferred: bool = is_preferred
+
+    @property
+    def __dict__(self) -> Dict[str, Any]:  # type: ignore
+        return {
+            "path": self.path,
+            "encoding": self.encoding,
+            "encoding_aliases": self.encoding_aliases,
+            "alternative_encodings": self.alternative_encodings,
+            "language": self.language,
+            "alphabets": self.alphabets,
+            "has_sig_or_bom": self.has_sig_or_bom,
+            "chaos": self.chaos,
+            "coherence": self.coherence,
+            "unicode_path": self.unicode_path,
+            "is_preferred": self.is_preferred,
+        }
+
+    def to_json(self) -> str:
+        return dumps(self.__dict__, ensure_ascii=True, indent=4)
env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/METADATA
ADDED
@@ -0,0 +1,441 @@
+Metadata-Version: 2.1
+Name: colorama
+Version: 0.4.6
+Summary: Cross-platform colored terminal text.
+Project-URL: Homepage, https://github.com/tartley/colorama
+Author-email: Jonathan Hartley <[email protected]>
+License-File: LICENSE.txt
+Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Terminals
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7
+Description-Content-Type: text/x-rst
+
+.. image:: https://img.shields.io/pypi/v/colorama.svg
+    :target: https://pypi.org/project/colorama/
+    :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/pyversions/colorama.svg
+    :target: https://pypi.org/project/colorama/
+    :alt: Supported Python versions
+
+.. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg
+    :target: https://github.com/tartley/colorama/actions/workflows/test.yml
+    :alt: Build Status
+
+Colorama
+========
+
+Makes ANSI escape character sequences (for producing colored terminal text and
+cursor positioning) work under MS Windows.
+
+.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif
+    :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama&currency_code=USD
+    :alt: Donate with Paypal
+
+`PyPI for releases <https://pypi.org/project/colorama/>`_ |
+`Github for source <https://github.com/tartley/colorama>`_ |
+`Colorama for enterprise on Tidelift <https://github.com/tartley/colorama/blob/master/ENTERPRISE.md>`_
+
+If you find Colorama useful, please |donate| to the authors. Thank you!
+
+Installation
+------------
+
+Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.8.
+
+No requirements other than the standard library.
+
+.. code-block:: bash
+
+    pip install colorama
+    # or
+    conda install -c anaconda colorama
+
+Description
+-----------
+
+ANSI escape character sequences have long been used to produce colored terminal
+text and cursor positioning on Unix and Macs. Colorama makes this work on
+Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which
+would appear as gobbledygook in the output), and converting them into the
+appropriate win32 calls to modify the state of the terminal. On other platforms,
+Colorama does nothing.
+
+This has the upshot of providing a simple cross-platform API for printing
+colored terminal text from Python, and has the happy side-effect that existing
+applications or libraries which use ANSI sequences to produce colored output on
+Linux or Macs can now also work on Windows, simply by calling
+``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()``
+(all versions, but may have other side-effects – see below).
+
+An alternative approach is to install ``ansi.sys`` on Windows machines, which
+provides the same behaviour for all applications running in terminals. Colorama
+is intended for situations where that isn't easy (e.g., maybe your app doesn't
+have an installer.)
+
+Demo scripts in the source code repository print some colored text using
+ANSI sequences. Compare their output under Gnome-terminal's built in ANSI
+handling, versus on Windows Command-Prompt using Colorama:
+
+.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png
+    :width: 661
+    :height: 357
+    :alt: ANSI sequences on Ubuntu under gnome-terminal.
+
+.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png
+    :width: 668
+    :height: 325
+    :alt: Same ANSI sequences on Windows, using Colorama.
+
+These screenshots show that, on Windows, Colorama does not support ANSI 'dim
+text'; it looks the same as 'normal text'.
+
+Usage
+-----
+
+Initialisation
+..............
+
+If the only thing you want from Colorama is to get ANSI escapes to work on
+Windows, then run:
+
+.. code-block:: python
+
+    from colorama import just_fix_windows_console
+    just_fix_windows_console()
+
+If you're on a recent version of Windows 10 or better, and your stdout/stderr
+are pointing to a Windows console, then this will flip the magic configuration
+switch to enable Windows' built-in ANSI support.
+
+If you're on an older version of Windows, and your stdout/stderr are pointing to
+a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a
+magic file object that intercepts ANSI escape sequences and issues the
+appropriate Win32 calls to emulate them.
+
+In all other circumstances, it does nothing whatsoever. Basically the idea is
+that this makes Windows act like Unix with respect to ANSI escape handling.
+
+It's safe to call this function multiple times. It's safe to call this function
+on non-Windows platforms, but it won't do anything. It's safe to call this
+function when one or both of your stdout/stderr are redirected to a file – it
+won't do anything to those streams.
+
+Alternatively, you can use the older interface with more features (but also more
+potential footguns):
+
+.. code-block:: python
+
+    from colorama import init
+    init()
+
+This does the same thing as ``just_fix_windows_console``, except for the
+following differences:
+
+- It's not safe to call ``init`` multiple times; you can end up with multiple
+  layers of wrapping and broken ANSI support.
+
+- Colorama will apply a heuristic to guess whether stdout/stderr support ANSI,
+  and if it thinks they don't, then it will wrap ``sys.stdout`` and
+  ``sys.stderr`` in a magic file object that strips out ANSI escape sequences
+  before printing them. This happens on all platforms, and can be convenient if
+  you want to write your code to emit ANSI escape sequences unconditionally, and
+  let Colorama decide whether they should actually be output. But note that
+  Colorama's heuristic is not particularly clever.
+
+- ``init`` also accepts explicit keyword args to enable/disable various
+  functionality – see below.
+
+To stop using Colorama before your program exits, simply call ``deinit()``.
+This will restore ``stdout`` and ``stderr`` to their original values, so that
+Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is
+cheaper than calling ``init()`` again (but does the same thing).
+
+Most users should depend on ``colorama >= 0.4.6``, and use
+``just_fix_windows_console``. The old ``init`` interface will be supported
+indefinitely for backwards compatibility, but we don't plan to fix any issues
+with it, also for backwards compatibility.
+
+Colored Output
+..............
+
+Cross-platform printing of colored text can then be done using Colorama's
+constant shorthand for ANSI escape sequences. These are deliberately
+rudimentary, see below.
+
+.. code-block:: python
+
+    from colorama import Fore, Back, Style
+    print(Fore.RED + 'some red text')
+    print(Back.GREEN + 'and with a green background')
+    print(Style.DIM + 'and in dim text')
+    print(Style.RESET_ALL)
+    print('back to normal now')
+
+...or simply by manually printing ANSI sequences from your own code:
+
+.. code-block:: python
+
+    print('\033[31m' + 'some red text')
+    print('\033[39m') # and reset to default color
+
+...or, Colorama can be used in conjunction with existing ANSI libraries
+such as the venerable `Termcolor <https://pypi.org/project/termcolor/>`_
+the fabulous `Blessings <https://pypi.org/project/blessings/>`_,
+or the incredible `_Rich <https://pypi.org/project/rich/>`_.
+
+If you wish Colorama's Fore, Back and Style constants were more capable,
+then consider using one of the above highly capable libraries to generate
+colors, etc, and use Colorama just for its primary purpose: to convert
+those ANSI sequences to also work on Windows:
+
+SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama.
+We are only interested in converting ANSI codes to win32 API calls, not
+shortcuts like the above to generate ANSI characters.
+
+.. code-block:: python
+
+    from colorama import just_fix_windows_console
+    from termcolor import colored
+
+    # use Colorama to make Termcolor work on Windows too
+    just_fix_windows_console()
+
+    # then use Termcolor for all colored text output
+    print(colored('Hello, World!', 'green', 'on_red'))
+
+Available formatting constants are::
+
+    Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+    Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+    Style: DIM, NORMAL, BRIGHT, RESET_ALL
+
+``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will
+perform this reset automatically on program exit.
+
+These are fairly well supported, but not part of the standard::
+
+    Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+    Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+
+Cursor Positioning
+..................
+
+ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for
+an example of how to generate them.
+
+Init Keyword Args
+.................
+
+``init()`` accepts some ``**kwargs`` to override default behaviour.
+
+init(autoreset=False):
+    If you find yourself repeatedly sending reset sequences to turn off color
+    changes at the end of every print, then ``init(autoreset=True)`` will
+    automate that:
+
+    .. code-block:: python
+
+        from colorama import init
+        init(autoreset=True)
+        print(Fore.RED + 'some red text')
+        print('automatically back to default color again')
+
+init(strip=None):
+    Pass ``True`` or ``False`` to override whether ANSI codes should be
+    stripped from the output. The default behaviour is to strip if on Windows
+    or if output is redirected (not a tty).
+
+init(convert=None):
+    Pass ``True`` or ``False`` to override whether to convert ANSI codes in the
+    output into win32 calls. The default behaviour is to convert if on Windows
+    and output is to a tty (terminal).
+
+init(wrap=True):
+    On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr``
+    with proxy objects, which override the ``.write()`` method to do their work.
+    If this wrapping causes you problems, then this can be disabled by passing
+    ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or
+    ``strip`` or ``convert`` are True.
+
+    When wrapping is disabled, colored printing on non-Windows platforms will
+    continue to work as normal. To do cross-platform colored output, you can
+    use Colorama's ``AnsiToWin32`` proxy directly:
+
+    .. code-block:: python
+
+        import sys
+        from colorama import init, AnsiToWin32
+        init(wrap=False)
+        stream = AnsiToWin32(sys.stderr).stream
+
+        # Python 2
+        print >>stream, Fore.BLUE + 'blue text on stderr'
+
+        # Python 3
+        print(Fore.BLUE + 'blue text on stderr', file=stream)
+
+Recognised ANSI Sequences
+.........................
+
+ANSI sequences generally take the form::
+
+    ESC [ <param> ; <param> ... <command>
+
+Where ``<param>`` is an integer, and ``<command>`` is a single letter. Zero or
+more params are passed to a ``<command>``. If no params are passed, it is
+generally synonymous with passing a single zero. No spaces exist in the
+sequence; they have been inserted here simply to read more easily.
+
+The only ANSI sequences that Colorama converts into win32 calls are::
+
+    ESC [ 0 m       # reset all (colors and brightness)
+    ESC [ 1 m       # bright
+    ESC [ 2 m       # dim (looks same as normal brightness)
+    ESC [ 22 m      # normal brightness
+
+    # FOREGROUND:
+    ESC [ 30 m      # black
+    ESC [ 31 m      # red
+    ESC [ 32 m      # green
+    ESC [ 33 m      # yellow
+    ESC [ 34 m      # blue
+    ESC [ 35 m      # magenta
+    ESC [ 36 m      # cyan
+    ESC [ 37 m      # white
+    ESC [ 39 m      # reset
+
+    # BACKGROUND
+    ESC [ 40 m      # black
+    ESC [ 41 m      # red
+    ESC [ 42 m      # green
+    ESC [ 43 m      # yellow
+    ESC [ 44 m      # blue
+    ESC [ 45 m      # magenta
+    ESC [ 46 m      # cyan
+    ESC [ 47 m      # white
+    ESC [ 49 m      # reset
+
+    # cursor positioning
+    ESC [ y;x H     # position cursor at x across, y down
+    ESC [ y;x f     # position cursor at x across, y down
+    ESC [ n A       # move cursor n lines up
+    ESC [ n B       # move cursor n lines down
+    ESC [ n C       # move cursor n characters forward
+    ESC [ n D       # move cursor n characters backward
+
+    # clear the screen
+    ESC [ mode J    # clear the screen
+
+    # clear the line
+    ESC [ mode K    # clear the line
+
+Multiple numeric params to the ``'m'`` command can be combined into a single
+sequence::
+
+    ESC [ 36 ; 45 ; 1 m     # bright cyan text on magenta background
+
+All other ANSI sequences of the form ``ESC [ <param> ; <param> ... <command>``
+are silently stripped from the output on Windows.
+
+Any other form of ANSI sequence, such as single-character codes or alternative
+initial characters, are not recognised or stripped. It would be cool to add
+them though. Let me know if it would be useful for you, via the Issues on
+GitHub.
+
+Status & Known Problems
+-----------------------
+
+I've personally only tested it on Windows XP (CMD, Console2), Ubuntu
+(gnome-terminal, xterm), and OS X.
+
+Some valid ANSI sequences aren't recognised.
+
+If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the
+explanation there of why we do not want PRs that allow Colorama to generate new
+types of ANSI codes.
+
+See outstanding issues and wish-list:
+https://github.com/tartley/colorama/issues
+
+If anything doesn't work for you, or doesn't do what you expected or hoped for,
+I'd love to hear about it on that issues list, would be delighted by patches,
+and would be happy to grant commit access to anyone who submits a working patch
+or two.
+
+.. _README-hacking.md: README-hacking.md
+
+License
+-------
+
+Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see
+LICENSE file.
+
+Professional support
+--------------------
+
+.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png
+    :alt: Tidelift
+    :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
+
+.. list-table::
+    :widths: 10 100
+
+    * - |tideliftlogo|
+      - Professional support for colorama is available as part of the
+        `Tidelift Subscription`_.
+        Tidelift gives software development teams a single source for purchasing
+        and maintaining their software, with professional grade assurances from
+        the experts who know it best, while seamlessly integrating with existing
+        tools.
+
+.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
+
+Thanks
+------
+
+See the CHANGELOG for more thanks!
+
+* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5.
+* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``,
+  providing a solution to issue #7's setuptools/distutils debate,
+  and other fixes.
+* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``.
+* Matthew McCormick for politely pointing out a longstanding crash on non-Win.
+* Ben Hoyt, for a magnificent fix under 64-bit Windows.
+* Jesse at Empty Square for submitting a fix for examples in the README.
+* User 'jamessp', an observant documentation fix for cursor positioning.
+* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7
+  fix.
+* Julien Stuyck, for wisely suggesting Python3 compatible updates to README.
+* Daniel Griffith for multiple fabulous patches.
+* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty
+  output.
+* Roger Binns, for many suggestions, valuable feedback, & bug reports.
+* Tim Golden for thought and much appreciated feedback on the initial idea.
+* User 'Zearin' for updates to the README file.
+* John Szakmeister for adding support for light colors
+* Charles Merriam for adding documentation to demos
+* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes
+* Florian Bruhin for a fix when stdout or stderr are None
+* Thomas Weininger for fixing ValueError on Windows
+* Remi Rampin for better Github integration and fixes to the README file
+* Simeon Visser for closing a file handle using 'with' and updating classifiers
+  to include Python 3.3 and 3.4
+* Andy Neff for fixing RESET of LIGHT_EX colors.
+* Jonathan Hartley for the initial idea and implementation.
env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/RECORD
ADDED
@@ -0,0 +1,31 @@
+colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158
+colorama-0.4.6.dist-info/RECORD,,
+colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105
+colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491
+colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266
+colorama/__pycache__/__init__.cpython-310.pyc,,
+colorama/__pycache__/ansi.cpython-310.pyc,,
+colorama/__pycache__/ansitowin32.cpython-310.pyc,,
+colorama/__pycache__/initialise.cpython-310.pyc,,
+colorama/__pycache__/win32.cpython-310.pyc,,
+colorama/__pycache__/winterm.cpython-310.pyc,,
+colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522
+colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128
+colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325
+colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75
+colorama/tests/__pycache__/__init__.cpython-310.pyc,,
+colorama/tests/__pycache__/ansi_test.cpython-310.pyc,,
+colorama/tests/__pycache__/ansitowin32_test.cpython-310.pyc,,
+colorama/tests/__pycache__/initialise_test.cpython-310.pyc,,
+colorama/tests/__pycache__/isatty_test.cpython-310.pyc,,
+colorama/tests/__pycache__/utils.cpython-310.pyc,,
+colorama/tests/__pycache__/winterm_test.cpython-310.pyc,,
+colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839
+colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678
+colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741
+colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866
+colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079
+colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709
+colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181
+colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134
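Each row above follows the wheel RECORD format: ``path,sha256=<digest>,size``, where the digest is an unpadded URL-safe base64 SHA-256 of the file and the hash/size fields are left empty for unhashed entries (the RECORD itself and the ``.pyc`` files). A minimal verification sketch, assuming ``site_packages`` points at the directory containing these files::

    import base64
    import csv
    import hashlib
    from pathlib import Path

    site_packages = Path("env-llmeval/lib/python3.10/site-packages")  # assumed root

    def row_ok(path: str, hash_field: str) -> bool:
        # Rows like 'colorama-0.4.6.dist-info/RECORD,,' carry no hash to check.
        if not hash_field:
            return True
        algo, _, expected = hash_field.partition("=")
        digest = hashlib.new(algo, (site_packages / path).read_bytes()).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=").decode() == expected

    record = site_packages / "colorama-0.4.6.dist-info" / "RECORD"
    with record.open(newline="") as fp:
        print(all(row_ok(path, h) for path, h, _size in csv.reader(fp)))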
env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.11.1
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
env-llmeval/lib/python3.10/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt
ADDED
@@ -0,0 +1,27 @@
+Copyright (c) 2010 Jonathan Hartley
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holders, nor those of its contributors
+  may be used to endorse or promote products derived from this software without
+  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
env-llmeval/lib/python3.10/site-packages/jsonlines/__init__.py
ADDED
@@ -0,0 +1,20 @@
+"""
+Module for the jsonlines data format.
+"""
+
+# expose only public api
+from .jsonlines import (
+    Error,
+    InvalidLineError,
+    Reader,
+    Writer,
+    open,
+)
+
+__all__ = [
+    "Error",
+    "InvalidLineError",
+    "Reader",
+    "Writer",
+    "open",
+]
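A short usage sketch of the public API exported above (the file name is illustrative)::

    import jsonlines

    # Write two objects, one JSON document per line.
    with jsonlines.open("demo.jsonl", mode="w") as writer:
        writer.write({"name": "ada", "score": 1})
        writer.write({"name": "grace", "score": 2})

    # Read them back; each line is decoded independently.
    with jsonlines.open("demo.jsonl") as reader:
        for obj in reader:
            print(obj["name"], obj["score"])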
env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (364 Bytes)
env-llmeval/lib/python3.10/site-packages/jsonlines/__pycache__/jsonlines.cpython-310.pyc
ADDED
Binary file (17 kB)
env-llmeval/lib/python3.10/site-packages/jsonlines/jsonlines.py
ADDED
@@ -0,0 +1,665 @@
+"""
+jsonlines implementation
+"""
+
+import builtins
+import codecs
+import enum
+import io
+import json
+import os
+import types
+import typing
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    Literal,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
+import attr
+
+orjson: Optional[types.ModuleType]
+try:
+    import orjson
+except ImportError:
+    orjson = None
+
+ujson: Optional[types.ModuleType]
+try:
+    import ujson
+except ImportError:
+    ujson = None
+
+
+VALID_TYPES = {
+    bool,
+    dict,
+    float,
+    int,
+    list,
+    str,
+}
+
+# Characters to skip at the beginning of a line. Note: at most one such
+# character is skipped per line.
+SKIPPABLE_SINGLE_INITIAL_CHARS = (
+    "\x1e",  # RFC7464 text sequence
+    codecs.BOM_UTF8.decode(),
+)
+
+
+class DumpsResultConversion(enum.Enum):
+    LeaveAsIs = enum.auto()
+    EncodeToBytes = enum.auto()
+    DecodeToString = enum.auto()
+
+
+# https://docs.python.org/3/library/functions.html#open
+Openable = Union[str, bytes, int, os.PathLike]
+
+LoadsCallable = Callable[[Union[str, bytes]], Any]
+DumpsCallable = Callable[[Any], Union[str, bytes]]
+
+# Currently, JSON structures cannot be typed properly:
+# - https://github.com/python/typing/issues/182
+# - https://github.com/python/mypy/issues/731
+JSONCollection = Union[Dict[str, Any], List[Any]]
+JSONScalar = Union[bool, float, int, str]
+JSONValue = Union[JSONCollection, JSONScalar]
+TJSONValue = TypeVar("TJSONValue", bound=JSONValue)
+
+TRW = TypeVar("TRW", bound="ReaderWriterBase")
+
+# Default to using the fastest JSON library for reading, falling back to the
+# standard library (always available) if none are installed.
+if orjson is not None:
+    default_loads = orjson.loads
+elif ujson is not None:
+    default_loads = ujson.loads
+else:
+    default_loads = json.loads
+
+
+# For writing, use the stdlib. Other packages may be faster but their behaviour
+# (supported types etc.) and output (whitespace etc.) are not the same as the
+# stdlib json module, so this should be opt-in via the ‘dumps=’ arg.
+def default_dumps(obj: Any) -> str:
+    """
+    Fake ``dumps()`` function to use as a default marker.
+    """
+    raise NotImplementedError  # pragma: no cover
+
+
+@attr.s(auto_exc=True, auto_attribs=True)
+class Error(Exception):
+    """
+    Base error class.
+    """
+
+    message: str
+
+
+@attr.s(auto_exc=True, auto_attribs=True, init=False)
+class InvalidLineError(Error, ValueError):
+    """
+    Error raised when an invalid line is encountered.
+
+    This happens when the line does not contain valid JSON, or if a
+    specific data type has been requested, and the line contained a
+    different data type.
+
+    The original line itself is stored on the exception instance as the
+    ``.line`` attribute, and the line number as ``.lineno``.
+
+    This class subclasses both ``jsonlines.Error`` and the built-in
+    ``ValueError``.
+    """
+
+    #: The invalid line
+    line: Union[str, bytes]
+
+    #: The line number
+    lineno: int
+
+    def __init__(self, message: str, line: Union[str, bytes], lineno: int) -> None:
+        self.line = line.rstrip()
+        self.lineno = lineno
+        super().__init__(f"{message} (line {lineno})")
+
+
+@attr.s(auto_attribs=True, repr=False)
+class ReaderWriterBase:
+    """
+    Base class with shared behaviour for both the reader and writer.
+    """
+
+    _fp: Union[typing.IO[str], typing.IO[bytes], None] = attr.ib(
+        default=None, init=False
+    )
+    _closed: bool = attr.ib(default=False, init=False)
+    _should_close_fp: bool = attr.ib(default=False, init=False)
+
+    def close(self) -> None:
+        """
+        Close this reader/writer.
+
+        This closes the underlying file if that file has been opened by
+        this reader/writer. When an already opened file-like object was
+        provided, the caller is responsible for closing it.
+        """
+        if self._closed:
+            return
+        self._closed = True
+        if self._fp is not None and self._should_close_fp:
+            self._fp.close()
+
+    def __repr__(self) -> str:
+        cls_name = type(self).__name__
+        wrapped = self._repr_for_wrapped()
+        return f"<jsonlines.{cls_name} at 0x{id(self):x} wrapping {wrapped}>"
+
+    def _repr_for_wrapped(self) -> str:
+        raise NotImplementedError  # pragma: no cover
+
+    def __enter__(self: TRW) -> TRW:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[types.TracebackType],
+    ) -> None:
+        self.close()
+
+
+@attr.s(auto_attribs=True, repr=False)
+class Reader(ReaderWriterBase):
+    """
+    Reader for the jsonlines format.
+
+    The first argument must be an iterable that yields JSON encoded
+    strings. Usually this will be a readable file-like object, such as
+    an open file or an ``io.TextIO`` instance, but it can also be
+    something else as long as it yields strings when iterated over.
+
+    Instances are iterable and can be used as a context manager.
+
+    The `loads` argument can be used to replace the standard json
+    decoder. If specified, it must be a callable that accepts a
+    (unicode) string and returns the decoded object.
+
+    :param file_or_iterable: file-like object or iterable yielding lines as
+        strings
+    :param loads: custom json decoder callable
+    """
+
+    _file_or_iterable: Union[
+        typing.IO[str], typing.IO[bytes], Iterable[Union[str, bytes]]
+    ]
+    _line_iter: Iterator[Tuple[int, Union[bytes, str]]] = attr.ib(init=False)
+    _loads: LoadsCallable = attr.ib(default=default_loads, kw_only=True)
+
+    def __attrs_post_init__(self) -> None:
+        if isinstance(self._file_or_iterable, io.IOBase):
+            self._fp = cast(
+                Union[typing.IO[str], typing.IO[bytes]],
+                self._file_or_iterable,
+            )
+
+        self._line_iter = enumerate(self._file_or_iterable, 1)
+
+    # No type specified, None not allowed
+    @overload
+    def read(
+        self,
+        *,
+        type: Literal[None] = ...,
+        allow_none: Literal[False] = ...,
+        skip_empty: bool = ...,
+    ) -> JSONValue:
+        ...  # pragma: no cover
+
+    # No type specified, None allowed
+    @overload
+    def read(
+        self,
+        *,
+        type: Literal[None] = ...,
+        allow_none: Literal[True],
+        skip_empty: bool = ...,
+    ) -> Optional[JSONValue]:
+        ...  # pragma: no cover
+
+    # Type specified, None not allowed
+    @overload
+    def read(
+        self,
+        *,
+        type: Type[TJSONValue],
+        allow_none: Literal[False] = ...,
+        skip_empty: bool = ...,
+    ) -> TJSONValue:
+        ...  # pragma: no cover
+
+    # Type specified, None allowed
+    @overload
+    def read(
+        self,
+        *,
+        type: Type[TJSONValue],
+        allow_none: Literal[True],
+        skip_empty: bool = ...,
+    ) -> Optional[TJSONValue]:
+        ...  # pragma: no cover
+
+    # Generic definition
+    @overload
+    def read(
+        self,
+        *,
+        type: Optional[Type[Any]] = ...,
+        allow_none: bool = ...,
+        skip_empty: bool = ...,
+    ) -> Optional[JSONValue]:
+        ...  # pragma: no cover
+
+    def read(
+        self,
+        *,
+        type: Optional[Type[Any]] = None,
+        allow_none: bool = False,
+        skip_empty: bool = False,
+    ) -> Optional[JSONValue]:
+        """
+        Read and decode a line.
+
+        The optional `type` argument specifies the expected data type.
+        Supported types are ``dict``, ``list``, ``str``, ``int``,
+        ``float``, and ``bool``. When specified, non-conforming lines
+        result in :py:exc:`InvalidLineError`.
+
+        By default, input lines containing ``null`` (in JSON) are
+        considered invalid, and will cause :py:exc:`InvalidLineError`.
+        The `allow_none` argument can be used to change this behaviour,
+        in which case ``None`` will be returned instead.
+
+        If `skip_empty` is set to ``True``, empty lines and lines
+        containing only whitespace are silently skipped.
+        """
+        if self._closed:
+            raise RuntimeError("reader is closed")
+        if type is not None and type not in VALID_TYPES:
+            raise ValueError("invalid type specified")
+
+        try:
+            lineno, line = next(self._line_iter)
+            while skip_empty and not line.rstrip():
+                lineno, line = next(self._line_iter)
+        except StopIteration:
+            raise EOFError from None
+
+        if isinstance(line, bytes):
+            try:
+                line = line.decode("utf-8")
+            except UnicodeDecodeError as orig_exc:
+                exc = InvalidLineError(
+                    f"line is not valid utf-8: {orig_exc}", line, lineno
+                )
+                raise exc from orig_exc
+
+        if line.startswith(SKIPPABLE_SINGLE_INITIAL_CHARS):
+            line = line[1:]
+
+        try:
+            value: JSONValue = self._loads(line)
+        except ValueError as orig_exc:
+            exc = InvalidLineError(
+                f"line contains invalid json: {orig_exc}", line, lineno
+            )
+            raise exc from orig_exc
+
+        if value is None:
+            if allow_none:
+                return None
+            raise InvalidLineError("line contains null value", line, lineno)
+
+        if type is not None:
+            valid = isinstance(value, type)
+            if type is int and isinstance(value, bool):
+                # isinstance() is not sufficient, since bool is an int subclass
+                valid = False
+            if not valid:
+                raise InvalidLineError(
+                    "line does not match requested type", line, lineno
+                )
+
+        return value
+
+    # No type specified, None not allowed
+    @overload
+    def iter(
+        self,
+        *,
+        type: Literal[None] = ...,
+        allow_none: Literal[False] = ...,
+        skip_empty: bool = ...,
+        skip_invalid: bool = ...,
+    ) -> Iterator[JSONValue]:
+        ...  # pragma: no cover
+
+    # No type specified, None allowed
+    @overload
+    def iter(
+        self,
+        *,
+        type: Literal[None] = ...,
+        allow_none: Literal[True],
+        skip_empty: bool = ...,
+        skip_invalid: bool = ...,
+    ) -> Iterator[JSONValue]:
+        ...  # pragma: no cover
+
+    # Type specified, None not allowed
+    @overload
+    def iter(
+        self,
+        *,
+        type: Type[TJSONValue],
+        allow_none: Literal[False] = ...,
+        skip_empty: bool = ...,
+        skip_invalid: bool = ...,
+    ) -> Iterator[TJSONValue]:
+        ...  # pragma: no cover
+
+    # Type specified, None allowed
+    @overload
+    def iter(
+        self,
+        *,
+        type: Type[TJSONValue],
+        allow_none: Literal[True],
+        skip_empty: bool = ...,
+        skip_invalid: bool = ...,
+    ) -> Iterator[Optional[TJSONValue]]:
+        ...  # pragma: no cover
+
+    # Generic definition
+    @overload
+    def iter(
+        self,
+        *,
+        type: Optional[Type[TJSONValue]] = ...,
+        allow_none: bool = ...,
+        skip_empty: bool = ...,
+        skip_invalid: bool = ...,
+    ) -> Iterator[Optional[TJSONValue]]:
+        ...  # pragma: no cover
+
+    def iter(
+        self,
+        type: Optional[Type[Any]] = None,
+        allow_none: bool = False,
+        skip_empty: bool = False,
+        skip_invalid: bool = False,
+    ) -> Iterator[Optional[JSONValue]]:
+        """
+        Iterate over all lines.
+
+        This is the iterator equivalent to repeatedly calling
+        :py:meth:`~Reader.read()`. If no arguments are specified, this
+        is the same as directly iterating over this :py:class:`Reader`
+        instance.
+
+        When `skip_invalid` is set to ``True``, invalid lines will be
+        silently ignored.
+
+        See :py:meth:`~Reader.read()` for a description of the other
+        arguments.
+        """
+        try:
+            while True:
+                try:
+                    yield self.read(
+                        type=type, allow_none=allow_none, skip_empty=skip_empty
+                    )
+                except InvalidLineError:
+                    if not skip_invalid:
+                        raise
+        except EOFError:
+            pass
+
+    def __iter__(self) -> Iterator[Any]:
+        """
+        See :py:meth:`~Reader.iter()`.
+        """
+        return self.iter()
+
+    def _repr_for_wrapped(self) -> str:
+        if self._fp is not None:
+            return repr_for_fp(self._fp)
+        class_name = type(self._file_or_iterable).__name__
+        return f"<{class_name} at 0x{id(self._file_or_iterable):x}>"
+
+
+@attr.s(auto_attribs=True, repr=False)
+class Writer(ReaderWriterBase):
+    """
+    Writer for the jsonlines format.
+
+    Instances can be used as a context manager.
+
+    The `fp` argument must be a file-like object with a ``.write()``
+    method accepting either text (unicode) or bytes.
+
+    The `compact` argument can be used to produce smaller output.
+
+    The `sort_keys` argument can be used to sort keys in json objects,
+    and will produce deterministic output.
+
+    For more control, provide a custom encoder callable using the
+    `dumps` argument. The callable must produce (unicode) string output.
+    If specified, the `compact` and `sort_keys` arguments will be ignored.
+
+    When the `flush` argument is set to ``True``, the writer will call
+    ``fp.flush()`` after each written line.
+
+    :param fp: writable file-like object
+    :param compact: whether to use a compact output format
+    :param sort_keys: whether to sort object keys
+    :param dumps: custom encoder callable
+    :param flush: whether to flush the file-like object after writing each line
+    """
+
+    _fp: Union[typing.IO[str], typing.IO[bytes]] = attr.ib(default=None)
+    _fp_is_binary: bool = attr.ib(default=False, init=False)
+    _compact: bool = attr.ib(default=False, kw_only=True)
+    _sort_keys: bool = attr.ib(default=False, kw_only=True)
+    _flush: bool = attr.ib(default=False, kw_only=True)
+    _dumps: DumpsCallable = attr.ib(default=default_dumps, kw_only=True)
+    _dumps_result_conversion: DumpsResultConversion = attr.ib(
+        default=DumpsResultConversion.LeaveAsIs, init=False
+    )
+
+    def __attrs_post_init__(self) -> None:
+        if isinstance(self._fp, io.TextIOBase):
+            self._fp_is_binary = False
+        elif isinstance(self._fp, io.IOBase):
+            self._fp_is_binary = True
+        else:
+            try:
+                self._fp.write("")  # type: ignore[call-overload]
+            except TypeError:
+                self._fp_is_binary = True
+            else:
+                self._fp_is_binary = False
+
+        if self._dumps is default_dumps:
+            self._dumps = json.JSONEncoder(
+                ensure_ascii=False,
+                separators=(",", ":") if self._compact else (", ", ": "),
+                sort_keys=self._sort_keys,
+            ).encode
+
+        # Detect if str-to-bytes conversion (or vice versa) is needed for the
+        # combination of this file-like object and the used dumps() callable.
+        # This avoids checking this for each .write(). Note that this
+        # deliberately does not support ‘dynamic’ return types that depend on
+        # input and dump options, like simplejson on Python 2 in some cases.
+        sample_dumps_result = self._dumps({})
+        if isinstance(sample_dumps_result, str) and self._fp_is_binary:
+            self._dumps_result_conversion = DumpsResultConversion.EncodeToBytes
+        elif isinstance(sample_dumps_result, bytes) and not self._fp_is_binary:
+            self._dumps_result_conversion = DumpsResultConversion.DecodeToString
+
+    def write(self, obj: Any) -> int:
+        """
+        Encode and write a single object.
+
+        :param obj: the object to encode and write
+        :return: number of characters or bytes written
+        """
+        if self._closed:
+            raise RuntimeError("writer is closed")
+
+        line = self._dumps(obj)
+
+        # This handles either str or bytes, but the type checker does not know
+        # that this code always passes the right type of arguments.
+        if self._dumps_result_conversion == DumpsResultConversion.EncodeToBytes:
+            line = line.encode()  # type: ignore[union-attr]
+        elif self._dumps_result_conversion == DumpsResultConversion.DecodeToString:
+            line = line.decode()  # type: ignore[union-attr]
+
+        fp = self._fp
+        fp.write(line)  # type: ignore[arg-type]
+        fp.write(b"\n" if self._fp_is_binary else "\n")  # type: ignore[call-overload]
+
+        if self._flush:
+            fp.flush()
+
+        return len(line) + 1  # including newline
+
+    def write_all(self, iterable: Iterable[Any]) -> int:
+        """
+        Encode and write multiple objects.
+
+        :param iterable: an iterable of objects
+        :return: number of characters or bytes written
+        """
+        return sum(self.write(obj) for obj in iterable)
+
+    def _repr_for_wrapped(self) -> str:
+        return repr_for_fp(self._fp)
+
+
+@overload
+def open(
+    file: Openable,
+    mode: Literal["r"] = ...,
+    *,
+    loads: Optional[LoadsCallable] = ...,
+) -> Reader:
+    ...  # pragma: no cover
+
+
+@overload
+def open(
+    file: Openable,
+    mode: Literal["w", "a", "x"],
+    *,
+    dumps: Optional[DumpsCallable] = ...,
+    compact: Optional[bool] = ...,
+    sort_keys: Optional[bool] = ...,
+    flush: Optional[bool] = ...,
+) -> Writer:
+    ...  # pragma: no cover
+
+
+@overload
+def open(
+    file: Openable,
+    mode: str = ...,
+    *,
+    loads: Optional[LoadsCallable] = ...,
+    dumps: Optional[DumpsCallable] = ...,
+    compact: Optional[bool] = ...,
+    sort_keys: Optional[bool] = ...,
+    flush: Optional[bool] = ...,
+) -> Union[Reader, Writer]:
+    ...  # pragma: no cover
+
+
+def open(
+    file: Openable,
+    mode: str = "r",
+    *,
+    loads: Optional[LoadsCallable] = None,
+    dumps: Optional[DumpsCallable] = None,
+    compact: Optional[bool] = None,
+    sort_keys: Optional[bool] = None,
+    flush: Optional[bool] = None,
+) -> Union[Reader, Writer]:
+    """
+    Open a jsonlines file for reading or writing.
+
+    This is a convenience function to open a file and wrap it in either a
+    :py:class:`Reader` or :py:class:`Writer` instance, depending on the
+    specified `mode`.
+
+    Additional keyword arguments will be passed on to the reader and writer;
+    see their documentation for available options.
+
+    The resulting reader or writer must be closed after use by the
+    caller, which will also close the opened file. This can be done by
+    calling ``.close()``, but the easiest way to ensure proper resource
+    finalisation is to use a ``with`` block (context manager), e.g.
+
+    ::
+
+        with jsonlines.open('out.jsonl', mode='w') as writer:
+            writer.write(...)
+
+    :param file: name or ‘path-like object’ of the file to open
+    :param mode: whether to open the file for reading (``r``),
+        writing (``w``), appending (``a``), or exclusive creation (``x``).
+    """
+    if mode not in {"r", "w", "a", "x"}:
+        raise ValueError("'mode' must be either 'r', 'w', 'a', or 'x'")
+
+    cls = Reader if mode == "r" else Writer
+    encoding = "utf-8-sig" if mode == "r" else "utf-8"
+    fp = builtins.open(file, mode=mode + "t", encoding=encoding)
+    kwargs = dict(
+        loads=loads,
+        dumps=dumps,
+        compact=compact,
+        sort_keys=sort_keys,
+        flush=flush,
+    )
+    kwargs = {key: value for key, value in kwargs.items() if value is not None}
+    instance: Union[Reader, Writer] = cls(fp, **kwargs)
+    instance._should_close_fp = True
+    return instance
+
+
+def repr_for_fp(fp: typing.IO[Any]) -> str:
+    """
+    Helper to make a useful repr() for a file-like object.
+    """
+    name = getattr(fp, "name", None)
+    if name is not None:
+        return repr(name)
+    else:
+        return repr(fp)
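A sketch of the reading behaviour implemented above: ``type=`` enforces the decoded type, ``skip_empty`` skips blank lines, and ``skip_invalid`` silently drops lines that raise ``InvalidLineError`` (in-memory input for illustration)::

    import io

    import jsonlines

    data = io.StringIO('{"a": 1}\nnot json\n\n{"a": 2}\n')

    with jsonlines.Reader(data) as reader:
        for obj in reader.iter(type=dict, skip_invalid=True, skip_empty=True):
            print(obj)  # {'a': 1} then {'a': 2}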
env-llmeval/lib/python3.10/site-packages/jsonlines/py.typed
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/REQUESTED
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
env-llmeval/lib/python3.10/site-packages/portalocker/__about__.py
ADDED
@@ -0,0 +1,6 @@
+__package_name__ = 'portalocker'
+__author__ = 'Rick van Hattem'
+__email__ = '[email protected]'
+__version__ = '2.8.2'
+__description__ = '''Wraps the portalocker recipe for easy usage'''
+__url__ = 'https://github.com/WoLpH/portalocker'
env-llmeval/lib/python3.10/site-packages/portalocker/__init__.py
ADDED
@@ -0,0 +1,76 @@
+from . import __about__, constants, exceptions, portalocker, utils
+
+try:  # pragma: no cover
+    from .redis import RedisLock
+except ImportError:  # pragma: no cover
+    RedisLock = None  # type: ignore
+
+
+#: The package name on Pypi
+__package_name__ = __about__.__package_name__
+#: Current author and maintainer, view the git history for the previous ones
+__author__ = __about__.__author__
+#: Current author's email address
+__email__ = __about__.__email__
+#: Version number
+__version__ = '2.8.2'
+#: Package description for Pypi
+__description__ = __about__.__description__
+#: Package homepage
+__url__ = __about__.__url__
+
+
+#: Exception thrown when the file is already locked by someone else
+AlreadyLocked = exceptions.AlreadyLocked
+#: Exception thrown if an error occurred during locking
+LockException = exceptions.LockException
+
+
+#: Lock a file. Note that this is an advisory lock on Linux/Unix systems
+lock = portalocker.lock
+#: Unlock a file
+unlock = portalocker.unlock
+
+#: Place an exclusive lock.
+#: Only one process may hold an exclusive lock for a given file at a given
+#: time.
+LOCK_EX: constants.LockFlags = constants.LockFlags.EXCLUSIVE
+
+#: Place a shared lock.
+#: More than one process may hold a shared lock for a given file at a given
+#: time.
+LOCK_SH: constants.LockFlags = constants.LockFlags.SHARED
+
+#: Acquire the lock in a non-blocking fashion.
+LOCK_NB: constants.LockFlags = constants.LockFlags.NON_BLOCKING
+
+#: Remove an existing lock held by this process.
+LOCK_UN: constants.LockFlags = constants.LockFlags.UNBLOCK
+
+#: Locking flags enum
+LockFlags = constants.LockFlags
+
+#: Locking utility class to automatically handle opening with timeouts and
+#: context wrappers
+Lock = utils.Lock
+RLock = utils.RLock
+BoundedSemaphore = utils.BoundedSemaphore
+TemporaryFileLock = utils.TemporaryFileLock
+open_atomic = utils.open_atomic
+
+__all__ = [
+    'lock',
+    'unlock',
+    'LOCK_EX',
+    'LOCK_SH',
+    'LOCK_NB',
+    'LOCK_UN',
+    'LockFlags',
+    'LockException',
+    'Lock',
+    'RLock',
+    'AlreadyLocked',
+    'BoundedSemaphore',
+    'open_atomic',
+    'RedisLock',
+]
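A minimal usage sketch of the API exported above (a sketch assuming portalocker 2.x's ``Lock(filename, mode, timeout)`` signature; the file name is illustrative, and these are advisory locks on Linux/Unix as noted)::

    import portalocker

    # High level: Lock opens the file itself and retries until `timeout`.
    with portalocker.Lock("counter.txt", mode="a", timeout=5) as fh:
        fh.write("written while holding an exclusive lock\n")

    # Low level: lock/unlock an already-open file with explicit flags.
    with open("counter.txt", "a") as fh:
        portalocker.lock(fh, portalocker.LOCK_EX | portalocker.LOCK_NB)
        fh.write("another line\n")
        portalocker.unlock(fh)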
env-llmeval/lib/python3.10/site-packages/portalocker/__main__.py
ADDED
@@ -0,0 +1,98 @@
+import argparse
+import logging
+import os
+import pathlib
+import re
+
+base_path = pathlib.Path(__file__).parent.parent
+src_path = base_path / 'portalocker'
+dist_path = base_path / 'dist'
+_default_output_path = base_path / 'dist' / 'portalocker.py'
+
+_RELATIVE_IMPORT_RE = re.compile(r'^from \. import (?P<names>.+)$')
+_USELESS_ASSIGNMENT_RE = re.compile(r'^(?P<name>\w+) = \1\n$')
+
+_TEXT_TEMPLATE = """'''
+{}
+'''
+
+"""
+
+logger = logging.getLogger(__name__)
+
+
+def main(argv=None):
+    parser = argparse.ArgumentParser()
+
+    subparsers = parser.add_subparsers(required=True)
+    combine_parser = subparsers.add_parser(
+        'combine',
+        help='Combine all Python files into a single unified `portalocker.py` '
+        'file for easy distribution',
+    )
+    combine_parser.add_argument(
+        '--output-file',
+        '-o',
+        type=argparse.FileType('w'),
+        default=str(_default_output_path),
+    )
+
+    combine_parser.set_defaults(func=combine)
+    args = parser.parse_args(argv)
+    args.func(args)
+
+
+def _read_file(path, seen_files):
+    if path in seen_files:
+        return
+
+    names = set()
+    seen_files.add(path)
+    for line in path.open():
+        if match := _RELATIVE_IMPORT_RE.match(line):
+            for name in match.group('names').split(','):
+                name = name.strip()
+                names.add(name)
+                yield from _read_file(src_path / f'{name}.py', seen_files)
+        else:
+            yield _clean_line(line, names)
+
+
+def _clean_line(line, names):
+    # Replace `some_import.spam` with `spam`
+    if names:
+        joined_names = '|'.join(names)
+        line = re.sub(fr'\b({joined_names})\.', '', line)
+
+    # Replace useless assignments (e.g. `spam = spam`)
+    return _USELESS_ASSIGNMENT_RE.sub('', line)
+
+
+def combine(args):
+    output_file = args.output_file
+    pathlib.Path(output_file.name).parent.mkdir(parents=True, exist_ok=True)
+
+    output_file.write(
+        _TEXT_TEMPLATE.format((base_path / 'README.rst').read_text()),
+    )
+    output_file.write(
+        _TEXT_TEMPLATE.format((base_path / 'LICENSE').read_text()),
+    )
+
+    seen_files = set()
+    for line in _read_file(src_path / '__init__.py', seen_files):
+        output_file.write(line)
+
+    output_file.flush()
+    output_file.close()
+
+    logger.info(f'Wrote combined file to {output_file.name}')
+    # Run black and ruff if available. If not then just run the file.
+    os.system(f'black {output_file.name}')
+    os.system(f'ruff --fix {output_file.name}')
+    os.system(f'python3 {output_file.name}')
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+    main()
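For illustration, the inlining step above works line by line: relative imports are matched by ``_RELATIVE_IMPORT_RE`` and replaced with the imported module's own source. A standalone sketch of the match (not invoking the module itself)::

    import re

    pattern = re.compile(r'^from \. import (?P<names>.+)$')
    match = pattern.match('from . import constants, exceptions')
    print([name.strip() for name in match.group('names').split(',')])
    # -> ['constants', 'exceptions']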
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc
ADDED
Binary file (422 Bytes)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (996 Bytes)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc
ADDED
Binary file (2.45 kB)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc
ADDED
Binary file (945 Bytes)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc
ADDED
Binary file (1.07 kB)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc
ADDED
Binary file (2.5 kB)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc
ADDED
Binary file (6.63 kB)
env-llmeval/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (15.5 kB)
env-llmeval/lib/python3.10/site-packages/portalocker/constants.py
ADDED
@@ -0,0 +1,58 @@
+'''
+Locking constants
+
+Lock types:
+
+- `EXCLUSIVE` exclusive lock
+- `SHARED` shared lock
+
+Lock flags:
+
+- `NON_BLOCKING` non-blocking
+
+Manually unlock, only needed internally
+
+- `UNBLOCK` unlock
+'''
+import enum
+import os
+
+# The actual tests will execute the code anyhow so the following code can
+# safely be ignored from the coverage tests
+if os.name == 'nt':  # pragma: no cover
+    import msvcrt
+
+    #: exclusive lock
+    LOCK_EX = 0x1
+    #: shared lock
+    LOCK_SH = 0x2
+    #: non-blocking
+    LOCK_NB = 0x4
+    #: unlock
+    LOCK_UN = msvcrt.LK_UNLCK  # type: ignore
+
+elif os.name == 'posix':  # pragma: no cover
+    import fcntl
+
+    #: exclusive lock
+    LOCK_EX = fcntl.LOCK_EX
+    #: shared lock
+    LOCK_SH = fcntl.LOCK_SH
+    #: non-blocking
+    LOCK_NB = fcntl.LOCK_NB
+    #: unlock
+    LOCK_UN = fcntl.LOCK_UN
+
+else:  # pragma: no cover
+    raise RuntimeError('PortaLocker only defined for nt and posix platforms')
+
+
+class LockFlags(enum.IntFlag):
+    #: exclusive lock
+    EXCLUSIVE = LOCK_EX
+    #: shared lock
+    SHARED = LOCK_SH
+    #: non-blocking
+    NON_BLOCKING = LOCK_NB
+    #: unlock
+    UNBLOCK = LOCK_UN
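Because ``LockFlags`` is an ``enum.IntFlag``, the platform constants above combine with ``|`` and test with ``&``; a small sketch::

    import portalocker

    flags = portalocker.LockFlags.EXCLUSIVE | portalocker.LockFlags.NON_BLOCKING
    print(bool(flags & portalocker.LockFlags.NON_BLOCKING))  # True
    # On posix these map to fcntl.LOCK_EX | fcntl.LOCK_NB under the hood.
    print(int(flags))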
env-llmeval/lib/python3.10/site-packages/portalocker/exceptions.py
ADDED
@@ -0,0 +1,27 @@
+import typing
+
+
+class BaseLockException(Exception):  # noqa: N818
+    # Error codes:
+    LOCK_FAILED = 1
+
+    def __init__(
+        self,
+        *args: typing.Any,
+        fh: typing.Union[typing.IO, None, int] = None,
+        **kwargs: typing.Any,
+    ) -> None:
+        self.fh = fh
+        Exception.__init__(self, *args)
+
+
+class LockException(BaseLockException):
+    pass
+
+
+class AlreadyLocked(LockException):
+    pass
+
+
+class FileToLarge(LockException):
+    pass
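Since ``AlreadyLocked`` subclasses ``LockException`` (itself a ``BaseLockException``), a broad handler can still recover the handle stored via ``fh=``; a sketch assuming another process already holds the lock on an illustrative file::

    import portalocker

    fh = open("busy.txt", "a")  # illustrative file name
    try:
        portalocker.lock(fh, portalocker.LOCK_EX | portalocker.LOCK_NB)
    except portalocker.AlreadyLocked:
        print("someone else holds the lock")
    except portalocker.LockException as exc:
        print("locking failed for", exc.fh)  # fh= stored by BaseLockException
    finally:
        fh.close()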
env-llmeval/lib/python3.10/site-packages/portalocker/portalocker.py
ADDED
@@ -0,0 +1,117 @@
+import contextlib
+import os
+import typing
+
+from . import constants, exceptions
+
+# Alias for readability. Due to import recursion issues we cannot do:
+# from .constants import LockFlags
+LockFlags = constants.LockFlags
+
+
+if os.name == 'nt':  # pragma: no cover
+    import msvcrt
+
+    import pywintypes
+    import win32con
+    import win32file
+    import winerror
+
+    __overlapped = pywintypes.OVERLAPPED()
+
+    def lock(file_: typing.Union[typing.IO, int], flags: LockFlags):
+        # Windows locking does not support locking through `fh.fileno()` so
+        # we cast it to make mypy and pyright happy
+        file_ = typing.cast(typing.IO, file_)
+
+        mode = 0
+        if flags & LockFlags.NON_BLOCKING:
+            mode |= win32con.LOCKFILE_FAIL_IMMEDIATELY
+
+        if flags & LockFlags.EXCLUSIVE:
+            mode |= win32con.LOCKFILE_EXCLUSIVE_LOCK
+
+        # Save the old position so we can go back to that position but
+        # still lock from the beginning of the file
+        savepos = file_.tell()
+        if savepos:
+            file_.seek(0)
+
+        os_fh = msvcrt.get_osfhandle(file_.fileno())  # type: ignore
+        try:
+            win32file.LockFileEx(os_fh, mode, 0, -0x10000, __overlapped)
+        except pywintypes.error as exc_value:
+            # error: (33, 'LockFileEx', 'The process cannot access the file
+            # because another process has locked a portion of the file.')
+            if exc_value.winerror == winerror.ERROR_LOCK_VIOLATION:
+                raise exceptions.AlreadyLocked(
+                    exceptions.LockException.LOCK_FAILED,
+                    exc_value.strerror,
+                    fh=file_,
+                ) from exc_value
+            else:
+                # Q: Are there exceptions/codes we should be dealing with
+                # here?
+                raise
+        finally:
+            if savepos:
+                file_.seek(savepos)
+
+    def unlock(file_: typing.IO):
+        try:
+            savepos = file_.tell()
+            if savepos:
+                file_.seek(0)
+
+            os_fh = msvcrt.get_osfhandle(file_.fileno())  # type: ignore
+            try:
+                win32file.UnlockFileEx(
+                    os_fh,
+                    0,
+                    -0x10000,
+                    __overlapped,
+                )
+            except pywintypes.error as exc:
+                if exc.winerror != winerror.ERROR_NOT_LOCKED:
+                    # Q: Are there exceptions/codes we should be
+                    # dealing with here?
+                    raise
+            finally:
+                if savepos:
+                    file_.seek(savepos)
+        except OSError as exc:
+            raise exceptions.LockException(
+                exceptions.LockException.LOCK_FAILED,
+                exc.strerror,
+                fh=file_,
+            ) from exc
+
+elif os.name == 'posix':  # pragma: no cover
+    import fcntl
+
+    def lock(file_: typing.Union[typing.IO, int], flags: LockFlags):
+        locking_exceptions = (IOError,)
+        with contextlib.suppress(NameError):
+            locking_exceptions += (BlockingIOError,)  # type: ignore
+        # Locking with NON_BLOCKING without EXCLUSIVE or SHARED enabled results
+        # in an error
+        if (flags & LockFlags.NON_BLOCKING) and not flags & (
+            LockFlags.SHARED | LockFlags.EXCLUSIVE
+        ):
+            raise RuntimeError(
+                'When locking in non-blocking mode the SHARED '
+                'or EXCLUSIVE flag must be specified as well',
+            )
+
+        try:
+            fcntl.flock(file_, flags)
+        except locking_exceptions as exc_value:
+            # The exception code varies on different systems so we'll catch
+            # every IO error
+            raise exceptions.LockException(exc_value, fh=file_) from exc_value
+
+    def unlock(file_: typing.IO):
+        fcntl.flock(file_.fileno(), LockFlags.UNBLOCK)
+
+else:  # pragma: no cover
+    raise RuntimeError('PortaLocker only defined for nt and posix platforms')
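A sketch of the posix-side contract above: ``NON_BLOCKING`` on its own raises ``RuntimeError``, while ``EXCLUSIVE | NON_BLOCKING`` either locks immediately or raises ``LockException`` (file name illustrative)::

    import portalocker

    with open("data.txt", "a") as fh:
        try:
            portalocker.lock(fh, portalocker.LockFlags.NON_BLOCKING)
        except RuntimeError as exc:
            print(exc)  # SHARED or EXCLUSIVE must accompany NON_BLOCKING

        portalocker.lock(
            fh,
            portalocker.LockFlags.EXCLUSIVE | portalocker.LockFlags.NON_BLOCKING,
        )
        portalocker.unlock(fh)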
env-llmeval/lib/python3.10/site-packages/portalocker/py.typed
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/portalocker/redis.py
ADDED
@@ -0,0 +1,236 @@
1 |
+
import _thread
|
2 |
+
import json
|
3 |
+
import logging
|
4 |
+
import random
|
5 |
+
import time
|
6 |
+
import typing
|
7 |
+
|
8 |
+
from redis import client
|
9 |
+
|
10 |
+
from . import exceptions, utils
|
11 |
+
|
12 |
+
logger = logging.getLogger(__name__)
|
13 |
+
|
14 |
+
DEFAULT_UNAVAILABLE_TIMEOUT = 1
|
15 |
+
DEFAULT_THREAD_SLEEP_TIME = 0.1
|
16 |
+
|
17 |
+
|
18 |
+
class PubSubWorkerThread(client.PubSubWorkerThread): # type: ignore
|
19 |
+
def run(self):
|
20 |
+
try:
|
21 |
+
super().run()
|
22 |
+
except Exception: # pragma: no cover
|
23 |
+
_thread.interrupt_main()
|
24 |
+
raise
|
25 |
+
|
26 |
+
|
27 |
+
class RedisLock(utils.LockBase):
|
28 |
+
'''
|
29 |
+
An extremely reliable Redis lock based on pubsub with a keep-alive thread
|
30 |
+
|
31 |
+
As opposed to most Redis locking systems based on key/value pairs,
|
32 |
+
this locking method is based on the pubsub system. The big advantage is
|
33 |
+
that if the connection gets killed due to network issues, crashing
|
34 |
+
processes or otherwise, it will still immediately unlock instead of
|
35 |
+
waiting for a lock timeout.
|
36 |
+
|
37 |
+
To make sure both sides of the lock know about the connection state it is
|
38 |
+
recommended to set the `health_check_interval` when creating the redis
|
39 |
+
connection..
|
40 |
+
|
41 |
+
Args:
|
42 |
+
channel: the redis channel to use as locking key.
|
43 |
+
connection: an optional redis connection if you already have one
|
44 |
+
or if you need to specify the redis connection
|
45 |
+
timeout: timeout when trying to acquire a lock
|
46 |
+
check_interval: check interval while waiting
|
47 |
+
fail_when_locked: after the initial lock failed, return an error
|
48 |
+
or lock the file. This does not wait for the timeout.
|
49 |
+
thread_sleep_time: sleep time between fetching messages from redis to
|
50 |
+
prevent a busy/wait loop. In the case of lock conflicts this
|
51 |
+
increases the time it takes to resolve the conflict. This should
|
52 |
+
be smaller than the `check_interval` to be useful.
|
53 |
+
unavailable_timeout: If the conflicting lock is properly connected
|
54 |
+
this should never exceed twice your redis latency. Note that this
|
55 |
+
will increase the wait time possibly beyond your `timeout` and is
|
56 |
+
always executed if a conflict arises.
|
57 |
+
redis_kwargs: The redis connection arguments if no connection is
|
58 |
+
given. The `DEFAULT_REDIS_KWARGS` are used as default, if you want
|
59 |
+
to override these you need to explicitly specify a value (e.g.
|
60 |
+
`health_check_interval=0`)
|
61 |
+
|
62 |
+
'''
|
63 |
+
|
64 |
+
redis_kwargs: typing.Dict[str, typing.Any]
|
65 |
+
thread: typing.Optional[PubSubWorkerThread]
|
66 |
+
channel: str
|
67 |
+
timeout: float
|
68 |
+
connection: typing.Optional[client.Redis]
|
69 |
+
    pubsub: typing.Optional[client.PubSub] = None
    close_connection: bool

    DEFAULT_REDIS_KWARGS: typing.ClassVar[typing.Dict[str, typing.Any]] = dict(
        health_check_interval=10,
    )

    def __init__(
        self,
        channel: str,
        connection: typing.Optional[client.Redis] = None,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = False,
        thread_sleep_time: float = DEFAULT_THREAD_SLEEP_TIME,
        unavailable_timeout: float = DEFAULT_UNAVAILABLE_TIMEOUT,
        redis_kwargs: typing.Optional[typing.Dict] = None,
    ):
        # We don't want to close connections given as an argument
        self.close_connection = not connection

        self.thread = None
        self.channel = channel
        self.connection = connection
        self.thread_sleep_time = thread_sleep_time
        self.unavailable_timeout = unavailable_timeout
        self.redis_kwargs = redis_kwargs or dict()

        for key, value in self.DEFAULT_REDIS_KWARGS.items():
            self.redis_kwargs.setdefault(key, value)

        super().__init__(
            timeout=timeout,
            check_interval=check_interval,
            fail_when_locked=fail_when_locked,
        )

    def get_connection(self) -> client.Redis:
        if not self.connection:
            self.connection = client.Redis(**self.redis_kwargs)

        return self.connection

    def channel_handler(self, message):
        if message.get('type') != 'message':  # pragma: no cover
            return

        try:
            data = json.loads(message.get('data'))
        except TypeError:  # pragma: no cover
            logger.debug('TypeError while parsing: %r', message)
            return

        assert self.connection is not None
        self.connection.publish(data['response_channel'], str(time.time()))

    @property
    def client_name(self):
        return f'{self.channel}-lock'

    def acquire(
        self,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = None,
    ):
        timeout = utils.coalesce(timeout, self.timeout, 0.0)
        check_interval = utils.coalesce(
            check_interval,
            self.check_interval,
            0.0,
        )
        fail_when_locked = utils.coalesce(
            fail_when_locked,
            self.fail_when_locked,
        )

        assert not self.pubsub, 'This lock is already active'
        connection = self.get_connection()

        timeout_generator = self._timeout_generator(timeout, check_interval)
        for _ in timeout_generator:  # pragma: no branch
            subscribers = connection.pubsub_numsub(self.channel)[0][1]

            if subscribers:
                logger.debug(
                    'Found %d lock subscribers for %s',
                    subscribers,
                    self.channel,
                )

                if self.check_or_kill_lock(
                    connection,
                    self.unavailable_timeout,
                ):  # pragma: no branch
                    continue
                else:  # pragma: no cover
                    subscribers = 0

            # Note: this should not be changed to an elif because the if
            # above can still end up here
            if not subscribers:
                connection.client_setname(self.client_name)
                self.pubsub = connection.pubsub()
                self.pubsub.subscribe(**{self.channel: self.channel_handler})
                self.thread = PubSubWorkerThread(
                    self.pubsub,
                    sleep_time=self.thread_sleep_time,
                )
                self.thread.start()

                subscribers = connection.pubsub_numsub(self.channel)[0][1]
                if subscribers == 1:  # pragma: no branch
                    return self
                else:  # pragma: no cover
                    # Race condition, let's try again
                    self.release()

        if fail_when_locked:  # pragma: no cover
            raise exceptions.AlreadyLocked(exceptions)

        raise exceptions.AlreadyLocked(exceptions)

    def check_or_kill_lock(self, connection, timeout):
        # Random channel name to get messages back from the lock
        response_channel = f'{self.channel}-{random.random()}'

        pubsub = connection.pubsub()
        pubsub.subscribe(response_channel)
        connection.publish(
            self.channel,
            json.dumps(
                dict(
                    response_channel=response_channel,
                    message='ping',
                ),
            ),
        )

        check_interval = min(self.thread_sleep_time, timeout / 10)
        for _ in self._timeout_generator(
            timeout,
            check_interval,
        ):  # pragma: no branch
            if pubsub.get_message(timeout=check_interval):
                pubsub.close()
                return True

        for client_ in connection.client_list('pubsub'):  # pragma: no cover
            if client_.get('name') == self.client_name:
                logger.warning('Killing unavailable redis client: %r', client_)
                connection.client_kill_filter(client_.get('id'))
        return None

    def release(self):
        if self.thread:  # pragma: no branch
            self.thread.stop()
            self.thread.join()
            self.thread = None
            time.sleep(0.01)

        if self.pubsub:  # pragma: no branch
            self.pubsub.unsubscribe(self.channel)
            self.pubsub.close()
            self.pubsub = None

    def __del__(self):
        self.release()
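
To make the pub/sub flow above concrete, here is a minimal usage sketch. It assumes the class shown is portalocker's `RedisLock` (defined earlier in this file) and that a Redis server is reachable with the default client settings; both are assumptions of this sketch, not part of the packaged file.

    # Minimal sketch, assuming the class above is portalocker.RedisLock and a
    # Redis server is running with default connection settings (assumptions).
    from portalocker import RedisLock

    lock = RedisLock('my-resource')  # the channel name doubles as the lock name
    lock.acquire(timeout=5)          # subscribe; a lone subscriber holds the lock
    try:
        ...                          # critical section
    finally:
        lock.release()               # stop the worker thread and unsubscribe
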
env-llmeval/lib/python3.10/site-packages/portalocker/utils.py
ADDED
@@ -0,0 +1,563 @@
import abc
import atexit
import contextlib
import logging
import os
import pathlib
import random
import tempfile
import time
import typing
import warnings

from . import constants, exceptions, portalocker

logger = logging.getLogger(__name__)

DEFAULT_TIMEOUT = 5
DEFAULT_CHECK_INTERVAL = 0.25
DEFAULT_FAIL_WHEN_LOCKED = False
LOCK_METHOD = constants.LockFlags.EXCLUSIVE | constants.LockFlags.NON_BLOCKING

__all__ = [
    'Lock',
    'open_atomic',
]

Filename = typing.Union[str, pathlib.Path]


def coalesce(*args: typing.Any, test_value: typing.Any = None) -> typing.Any:
    '''Simple coalescing function that returns the first value that is not
    equal to the `test_value`. Or `None` if no value is valid. Usually this
    means that the last given value is the default value.

    Note that the `test_value` is compared using an identity check
    (i.e. `value is not test_value`) so changing the `test_value` won't work
    for all values.

    >>> coalesce(None, 1)
    1
    >>> coalesce()

    >>> coalesce(0, False, True)
    0
    >>> coalesce(0, False, True, test_value=0)
    False

    # This won't work because of the `is not test_value` identity testing:
    >>> coalesce([], dict(spam='eggs'), test_value=[])
    []
    '''
    return next((arg for arg in args if arg is not test_value), None)


@contextlib.contextmanager
def open_atomic(
    filename: Filename,
    binary: bool = True,
) -> typing.Iterator[typing.IO]:
    '''Open a file for atomic writing. Instead of locking this method allows
    you to write the entire file and move it to the actual location. Note that
    this makes the assumption that a rename is atomic on your platform which
    is generally the case but not a guarantee.

    http://docs.python.org/library/os.html#os.rename

    >>> filename = 'test_file.txt'
    >>> if os.path.exists(filename):
    ...     os.remove(filename)

    >>> with open_atomic(filename) as fh:
    ...     written = fh.write(b'test')
    >>> assert os.path.exists(filename)
    >>> os.remove(filename)

    >>> import pathlib
    >>> path_filename = pathlib.Path('test_file.txt')

    >>> with open_atomic(path_filename) as fh:
    ...     written = fh.write(b'test')
    >>> assert path_filename.exists()
    >>> path_filename.unlink()
    '''
    # `pathlib.Path` cast in case `path` is a `str`
    path: pathlib.Path = pathlib.Path(filename)

    assert not path.exists(), '%r exists' % path

    # Create the parent directory if it doesn't exist
    path.parent.mkdir(parents=True, exist_ok=True)

    temp_fh = tempfile.NamedTemporaryFile(
        mode=binary and 'wb' or 'w',
        dir=str(path.parent),
        delete=False,
    )
    yield temp_fh
    temp_fh.flush()
    os.fsync(temp_fh.fileno())
    temp_fh.close()
    try:
        os.rename(temp_fh.name, path)
    finally:
        with contextlib.suppress(Exception):
            os.remove(temp_fh.name)


class LockBase(abc.ABC):  # pragma: no cover
    #: timeout when trying to acquire a lock
    timeout: float
    #: check interval while waiting for `timeout`
    check_interval: float
    #: skip the timeout and immediately fail if the initial lock fails
    fail_when_locked: bool

    def __init__(
        self,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = None,
    ):
        self.timeout = coalesce(timeout, DEFAULT_TIMEOUT)
        self.check_interval = coalesce(check_interval, DEFAULT_CHECK_INTERVAL)
        self.fail_when_locked = coalesce(
            fail_when_locked,
            DEFAULT_FAIL_WHEN_LOCKED,
        )

    @abc.abstractmethod
    def acquire(
        self,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = None,
    ):
        return NotImplemented

    def _timeout_generator(
        self,
        timeout: typing.Optional[float],
        check_interval: typing.Optional[float],
    ) -> typing.Iterator[int]:
        f_timeout = coalesce(timeout, self.timeout, 0.0)
        f_check_interval = coalesce(check_interval, self.check_interval, 0.0)

        yield 0
        i = 0

        start_time = time.perf_counter()
        while start_time + f_timeout > time.perf_counter():
            i += 1
            yield i

            # Take slow lock checks into account to stay within the interval
            since_start_time = time.perf_counter() - start_time
            time.sleep(max(0.001, (i * f_check_interval) - since_start_time))

    @abc.abstractmethod
    def release(self):
        return NotImplemented

    def __enter__(self):
        return self.acquire()

    def __exit__(
        self,
        exc_type: typing.Optional[typing.Type[BaseException]],
        exc_value: typing.Optional[BaseException],
        traceback: typing.Any,  # Should be typing.TracebackType
    ) -> typing.Optional[bool]:
        self.release()
        return None

    def __delete__(self, instance):
        instance.release()


class Lock(LockBase):
    '''Lock manager with built-in timeout

    Args:
        filename: filename
        mode: the open mode, 'a' or 'ab' should be used for writing. When mode
            contains `w` the file will be truncated to 0 bytes.
        timeout: timeout when trying to acquire a lock
        check_interval: check interval while waiting
        fail_when_locked: after the initial lock failed, return an error
            or lock the file. This does not wait for the timeout.
        **file_open_kwargs: The kwargs for the `open(...)` call

    fail_when_locked is useful when multiple threads/processes can race
    when creating a file. If set to true, the initial lock failure raises
    an AlreadyLocked exception immediately instead of waiting for the
    timeout.

    Note that the file is opened first and locked later. So using 'w' as
    mode will result in truncate _BEFORE_ the lock is checked.
    '''

    def __init__(
        self,
        filename: Filename,
        mode: str = 'a',
        timeout: typing.Optional[float] = None,
        check_interval: float = DEFAULT_CHECK_INTERVAL,
        fail_when_locked: bool = DEFAULT_FAIL_WHEN_LOCKED,
        flags: constants.LockFlags = LOCK_METHOD,
        **file_open_kwargs,
    ):
        if 'w' in mode:
            truncate = True
            mode = mode.replace('w', 'a')
        else:
            truncate = False

        if timeout is None:
            timeout = DEFAULT_TIMEOUT
        elif not (flags & constants.LockFlags.NON_BLOCKING):
            warnings.warn(
                'timeout has no effect in blocking mode',
                stacklevel=1,
            )

        self.fh: typing.Optional[typing.IO] = None
        self.filename: str = str(filename)
        self.mode: str = mode
        self.truncate: bool = truncate
        self.timeout: float = timeout
        self.check_interval: float = check_interval
        self.fail_when_locked: bool = fail_when_locked
        self.flags: constants.LockFlags = flags
        self.file_open_kwargs = file_open_kwargs

    def acquire(
        self,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = None,
    ) -> typing.IO:
        '''Acquire the locked filehandle'''

        fail_when_locked = coalesce(fail_when_locked, self.fail_when_locked)

        if (
            not (self.flags & constants.LockFlags.NON_BLOCKING)
            and timeout is not None
        ):
            warnings.warn(
                'timeout has no effect in blocking mode',
                stacklevel=1,
            )

        # If we already have a filehandle, return it
        fh: typing.Optional[typing.IO] = self.fh
        if fh:
            return fh

        # Get a new filehandler
        fh = self._get_fh()

        def try_close():  # pragma: no cover
            # Silently try to close the handle if possible, ignore all issues
            if fh is not None:
                with contextlib.suppress(Exception):
                    fh.close()

        exception = None
        # Try till the timeout has passed
        for _ in self._timeout_generator(timeout, check_interval):
            exception = None
            try:
                # Try to lock
                fh = self._get_lock(fh)
                break
            except exceptions.LockException as exc:
                # Python will automatically remove the variable from memory
                # unless you save it in a different location
                exception = exc

                # We already tried to get the lock
                # If fail_when_locked is True, stop trying
                if fail_when_locked:
                    try_close()
                    raise exceptions.AlreadyLocked(exception) from exc

                # Wait a bit

        if exception:
            try_close()
            # We got a timeout... reraising
            raise exceptions.LockException(exception)

        # Prepare the filehandle (truncate if needed)
        fh = self._prepare_fh(fh)

        self.fh = fh
        return fh

    def release(self):
        '''Releases the currently locked file handle'''
        if self.fh:
            portalocker.unlock(self.fh)
            self.fh.close()
            self.fh = None

    def _get_fh(self) -> typing.IO:
        '''Get a new filehandle'''
        return open(  # noqa: SIM115
            self.filename,
            self.mode,
            **self.file_open_kwargs,
        )

    def _get_lock(self, fh: typing.IO) -> typing.IO:
        '''
        Try to lock the given filehandle

        raises LockException if it fails'''
        portalocker.lock(fh, self.flags)
        return fh

    def _prepare_fh(self, fh: typing.IO) -> typing.IO:
        '''
        Prepare the filehandle for usage

        If `truncate` is set, the file is truncated to 0 bytes
        '''
        if self.truncate:
            fh.seek(0)
            fh.truncate(0)

        return fh


class RLock(Lock):
    '''
    A reentrant lock, functions in a similar way to threading.RLock in that it
    can be acquired multiple times. When the corresponding number of release()
    calls are made the lock will finally release the underlying file lock.
    '''

    def __init__(
        self,
        filename,
        mode='a',
        timeout=DEFAULT_TIMEOUT,
        check_interval=DEFAULT_CHECK_INTERVAL,
        fail_when_locked=False,
        flags=LOCK_METHOD,
    ):
        super().__init__(
            filename,
            mode,
            timeout,
            check_interval,
            fail_when_locked,
            flags,
        )
        self._acquire_count = 0

    def acquire(
        self,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = None,
    ) -> typing.IO:
        if self._acquire_count >= 1:
            fh = self.fh
        else:
            fh = super().acquire(timeout, check_interval, fail_when_locked)
        self._acquire_count += 1
        assert fh
        return fh

    def release(self):
        if self._acquire_count == 0:
            raise exceptions.LockException(
                'Cannot release more times than acquired',
            )

        if self._acquire_count == 1:
            super().release()
        self._acquire_count -= 1


class TemporaryFileLock(Lock):
    def __init__(
        self,
        filename='.lock',
        timeout=DEFAULT_TIMEOUT,
        check_interval=DEFAULT_CHECK_INTERVAL,
        fail_when_locked=True,
        flags=LOCK_METHOD,
    ):
        Lock.__init__(
            self,
            filename=filename,
            mode='w',
            timeout=timeout,
            check_interval=check_interval,
            fail_when_locked=fail_when_locked,
            flags=flags,
        )
        atexit.register(self.release)

    def release(self):
        Lock.release(self)
        if os.path.isfile(self.filename):  # pragma: no branch
            os.unlink(self.filename)


class BoundedSemaphore(LockBase):
    '''
    Bounded semaphore to prevent too many parallel processes from running

    This class is deprecated because multiple processes that are completely
    unrelated could end up using the same semaphore. To prevent this,
    use `NamedBoundedSemaphore` instead. The
    `NamedBoundedSemaphore` is a drop-in replacement for this class.

    >>> semaphore = BoundedSemaphore(2, directory='')
    >>> str(semaphore.get_filenames()[0])
    'bounded_semaphore.00.lock'
    >>> str(sorted(semaphore.get_random_filenames())[1])
    'bounded_semaphore.01.lock'
    '''

    lock: typing.Optional[Lock]

    def __init__(
        self,
        maximum: int,
        name: str = 'bounded_semaphore',
        filename_pattern: str = '{name}.{number:02d}.lock',
        directory: str = tempfile.gettempdir(),
        timeout: typing.Optional[float] = DEFAULT_TIMEOUT,
        check_interval: typing.Optional[float] = DEFAULT_CHECK_INTERVAL,
        fail_when_locked: typing.Optional[bool] = True,
    ):
        self.maximum = maximum
        self.name = name
        self.filename_pattern = filename_pattern
        self.directory = directory
        self.lock: typing.Optional[Lock] = None
        super().__init__(
            timeout=timeout,
            check_interval=check_interval,
            fail_when_locked=fail_when_locked,
        )

        if not name or name == 'bounded_semaphore':
            warnings.warn(
                '`BoundedSemaphore` without an explicit `name` '
                'argument is deprecated, use NamedBoundedSemaphore',
                DeprecationWarning,
                stacklevel=1,
            )

    def get_filenames(self) -> typing.Sequence[pathlib.Path]:
        return [self.get_filename(n) for n in range(self.maximum)]

    def get_random_filenames(self) -> typing.Sequence[pathlib.Path]:
        filenames = list(self.get_filenames())
        random.shuffle(filenames)
        return filenames

    def get_filename(self, number) -> pathlib.Path:
        return pathlib.Path(self.directory) / self.filename_pattern.format(
            name=self.name,
            number=number,
        )

    def acquire(
        self,
        timeout: typing.Optional[float] = None,
        check_interval: typing.Optional[float] = None,
        fail_when_locked: typing.Optional[bool] = None,
    ) -> typing.Optional[Lock]:
        assert not self.lock, 'Already locked'

        filenames = self.get_filenames()

        for n in self._timeout_generator(
            timeout,
            check_interval,
        ):  # pragma: no branch
            logger.debug('trying lock (attempt %d) %r', n, filenames)
            if self.try_lock(filenames):  # pragma: no branch
                return self.lock  # pragma: no cover

        if fail_when_locked := coalesce(
            fail_when_locked,
            self.fail_when_locked,
        ):
            raise exceptions.AlreadyLocked()

        return None

    def try_lock(self, filenames: typing.Sequence[Filename]) -> bool:
        filename: Filename
        for filename in filenames:
            logger.debug('trying lock for %r', filename)
            self.lock = Lock(filename, fail_when_locked=True)
            try:
                self.lock.acquire()
            except exceptions.AlreadyLocked:
                self.lock = None
            else:
                logger.debug('locked %r', filename)
                return True

        return False

    def release(self):  # pragma: no cover
        if self.lock is not None:
            self.lock.release()
            self.lock = None


class NamedBoundedSemaphore(BoundedSemaphore):
    '''
    Bounded semaphore to prevent too many parallel processes from running

    It's also possible to specify a timeout when acquiring the lock to wait
    for a resource to become available. This is very similar to
    `threading.BoundedSemaphore` but works across multiple processes and
    across multiple operating systems.

    Because this works across multiple processes it's important to give the
    semaphore a name. This name is used to create the lock files. If you
    don't specify a name, a random name will be generated. This means that
    you can't use the same semaphore in multiple processes unless you pass the
    semaphore object to the other processes.

    >>> semaphore = NamedBoundedSemaphore(2, name='test')
    >>> str(semaphore.get_filenames()[0])
    '...test.00.lock'

    >>> semaphore = NamedBoundedSemaphore(2)
    >>> 'bounded_semaphore' in str(semaphore.get_filenames()[0])
    True

    '''

    def __init__(
        self,
        maximum: int,
        name: typing.Optional[str] = None,
        filename_pattern: str = '{name}.{number:02d}.lock',
        directory: str = tempfile.gettempdir(),
        timeout: typing.Optional[float] = DEFAULT_TIMEOUT,
        check_interval: typing.Optional[float] = DEFAULT_CHECK_INTERVAL,
        fail_when_locked: typing.Optional[bool] = True,
    ):
        if name is None:
            name = 'bounded_semaphore.%d' % random.randint(0, 1000000)
        super().__init__(
            maximum,
            name,
            filename_pattern,
            directory,
            timeout,
            check_interval,
            fail_when_locked,
        )
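
For orientation, a minimal usage sketch of the two main entry points defined above (everything referenced is defined in this module; the sketch itself is not part of the packaged file):

    # Minimal sketch of the API in this module (assumes portalocker is
    # installed; all names used here are defined in the file above).
    from portalocker import utils

    # Exclusive, non-blocking file lock with the module defaults: retry for up
    # to DEFAULT_TIMEOUT (5 s), polling every DEFAULT_CHECK_INTERVAL (0.25 s).
    with utils.Lock('shared.log', mode='a') as fh:
        fh.write('written while holding the lock\n')

    # Cross-process bounded semaphore: at most two holders named 'demo' at once.
    semaphore = utils.NamedBoundedSemaphore(2, name='demo')
    lock = semaphore.acquire()  # a Lock on success; raises AlreadyLocked when full
    semaphore.release()
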
env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (298 kB)
env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pyx
ADDED
@@ -0,0 +1,529 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# ---------------------------------------------------------------------
# Low-level Acero bindings

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_acero cimport *
from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table,
                          RecordBatchReader)
from pyarrow.lib import frombytes, tobytes
from pyarrow._compute cimport (
    Expression, FunctionOptions, _ensure_field_ref, _true,
    unwrap_null_placement, unwrap_sort_order
)


cdef class ExecNodeOptions(_Weakrefable):
    """
    Base class for the node options.

    Use one of the subclasses to construct an options object.
    """
    __slots__ = ()  # avoid mistakenly creating attributes

    cdef void init(self, const shared_ptr[CExecNodeOptions]& sp):
        self.wrapped = sp

    cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil:
        return self.wrapped


cdef class _TableSourceNodeOptions(ExecNodeOptions):

    def _set_options(self, Table table):
        cdef:
            shared_ptr[CTable] c_table

        c_table = pyarrow_unwrap_table(table)
        self.wrapped.reset(
            new CTableSourceNodeOptions(c_table)
        )


class TableSourceNodeOptions(_TableSourceNodeOptions):
    """
    A Source node which accepts a table.

    This is the option class for the "table_source" node factory.

    Parameters
    ----------
    table : pyarrow.Table
        The table which acts as the data source.
    """

    def __init__(self, Table table):
        self._set_options(table)


cdef class _FilterNodeOptions(ExecNodeOptions):

    def _set_options(self, Expression filter_expression not None):
        self.wrapped.reset(
            new CFilterNodeOptions(<CExpression>filter_expression.unwrap())
        )


class FilterNodeOptions(_FilterNodeOptions):
    """
    Make a node which excludes some rows from batches passed through it.

    This is the option class for the "filter" node factory.

    The "filter" operation provides an option to define data filtering
    criteria. It selects rows where the given expression evaluates to true.
    Filters can be written using pyarrow.compute.Expression, and the
    expression must have a return type of boolean.

    Parameters
    ----------
    filter_expression : pyarrow.compute.Expression
    """

    def __init__(self, Expression filter_expression):
        self._set_options(filter_expression)


cdef class _ProjectNodeOptions(ExecNodeOptions):

    def _set_options(self, expressions, names=None):
        cdef:
            Expression expr
            vector[CExpression] c_expressions
            vector[c_string] c_names

        for expr in expressions:
            c_expressions.push_back(expr.unwrap())

        if names is not None:
            if len(names) != len(expressions):
                raise ValueError(
                    "The number of names should be equal to the number of expressions"
                )

            for name in names:
                c_names.push_back(<c_string>tobytes(name))

            self.wrapped.reset(
                new CProjectNodeOptions(c_expressions, c_names)
            )
        else:
            self.wrapped.reset(
                new CProjectNodeOptions(c_expressions)
            )


class ProjectNodeOptions(_ProjectNodeOptions):
    """
    Make a node which executes expressions on input batches,
    producing batches of the same length with new columns.

    This is the option class for the "project" node factory.

    The "project" operation rearranges, deletes, transforms, and
    creates columns. Each output column is computed by evaluating
    an expression against the source record batch. These must be
    scalar expressions (expressions consisting of scalar literals,
    field references and scalar functions, i.e. elementwise functions
    that return one value for each input row independent of the value
    of all other rows).

    Parameters
    ----------
    expressions : list of pyarrow.compute.Expression
        List of expressions to evaluate against the source batch. These must
        be scalar expressions.
    names : list of str, optional
        List of names for each of the output columns (same length as
        `expressions`). If `names` is not provided, the string
        representations of exprs will be used.
    """

    def __init__(self, expressions, names=None):
        self._set_options(expressions, names)


cdef class _AggregateNodeOptions(ExecNodeOptions):

    def _set_options(self, aggregates, keys=None):
        cdef:
            CAggregate c_aggr
            vector[CAggregate] c_aggregations
            vector[CFieldRef] c_keys

        for arg_names, func_name, opts, name in aggregates:
            c_aggr.function = tobytes(func_name)
            if opts is not None:
                c_aggr.options = (<FunctionOptions?>opts).wrapped
            else:
                c_aggr.options = <shared_ptr[CFunctionOptions]>nullptr
            if not isinstance(arg_names, (list, tuple)):
                arg_names = [arg_names]
            for arg in arg_names:
                c_aggr.target.push_back(_ensure_field_ref(arg))
            c_aggr.name = tobytes(name)

            c_aggregations.push_back(move(c_aggr))

        if keys is None:
            keys = []
        for name in keys:
            c_keys.push_back(_ensure_field_ref(name))

        self.wrapped.reset(
            new CAggregateNodeOptions(c_aggregations, c_keys)
        )


class AggregateNodeOptions(_AggregateNodeOptions):
    """
    Make a node which aggregates input batches, optionally grouped by keys.

    This is the option class for the "aggregate" node factory.

    Acero supports two types of aggregates: "scalar" aggregates,
    and "hash" aggregates. Scalar aggregates reduce an array or scalar
    input to a single scalar output (e.g. computing the mean of a column).
    Hash aggregates act like GROUP BY in SQL and first partition data
    based on one or more key columns, then reduce the data in each partition.
    The aggregate node supports both types of computation, and can compute
    any number of aggregations at once.

    Parameters
    ----------
    aggregates : list of tuples
        Aggregations which will be applied to the targeted fields.
        Specified as a list of tuples, where each tuple is one aggregation
        specification and consists of: aggregation target column(s) followed
        by function name, aggregation function options object and the
        output field name.
        The target column(s) specification can be a single field reference,
        an empty list or a list of fields for unary, nullary and n-ary
        aggregation functions respectively. Each field reference can be a
        string column name or expression.
    keys : list of field references, optional
        Keys by which aggregations will be grouped. Each key can reference
        a field using a string name or expression.
    """

    def __init__(self, aggregates, keys=None):
        self._set_options(aggregates, keys)


cdef class _OrderByNodeOptions(ExecNodeOptions):

    def _set_options(self, sort_keys, null_placement):
        cdef:
            vector[CSortKey] c_sort_keys

        for name, order in sort_keys:
            c_sort_keys.push_back(
                CSortKey(_ensure_field_ref(name), unwrap_sort_order(order))
            )

        self.wrapped.reset(
            new COrderByNodeOptions(
                COrdering(c_sort_keys, unwrap_null_placement(null_placement))
            )
        )


class OrderByNodeOptions(_OrderByNodeOptions):
    """
    Make a node which applies a new ordering to the data.

    Currently this node works by accumulating all data, sorting, and then
    emitting the new data with an updated batch index.
    Larger-than-memory sort is not currently supported.

    This is the option class for the "order_by" node factory.

    Parameters
    ----------
    sort_keys : sequence of (name, order) tuples
        Names of field/column keys to sort the input on,
        along with the order each field/column is sorted in.
        Accepted values for `order` are "ascending", "descending".
        Each field reference can be a string column name or expression.
    null_placement : str, default "at_end"
        Where nulls in input should be sorted, only applying to
        columns/fields mentioned in `sort_keys`.
        Accepted values are "at_start", "at_end".
    """

    def __init__(self, sort_keys=(), *, null_placement="at_end"):
        self._set_options(sort_keys, null_placement)


cdef class _HashJoinNodeOptions(ExecNodeOptions):

    def _set_options(
        self, join_type, left_keys, right_keys, left_output=None, right_output=None,
        output_suffix_for_left="", output_suffix_for_right="",
    ):
        cdef:
            CJoinType c_join_type
            vector[CFieldRef] c_left_keys
            vector[CFieldRef] c_right_keys
            vector[CFieldRef] c_left_output
            vector[CFieldRef] c_right_output

        # join type
        if join_type == "left semi":
            c_join_type = CJoinType_LEFT_SEMI
        elif join_type == "right semi":
            c_join_type = CJoinType_RIGHT_SEMI
        elif join_type == "left anti":
            c_join_type = CJoinType_LEFT_ANTI
        elif join_type == "right anti":
            c_join_type = CJoinType_RIGHT_ANTI
        elif join_type == "inner":
            c_join_type = CJoinType_INNER
        elif join_type == "left outer":
            c_join_type = CJoinType_LEFT_OUTER
        elif join_type == "right outer":
            c_join_type = CJoinType_RIGHT_OUTER
        elif join_type == "full outer":
            c_join_type = CJoinType_FULL_OUTER
        else:
            raise ValueError("Unsupported join type")

        # left/right keys
        if not isinstance(left_keys, (list, tuple)):
            left_keys = [left_keys]
        for key in left_keys:
            c_left_keys.push_back(_ensure_field_ref(key))
        if not isinstance(right_keys, (list, tuple)):
            right_keys = [right_keys]
        for key in right_keys:
            c_right_keys.push_back(_ensure_field_ref(key))

        # left/right output fields
        if left_output is not None and right_output is not None:
            for colname in left_output:
                c_left_output.push_back(_ensure_field_ref(colname))
            for colname in right_output:
                c_right_output.push_back(_ensure_field_ref(colname))

            self.wrapped.reset(
                new CHashJoinNodeOptions(
                    c_join_type, c_left_keys, c_right_keys,
                    c_left_output, c_right_output,
                    _true,
                    <c_string>tobytes(output_suffix_for_left),
                    <c_string>tobytes(output_suffix_for_right)
                )
            )
        else:
            self.wrapped.reset(
                new CHashJoinNodeOptions(
                    c_join_type, c_left_keys, c_right_keys,
                    _true,
                    <c_string>tobytes(output_suffix_for_left),
                    <c_string>tobytes(output_suffix_for_right)
                )
            )


class HashJoinNodeOptions(_HashJoinNodeOptions):
    """
    Make a node which implements a join operation using a hash join strategy.

    This is the option class for the "hashjoin" node factory.

    Parameters
    ----------
    join_type : str
        Type of join. One of "left semi", "right semi", "left anti",
        "right anti", "inner", "left outer", "right outer", "full outer".
    left_keys : str, Expression or list
        Key fields from left input. Each key can be a string column name
        or a field expression, or a list of such field references.
    right_keys : str, Expression or list
        Key fields from right input. See `left_keys` for details.
    left_output : list, optional
        List of output fields passed from left input. If left and right
        output fields are not specified, all valid fields from both left and
        right input will be output. Each field can be a string column name
        or a field expression.
    right_output : list, optional
        List of output fields passed from right input. If left and right
        output fields are not specified, all valid fields from both left and
        right input will be output. Each field can be a string column name
        or a field expression.
    output_suffix_for_left : str
        Suffix added to names of output fields coming from left input
        (used to distinguish, if necessary, between fields of the same
        name in left and right input and can be left empty if there are
        no name collisions).
    output_suffix_for_right : str
        Suffix added to names of output fields coming from right input,
        see `output_suffix_for_left` for details.
    """

    def __init__(
        self, join_type, left_keys, right_keys, left_output=None, right_output=None,
        output_suffix_for_left="", output_suffix_for_right=""
    ):
        self._set_options(
            join_type, left_keys, right_keys, left_output, right_output,
            output_suffix_for_left, output_suffix_for_right
        )


cdef class Declaration(_Weakrefable):
    """
    Helper class for declaring the nodes of an ExecPlan.

    A Declaration represents an unconstructed ExecNode, and potentially
    more since its inputs may also be Declarations or when constructed
    with ``from_sequence``.

    The possible ExecNodes to use are registered with a name,
    the "factory name", and need to be specified using this name, together
    with its corresponding ExecNodeOptions subclass.

    Parameters
    ----------
    factory_name : str
        The ExecNode factory name, such as "table_source", "filter",
        "project" etc. See the ExecNodeOptions subclasses for the exact
        factory names to use.
    options : ExecNodeOptions
        Corresponding ExecNodeOptions subclass (matching the factory name).
    inputs : list of Declaration, optional
        Input nodes for this declaration. Optional if the node is a source
        node, or when the declaration gets combined later with
        ``from_sequence``.

    Returns
    -------
    Declaration
    """
    cdef void init(self, const CDeclaration& c_decl):
        self.decl = c_decl

    @staticmethod
    cdef wrap(const CDeclaration& c_decl):
        cdef Declaration self = Declaration.__new__(Declaration)
        self.init(c_decl)
        return self

    cdef inline CDeclaration unwrap(self) nogil:
        return self.decl

    def __init__(self, factory_name, ExecNodeOptions options, inputs=None):
        cdef:
            c_string c_factory_name
            CDeclaration c_decl
            vector[CDeclaration.Input] c_inputs

        c_factory_name = tobytes(factory_name)

        if inputs is not None:
            for ipt in inputs:
                c_inputs.push_back(
                    CDeclaration.Input((<Declaration>ipt).unwrap())
                )

        c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap())
        self.init(c_decl)

    @staticmethod
    def from_sequence(decls):
        """
        Convenience factory for the common case of a simple sequence of nodes.

        Each of the declarations will be appended to the inputs of the
        subsequent declaration, and the final modified declaration will
        be returned.

        Parameters
        ----------
        decls : list of Declaration

        Returns
        -------
        Declaration
        """
        cdef:
            vector[CDeclaration] c_decls
            CDeclaration c_decl

        for decl in decls:
            c_decls.push_back((<Declaration> decl).unwrap())

        c_decl = CDeclaration.Sequence(c_decls)
        return Declaration.wrap(c_decl)

    def __str__(self):
        return frombytes(GetResultValue(DeclarationToString(self.decl)))

    def __repr__(self):
        return "<pyarrow.acero.Declaration>\n{0}".format(str(self))

    def to_table(self, bint use_threads=True):
        """
        Run the declaration and collect the results into a table.

        This method will implicitly add a sink node to the declaration
        to collect results into a table. It will then create an ExecPlan
        from the declaration, start the exec plan, block until the plan
        has finished, and return the created table.

        Parameters
        ----------
        use_threads : bool, default True
            If set to False, then all CPU work will be done on the calling
            thread. I/O tasks will still happen on the I/O executor
            and may be multi-threaded (but should not use significant CPU
            resources).

        Returns
        -------
        pyarrow.Table
        """
        cdef:
            shared_ptr[CTable] c_table

        with nogil:
            c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads))
        return pyarrow_wrap_table(c_table)

    def to_reader(self, bint use_threads=True):
        """Run the declaration and return results as a RecordBatchReader.

        For details about the parameters, see `to_table`.

        Returns
        -------
        pyarrow.RecordBatchReader
        """
        cdef:
            RecordBatchReader reader
        reader = RecordBatchReader.__new__(RecordBatchReader)
        reader.reader.reset(
            GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release()
        )
        return reader
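
A short sketch of how these declarations chain into a runnable plan, assuming a pyarrow build that re-exports this module through the public `pyarrow.acero` wrapper (that wrapper is an assumption of this sketch; the node factory names and option classes are the ones defined above):

    # Sketch: build and run a two-node Acero plan (source -> filter).
    import pyarrow as pa
    import pyarrow.compute as pc
    from pyarrow.acero import (
        Declaration, TableSourceNodeOptions, FilterNodeOptions,
    )

    table = pa.table({'a': [1, 2, 3]})
    plan = Declaration.from_sequence([
        # source node feeding the in-memory table into the plan
        Declaration('table_source', TableSourceNodeOptions(table)),
        # filter node keeping only rows where a > 1
        Declaration('filter', FilterNodeOptions(pc.field('a') > 1)),
    ])
    print(plan.to_table())  # collect the plan output into a pyarrow.Table
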
env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pxd
ADDED
@@ -0,0 +1,70 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

from pyarrow.lib cimport *
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *

cdef class UdfContext(_Weakrefable):
    cdef:
        CUdfContext c_context

    cdef void init(self, const CUdfContext& c_context)


cdef class FunctionOptions(_Weakrefable):
    cdef:
        shared_ptr[CFunctionOptions] wrapped

    cdef const CFunctionOptions* get_options(self) except NULL
    cdef void init(self, const shared_ptr[CFunctionOptions]& sp)

    cdef inline shared_ptr[CFunctionOptions] unwrap(self)


cdef class _SortOptions(FunctionOptions):
    pass


cdef CExpression _bind(Expression filter, Schema schema) except *


cdef class Expression(_Weakrefable):

    cdef:
        CExpression expr

    cdef void init(self, const CExpression& sp)

    @staticmethod
    cdef wrap(const CExpression& sp)

    cdef inline CExpression unwrap(self)

    @staticmethod
    cdef Expression _expr_or_scalar(object expr)


cdef CExpression _true

cdef CFieldRef _ensure_field_ref(value) except *

cdef CSortOrder unwrap_sort_order(order) except *

cdef CNullPlacement unwrap_null_placement(null_placement) except *
env-llmeval/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py
ADDED
@@ -0,0 +1,56 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Custom documentation additions for compute functions.
"""

function_doc_additions = {}

function_doc_additions["filter"] = """
    Examples
    --------
    >>> import pyarrow as pa
    >>> arr = pa.array(["a", "b", "c", None, "e"])
    >>> mask = pa.array([True, False, None, False, True])
    >>> arr.filter(mask)
    <pyarrow.lib.StringArray object at ...>
    [
      "a",
      "e"
    ]
    >>> arr.filter(mask, null_selection_behavior='emit_null')
    <pyarrow.lib.StringArray object at ...>
    [
      "a",
      null,
      "e"
    ]
    """

function_doc_additions["mode"] = """
    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.compute as pc
    >>> arr = pa.array([1, 1, 2, 2, 3, 2, 2, 2])
    >>> modes = pc.mode(arr, 2)
    >>> modes[0]
    <pyarrow.StructScalar: [('mode', 2), ('count', 5)]>
    >>> modes[1]
    <pyarrow.StructScalar: [('mode', 1), ('count', 2)]>
    """
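
For context, a map like `function_doc_additions` is typically spliced onto the docstrings of generated compute wrappers. A hypothetical consumer is sketched below; the helper name and the splicing logic are illustrative assumptions, not pyarrow's actual wiring:

    def append_doc_examples(func, func_name, additions=function_doc_additions):
        # Append the curated Examples section, if one exists for this function.
        extra = additions.get(func_name)
        if extra and func.__doc__:
            func.__doc__ += '\n' + extra
        return func
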
env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (370 kB)
env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pyx
ADDED
@@ -0,0 +1,1542 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from cython.operator cimport dereference as deref

from collections import namedtuple
from collections.abc import Mapping

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_python cimport *
from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema,
                          RecordBatchReader, ensure_type,
                          maybe_unbox_memory_pool, get_input_stream,
                          get_writer, native_transcoding_input_stream,
                          pyarrow_unwrap_batch, pyarrow_unwrap_schema,
                          pyarrow_unwrap_table, pyarrow_wrap_schema,
                          pyarrow_wrap_table, pyarrow_wrap_data_type,
                          pyarrow_unwrap_data_type, Table, RecordBatch,
                          StopToken, _CRecordBatchWriter)
from pyarrow.lib import frombytes, tobytes, SignalStopHandler


cdef unsigned char _single_char(s) except 0:
    val = ord(s)
    if val == 0 or val > 127:
        raise ValueError("Expecting an ASCII character")
    return <unsigned char> val


_InvalidRow = namedtuple(
    "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"),
    module=__name__)


class InvalidRow(_InvalidRow):
    """
    Description of an invalid row in a CSV file.

    Parameters
    ----------
    expected_columns : int
        The expected number of columns in the row.
    actual_columns : int
        The actual number of columns in the row.
    number : int or None
        The physical row number if known, otherwise None.
    text : str
        The contents of the row.
    """
    __slots__ = ()


cdef CInvalidRowResult _handle_invalid_row(
        handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error:
    # A negative row number means undetermined (because of parallel reading)
    row_number = c_row.number if c_row.number >= 0 else None
    row = InvalidRow(c_row.expected_columns, c_row.actual_columns,
                     row_number, frombytes(<c_string> c_row.text))
    result = handler(row)
    if result == 'error':
        return CInvalidRowResult_Error
    elif result == 'skip':
        return CInvalidRowResult_Skip
    else:
        raise ValueError("Invalid return value for invalid row handler: "
                         f"expected 'error' or 'skip', got {result!r}")

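# For context, a handler passed to ParseOptions(invalid_row_handler=...)
# receives the InvalidRow namedtuple above and returns 'skip' or 'error'
# per row. A minimal sketch (the column counts and text are whatever the
# parser saw for that row):
#
#   >>> def permissive(row):
#   ...     # Drop rows that are missing columns, fail on anything else
#   ...     if row.actual_columns < row.expected_columns:
#   ...         return 'skip'
#   ...     return 'error'
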
cdef class ReadOptions(_Weakrefable):
    """
    Options for reading CSV files.

    Parameters
    ----------
    use_threads : bool, optional (default True)
        Whether to use multiple threads to accelerate reading
    block_size : int, optional
        How many bytes to process at a time from the input stream.
        This will determine multi-threading granularity as well as
        the size of individual record batches or table chunks.
        Minimum valid value for block size is 1
    skip_rows : int, optional (default 0)
        The number of rows to skip before the column names (if any)
        and the CSV data.
    skip_rows_after_names : int, optional (default 0)
        The number of rows to skip after the column names.
        This number can be larger than the number of rows in one
        block, and empty rows are counted.
        The order of application is as follows:
        - `skip_rows` is applied (if non-zero);
        - column names are read (unless `column_names` is set);
        - `skip_rows_after_names` is applied (if non-zero).
    column_names : list, optional
        The column names of the target table. If empty, fall back on
        `autogenerate_column_names`.
    autogenerate_column_names : bool, optional (default False)
        Whether to autogenerate column names if `column_names` is empty.
        If true, column names will be of the form "f0", "f1"...
        If false, column names will be read from the first CSV row
        after `skip_rows`.
    encoding : str, optional (default 'utf8')
        The character encoding of the CSV data. Columns that cannot
        decode using this encoding can still be read as Binary.

    Examples
    --------

    Defining example data:

    >>> import io
    >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04"
    >>> print(s)
    1,2,3
    Flamingo,2,2022-03-01
    Horse,4,2022-03-02
    Brittle stars,5,2022-03-03
    Centipede,100,2022-03-04

    Ignore the first numbered row and substitute it with defined
    or autogenerated column names:

    >>> from pyarrow import csv
    >>> read_options = csv.ReadOptions(
    ...     column_names=["animals", "n_legs", "entry"],
    ...     skip_rows=1)
    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]

    >>> read_options = csv.ReadOptions(autogenerate_column_names=True,
    ...                                skip_rows=1)
    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
    pyarrow.Table
    f0: string
    f1: int64
    f2: date32[day]
    ----
    f0: [["Flamingo","Horse","Brittle stars","Centipede"]]
    f1: [[2,4,5,100]]
    f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]

    Remove the first 2 rows of the data:

    >>> read_options = csv.ReadOptions(skip_rows_after_names=2)
    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
    pyarrow.Table
    1: string
    2: int64
    3: date32[day]
    ----
    1: [["Brittle stars","Centipede"]]
    2: [[5,100]]
    3: [[2022-03-03,2022-03-04]]
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    # __init__() is not called when unpickling, initialize storage here
    def __cinit__(self, *args, **kwargs):
        self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults()))

    def __init__(self, *, use_threads=None, block_size=None, skip_rows=None,
                 skip_rows_after_names=None, column_names=None,
                 autogenerate_column_names=None, encoding='utf8'):
        if use_threads is not None:
            self.use_threads = use_threads
        if block_size is not None:
            self.block_size = block_size
        if skip_rows is not None:
            self.skip_rows = skip_rows
        if skip_rows_after_names is not None:
            self.skip_rows_after_names = skip_rows_after_names
        if column_names is not None:
            self.column_names = column_names
        if autogenerate_column_names is not None:
            self.autogenerate_column_names = autogenerate_column_names
        # Python-specific option
        self.encoding = encoding

    @property
    def use_threads(self):
        """
        Whether to use multiple threads to accelerate reading.
        """
        return deref(self.options).use_threads

    @use_threads.setter
    def use_threads(self, value):
        deref(self.options).use_threads = value

    @property
    def block_size(self):
        """
        How many bytes to process at a time from the input stream.
        This will determine multi-threading granularity as well as
        the size of individual record batches or table chunks.
        """
        return deref(self.options).block_size

    @block_size.setter
    def block_size(self, value):
        deref(self.options).block_size = value

    @property
    def skip_rows(self):
        """
        The number of rows to skip before the column names (if any)
        and the CSV data.
        See `skip_rows_after_names` for interaction description
        """
        return deref(self.options).skip_rows

    @skip_rows.setter
    def skip_rows(self, value):
        deref(self.options).skip_rows = value

    @property
    def skip_rows_after_names(self):
        """
        The number of rows to skip after the column names.
        This number can be larger than the number of rows in one
        block, and empty rows are counted.
        The order of application is as follows:
        - `skip_rows` is applied (if non-zero);
        - column names are read (unless `column_names` is set);
        - `skip_rows_after_names` is applied (if non-zero).
        """
        return deref(self.options).skip_rows_after_names

    @skip_rows_after_names.setter
    def skip_rows_after_names(self, value):
        deref(self.options).skip_rows_after_names = value

    @property
    def column_names(self):
        """
        The column names of the target table. If empty, fall back on
        `autogenerate_column_names`.
        """
        return [frombytes(s) for s in deref(self.options).column_names]

    @column_names.setter
    def column_names(self, value):
        deref(self.options).column_names.clear()
        for item in value:
            deref(self.options).column_names.push_back(tobytes(item))

    @property
    def autogenerate_column_names(self):
        """
        Whether to autogenerate column names if `column_names` is empty.
        If true, column names will be of the form "f0", "f1"...
        If false, column names will be read from the first CSV row
        after `skip_rows`.
        """
        return deref(self.options).autogenerate_column_names

    @autogenerate_column_names.setter
    def autogenerate_column_names(self, value):
        deref(self.options).autogenerate_column_names = value

    def validate(self):
        check_status(deref(self.options).Validate())

    def equals(self, ReadOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ReadOptions

        Returns
        -------
        bool
        """
        return (
            self.use_threads == other.use_threads and
            self.block_size == other.block_size and
            self.skip_rows == other.skip_rows and
            self.skip_rows_after_names == other.skip_rows_after_names and
            self.column_names == other.column_names and
            self.autogenerate_column_names ==
            other.autogenerate_column_names and
            self.encoding == other.encoding
        )

    @staticmethod
    cdef ReadOptions wrap(CCSVReadOptions options):
        out = ReadOptions()
        out.options.reset(new CCSVReadOptions(move(options)))
        out.encoding = 'utf8'  # No way to know this
        return out

    def __getstate__(self):
        return (self.use_threads, self.block_size, self.skip_rows,
                self.column_names, self.autogenerate_column_names,
                self.encoding, self.skip_rows_after_names)

    def __setstate__(self, state):
        (self.use_threads, self.block_size, self.skip_rows,
         self.column_names, self.autogenerate_column_names,
         self.encoding, self.skip_rows_after_names) = state

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

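# The `encoding` option above takes effect by transcoding the input stream
# to UTF-8 before parsing (see _get_reader further below). A small sketch,
# assuming Latin-1 encoded input:
#
#   >>> import io
#   >>> from pyarrow import csv
#   >>> data = "name\nJosé\n".encode('latin1')
#   >>> opts = csv.ReadOptions(encoding='latin1')
#   >>> csv.read_csv(io.BytesIO(data), read_options=opts)  # decoded via UTF-8
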
cdef class ParseOptions(_Weakrefable):
    """
    Options for parsing CSV files.

    Parameters
    ----------
    delimiter : 1-character string, optional (default ',')
        The character delimiting individual cells in the CSV data.
    quote_char : 1-character string or False, optional (default '"')
        The character used optionally for quoting CSV values
        (False if quoting is not allowed).
    double_quote : bool, optional (default True)
        Whether two quotes in a quoted CSV value denote a single quote
        in the data.
    escape_char : 1-character string or False, optional (default False)
        The character used optionally for escaping special characters
        (False if escaping is not allowed).
    newlines_in_values : bool, optional (default False)
        Whether newline characters are allowed in CSV values.
        Setting this to True reduces the performance of multi-threaded
        CSV reading.
    ignore_empty_lines : bool, optional (default True)
        Whether empty lines are ignored in CSV input.
        If False, an empty line is interpreted as containing a single empty
        value (assuming a one-column CSV file).
    invalid_row_handler : callable, optional (default None)
        If not None, this object is called for each CSV row that fails
        parsing (because of a mismatching number of columns).
        It should accept a single InvalidRow argument and return either
        "skip" or "error" depending on the desired outcome.

    Examples
    --------

    Defining an example file from a bytes object:

    >>> import io
    >>> s = (
    ...     "animals;n_legs;entry\\n"
    ...     "Flamingo;2;2022-03-01\\n"
    ...     "# Comment here:\\n"
    ...     "Horse;4;2022-03-02\\n"
    ...     "Brittle stars;5;2022-03-03\\n"
    ...     "Centipede;100;2022-03-04"
    ... )
    >>> print(s)
    animals;n_legs;entry
    Flamingo;2;2022-03-01
    # Comment here:
    Horse;4;2022-03-02
    Brittle stars;5;2022-03-03
    Centipede;100;2022-03-04
    >>> source = io.BytesIO(s.encode())

    Read the data from a file skipping rows with comments
    and defining the delimiter:

    >>> from pyarrow import csv
    >>> def skip_comment(row):
    ...     if row.text.startswith("# "):
    ...         return 'skip'
    ...     else:
    ...         return 'error'
    ...
    >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment)
    >>> csv.read_csv(source, parse_options=parse_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
    """
    __slots__ = ()

    def __cinit__(self, *args, **kwargs):
        self._invalid_row_handler = None
        self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults()))

    def __init__(self, *, delimiter=None, quote_char=None, double_quote=None,
                 escape_char=None, newlines_in_values=None,
                 ignore_empty_lines=None, invalid_row_handler=None):
        if delimiter is not None:
            self.delimiter = delimiter
        if quote_char is not None:
            self.quote_char = quote_char
        if double_quote is not None:
            self.double_quote = double_quote
        if escape_char is not None:
            self.escape_char = escape_char
        if newlines_in_values is not None:
            self.newlines_in_values = newlines_in_values
        if ignore_empty_lines is not None:
            self.ignore_empty_lines = ignore_empty_lines
        if invalid_row_handler is not None:
            self.invalid_row_handler = invalid_row_handler

    @property
    def delimiter(self):
        """
        The character delimiting individual cells in the CSV data.
        """
        return chr(deref(self.options).delimiter)

    @delimiter.setter
    def delimiter(self, value):
        deref(self.options).delimiter = _single_char(value)

    @property
    def quote_char(self):
        """
        The character used optionally for quoting CSV values
        (False if quoting is not allowed).
        """
        if deref(self.options).quoting:
            return chr(deref(self.options).quote_char)
        else:
            return False

    @quote_char.setter
    def quote_char(self, value):
        if value is False:
            deref(self.options).quoting = False
        else:
            deref(self.options).quote_char = _single_char(value)
            deref(self.options).quoting = True

    @property
    def double_quote(self):
        """
        Whether two quotes in a quoted CSV value denote a single quote
        in the data.
        """
        return deref(self.options).double_quote

    @double_quote.setter
    def double_quote(self, value):
        deref(self.options).double_quote = value

    @property
    def escape_char(self):
        """
        The character used optionally for escaping special characters
        (False if escaping is not allowed).
        """
        if deref(self.options).escaping:
            return chr(deref(self.options).escape_char)
        else:
            return False

    @escape_char.setter
    def escape_char(self, value):
        if value is False:
            deref(self.options).escaping = False
        else:
            deref(self.options).escape_char = _single_char(value)
            deref(self.options).escaping = True

    @property
    def newlines_in_values(self):
        """
        Whether newline characters are allowed in CSV values.
        Setting this to True reduces the performance of multi-threaded
        CSV reading.
        """
        return deref(self.options).newlines_in_values

    @newlines_in_values.setter
    def newlines_in_values(self, value):
        deref(self.options).newlines_in_values = value

    @property
    def ignore_empty_lines(self):
        """
        Whether empty lines are ignored in CSV input.
        If False, an empty line is interpreted as containing a single empty
        value (assuming a one-column CSV file).
        """
        return deref(self.options).ignore_empty_lines

    @property
    def invalid_row_handler(self):
        """
        Optional handler for invalid rows.

        If not None, this object is called for each CSV row that fails
        parsing (because of a mismatching number of columns).
        It should accept a single InvalidRow argument and return either
        "skip" or "error" depending on the desired outcome.
        """
        return self._invalid_row_handler

    @invalid_row_handler.setter
    def invalid_row_handler(self, value):
        if value is not None and not callable(value):
            raise TypeError("Expected callable or None, "
                            f"got instance of {type(value)!r}")
        self._invalid_row_handler = value
        deref(self.options).invalid_row_handler = MakeInvalidRowHandler(
            <function[PyInvalidRowCallback]> &_handle_invalid_row, value)

    @ignore_empty_lines.setter
    def ignore_empty_lines(self, value):
        deref(self.options).ignore_empty_lines = value

    def validate(self):
        check_status(deref(self.options).Validate())

    def equals(self, ParseOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ParseOptions

        Returns
        -------
        bool
        """
        return (
            self.delimiter == other.delimiter and
            self.quote_char == other.quote_char and
            self.double_quote == other.double_quote and
            self.escape_char == other.escape_char and
            self.newlines_in_values == other.newlines_in_values and
            self.ignore_empty_lines == other.ignore_empty_lines and
            self._invalid_row_handler == other._invalid_row_handler
        )

    @staticmethod
    cdef ParseOptions wrap(CCSVParseOptions options):
        out = ParseOptions()
        out.options.reset(new CCSVParseOptions(move(options)))
        return out

    def __getstate__(self):
        return (self.delimiter, self.quote_char, self.double_quote,
                self.escape_char, self.newlines_in_values,
                self.ignore_empty_lines, self.invalid_row_handler)

    def __setstate__(self, state):
        (self.delimiter, self.quote_char, self.double_quote,
         self.escape_char, self.newlines_in_values,
         self.ignore_empty_lines, self.invalid_row_handler) = state

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

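# ParseOptions composes with the other option classes; for instance, a
# tab-separated file with quoting disabled could be read like this (a
# sketch; any single ASCII character is accepted as the delimiter):
#
#   >>> import io
#   >>> from pyarrow import csv
#   >>> tsv = io.BytesIO(b"a\tb\n1\t2\n")
#   >>> opts = csv.ParseOptions(delimiter='\t', quote_char=False)
#   >>> csv.read_csv(tsv, parse_options=opts)
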
cdef class _ISO8601(_Weakrefable):
    """
    A special object indicating ISO-8601 parsing.
    """
    __slots__ = ()

    def __str__(self):
        return 'ISO8601'

    def __eq__(self, other):
        return isinstance(other, _ISO8601)


ISO8601 = _ISO8601()


cdef class ConvertOptions(_Weakrefable):
    """
    Options for converting CSV data.

    Parameters
    ----------
    check_utf8 : bool, optional (default True)
        Whether to check UTF8 validity of string columns.
    column_types : pyarrow.Schema or dict, optional
        Explicitly map column names to column types. Passing this argument
        disables type inference on the defined columns.
    null_values : list, optional
        A sequence of strings that denote nulls in the data
        (defaults are appropriate in most cases). Note that by default,
        string columns are not checked for null values. To enable
        null checking for those, specify ``strings_can_be_null=True``.
    true_values : list, optional
        A sequence of strings that denote true booleans in the data
        (defaults are appropriate in most cases).
    false_values : list, optional
        A sequence of strings that denote false booleans in the data
        (defaults are appropriate in most cases).
    decimal_point : 1-character string, optional (default '.')
        The character used as decimal point in floating-point and decimal
        data.
    strings_can_be_null : bool, optional (default False)
        Whether string / binary columns can have null values.
        If true, then strings in null_values are considered null for
        string columns.
        If false, then all strings are valid string values.
    quoted_strings_can_be_null : bool, optional (default True)
        Whether quoted values can be null.
        If true, then strings in "null_values" are also considered null
        when they appear quoted in the CSV file. Otherwise, quoted values
        are never considered null.
    include_columns : list, optional
        The names of columns to include in the Table.
        If empty, the Table will include all columns from the CSV file.
        If not empty, only these columns will be included, in this order.
    include_missing_columns : bool, optional (default False)
        If false, columns in `include_columns` but not in the CSV file will
        error out.
        If true, columns in `include_columns` but not in the CSV file will
        produce a column of nulls (whose type is selected using
        `column_types`, or null by default).
        This option is ignored if `include_columns` is empty.
    auto_dict_encode : bool, optional (default False)
        Whether to try to automatically dict-encode string / binary data.
        If true, then when type inference detects a string or binary column,
        it is dict-encoded up to `auto_dict_max_cardinality` distinct values
        (per chunk), after which it switches to regular encoding.
        This setting is ignored for non-inferred columns (those in
        `column_types`).
    auto_dict_max_cardinality : int, optional
        The maximum dictionary cardinality for `auto_dict_encode`.
        This value is per chunk.
    timestamp_parsers : list, optional
        A sequence of strptime()-compatible format strings, tried in order
        when attempting to infer or convert timestamp values (the special
        value ISO8601() can also be given). By default, a fast built-in
        ISO-8601 parser is used.

    Examples
    --------

    Defining example data:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry,fast\\n"
    ...     "Flamingo,2,01/03/2022,Yes\\n"
    ...     "Horse,4,02/03/2022,Yes\\n"
    ...     "Brittle stars,5,03/03/2022,No\\n"
    ...     "Centipede,100,04/03/2022,No\\n"
    ...     ",6,05/03/2022,"
    ... )
    >>> print(s)
    animals,n_legs,entry,fast
    Flamingo,2,01/03/2022,Yes
    Horse,4,02/03/2022,Yes
    Brittle stars,5,03/03/2022,No
    Centipede,100,04/03/2022,No
    ,6,05/03/2022,

    Change the type of a column:

    >>> import pyarrow as pa
    >>> from pyarrow import csv
    >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()})
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: double
    entry: string
    fast: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]]
    fast: [["Yes","Yes","No","No",""]]

    Define a date parsing format to get a timestamp type column
    (in case dates are not in ISO format and not converted by default):

    >>> convert_options = csv.ConvertOptions(
    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: timestamp[s]
    fast: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
    fast: [["Yes","Yes","No","No",""]]

    Specify a subset of columns to be read:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals", "n_legs"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]

    List an additional column to be included as a null-typed column:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals", "n_legs", "location"],
    ...     include_missing_columns=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    location: null
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    location: [5 nulls]

    Define columns as dictionary type (by default only the
    string/binary columns are dictionary encoded):

    >>> convert_options = csv.ConvertOptions(
    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"],
    ...     auto_dict_encode=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: dictionary<values=string, indices=int32, ordered=0>
    n_legs: int64
    entry: timestamp[s]
    fast: dictionary<values=string, indices=int32, ordered=0>
    ----
    animals: [ -- dictionary:
    ["Flamingo","Horse","Brittle stars","Centipede",""] -- indices:
    [0,1,2,3,4]]
    n_legs: [[2,4,5,100,6]]
    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
    fast: [ -- dictionary:
    ["Yes","No",""] -- indices:
    [0,0,1,1,2]]

    Set the upper limit for the number of categories. If the number of
    categories exceeds the limit, the conversion to dictionary will not
    happen:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals"],
    ...     auto_dict_encode=True,
    ...     auto_dict_max_cardinality=2)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]

    Set empty strings to missing values:

    >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"],
    ...                                      strings_can_be_null=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",null]]
    n_legs: [[2,4,5,100,6]]

    Define values to be True and False when converting a column
    into a bool type:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["fast"],
    ...     false_values=["No"],
    ...     true_values=["Yes"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    fast: bool
    ----
    fast: [[true,true,false,false,null]]
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __cinit__(self, *args, **kwargs):
        self.options.reset(
            new CCSVConvertOptions(CCSVConvertOptions.Defaults()))

    def __init__(self, *, check_utf8=None, column_types=None, null_values=None,
                 true_values=None, false_values=None, decimal_point=None,
                 strings_can_be_null=None, quoted_strings_can_be_null=None,
                 include_columns=None, include_missing_columns=None,
                 auto_dict_encode=None, auto_dict_max_cardinality=None,
                 timestamp_parsers=None):
        if check_utf8 is not None:
            self.check_utf8 = check_utf8
        if column_types is not None:
            self.column_types = column_types
        if null_values is not None:
            self.null_values = null_values
        if true_values is not None:
            self.true_values = true_values
        if false_values is not None:
            self.false_values = false_values
        if decimal_point is not None:
            self.decimal_point = decimal_point
        if strings_can_be_null is not None:
            self.strings_can_be_null = strings_can_be_null
        if quoted_strings_can_be_null is not None:
            self.quoted_strings_can_be_null = quoted_strings_can_be_null
        if include_columns is not None:
            self.include_columns = include_columns
        if include_missing_columns is not None:
            self.include_missing_columns = include_missing_columns
        if auto_dict_encode is not None:
            self.auto_dict_encode = auto_dict_encode
        if auto_dict_max_cardinality is not None:
            self.auto_dict_max_cardinality = auto_dict_max_cardinality
        if timestamp_parsers is not None:
            self.timestamp_parsers = timestamp_parsers

    @property
    def check_utf8(self):
        """
        Whether to check UTF8 validity of string columns.
        """
        return deref(self.options).check_utf8

    @check_utf8.setter
    def check_utf8(self, value):
        deref(self.options).check_utf8 = value

    @property
    def strings_can_be_null(self):
        """
        Whether string / binary columns can have null values.
        """
        return deref(self.options).strings_can_be_null

    @strings_can_be_null.setter
    def strings_can_be_null(self, value):
        deref(self.options).strings_can_be_null = value

    @property
    def quoted_strings_can_be_null(self):
        """
        Whether quoted values can be null.
        """
        return deref(self.options).quoted_strings_can_be_null

    @quoted_strings_can_be_null.setter
    def quoted_strings_can_be_null(self, value):
        deref(self.options).quoted_strings_can_be_null = value

    @property
    def column_types(self):
        """
        Explicitly map column names to column types.
        """
        d = {frombytes(item.first): pyarrow_wrap_data_type(item.second)
             for item in deref(self.options).column_types}
        return d

    @column_types.setter
    def column_types(self, value):
        cdef:
            shared_ptr[CDataType] typ

        if isinstance(value, Mapping):
            value = value.items()

        deref(self.options).column_types.clear()
        for item in value:
            if isinstance(item, Field):
                k = item.name
                v = item.type
            else:
                k, v = item
            typ = pyarrow_unwrap_data_type(ensure_type(v))
            assert typ != NULL
            deref(self.options).column_types[tobytes(k)] = typ

    @property
    def null_values(self):
        """
        A sequence of strings that denote nulls in the data.
        """
        return [frombytes(x) for x in deref(self.options).null_values]

    @null_values.setter
    def null_values(self, value):
        deref(self.options).null_values = [tobytes(x) for x in value]

    @property
    def true_values(self):
        """
        A sequence of strings that denote true booleans in the data.
        """
        return [frombytes(x) for x in deref(self.options).true_values]

    @true_values.setter
    def true_values(self, value):
        deref(self.options).true_values = [tobytes(x) for x in value]

    @property
    def false_values(self):
        """
        A sequence of strings that denote false booleans in the data.
        """
        return [frombytes(x) for x in deref(self.options).false_values]

    @false_values.setter
    def false_values(self, value):
        deref(self.options).false_values = [tobytes(x) for x in value]

    @property
    def decimal_point(self):
        """
        The character used as decimal point in floating-point and decimal
        data.
        """
        return chr(deref(self.options).decimal_point)

    @decimal_point.setter
    def decimal_point(self, value):
        deref(self.options).decimal_point = _single_char(value)

    @property
    def auto_dict_encode(self):
        """
        Whether to try to automatically dict-encode string / binary data.
        """
        return deref(self.options).auto_dict_encode

    @auto_dict_encode.setter
    def auto_dict_encode(self, value):
        deref(self.options).auto_dict_encode = value

    @property
    def auto_dict_max_cardinality(self):
        """
        The maximum dictionary cardinality for `auto_dict_encode`.

        This value is per chunk.
        """
        return deref(self.options).auto_dict_max_cardinality

    @auto_dict_max_cardinality.setter
    def auto_dict_max_cardinality(self, value):
        deref(self.options).auto_dict_max_cardinality = value

    @property
    def include_columns(self):
        """
        The names of columns to include in the Table.

        If empty, the Table will include all columns from the CSV file.
        If not empty, only these columns will be included, in this order.
        """
        return [frombytes(s) for s in deref(self.options).include_columns]

    @include_columns.setter
    def include_columns(self, value):
        deref(self.options).include_columns.clear()
        for item in value:
            deref(self.options).include_columns.push_back(tobytes(item))

    @property
    def include_missing_columns(self):
        """
        If false, columns in `include_columns` but not in the CSV file will
        error out.
        If true, columns in `include_columns` but not in the CSV file will
        produce a null column (whose type is selected using `column_types`,
        or null by default).
        This option is ignored if `include_columns` is empty.
        """
        return deref(self.options).include_missing_columns

    @include_missing_columns.setter
    def include_missing_columns(self, value):
        deref(self.options).include_missing_columns = value

    @property
    def timestamp_parsers(self):
        """
        A sequence of strptime()-compatible format strings, tried in order
        when attempting to infer or convert timestamp values (the special
        value ISO8601() can also be given). By default, a fast built-in
        ISO-8601 parser is used.
        """
        cdef:
            shared_ptr[CTimestampParser] c_parser
            c_string kind

        parsers = []
        for c_parser in deref(self.options).timestamp_parsers:
            kind = deref(c_parser).kind()
            if kind == b'strptime':
                parsers.append(frombytes(deref(c_parser).format()))
            else:
                assert kind == b'iso8601'
                parsers.append(ISO8601)

        return parsers

    @timestamp_parsers.setter
    def timestamp_parsers(self, value):
        cdef:
            vector[shared_ptr[CTimestampParser]] c_parsers

        for v in value:
            if isinstance(v, str):
                c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v)))
            elif v == ISO8601:
                c_parsers.push_back(CTimestampParser.MakeISO8601())
            else:
                raise TypeError("Expected list of str or ISO8601 objects")

        deref(self.options).timestamp_parsers = move(c_parsers)

    @staticmethod
    cdef ConvertOptions wrap(CCSVConvertOptions options):
        out = ConvertOptions()
        out.options.reset(new CCSVConvertOptions(move(options)))
        return out

    def validate(self):
        check_status(deref(self.options).Validate())

    def equals(self, ConvertOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ConvertOptions

        Returns
        -------
        bool
        """
        return (
            self.check_utf8 == other.check_utf8 and
            self.column_types == other.column_types and
            self.null_values == other.null_values and
            self.true_values == other.true_values and
            self.false_values == other.false_values and
            self.decimal_point == other.decimal_point and
            self.timestamp_parsers == other.timestamp_parsers and
            self.strings_can_be_null == other.strings_can_be_null and
            self.quoted_strings_can_be_null ==
            other.quoted_strings_can_be_null and
            self.auto_dict_encode == other.auto_dict_encode and
            self.auto_dict_max_cardinality ==
            other.auto_dict_max_cardinality and
            self.include_columns == other.include_columns and
            self.include_missing_columns == other.include_missing_columns
        )

    def __getstate__(self):
        return (self.check_utf8, self.column_types, self.null_values,
                self.true_values, self.false_values, self.decimal_point,
                self.timestamp_parsers, self.strings_can_be_null,
                self.quoted_strings_can_be_null, self.auto_dict_encode,
                self.auto_dict_max_cardinality, self.include_columns,
                self.include_missing_columns)

    def __setstate__(self, state):
        (self.check_utf8, self.column_types, self.null_values,
         self.true_values, self.false_values, self.decimal_point,
         self.timestamp_parsers, self.strings_can_be_null,
         self.quoted_strings_can_be_null, self.auto_dict_encode,
         self.auto_dict_max_cardinality, self.include_columns,
         self.include_missing_columns) = state

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

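# Since the `column_types` setter above accepts any mapping or iterable of
# fields, a pyarrow.Schema works as well as a dict, and ISO8601 can be mixed
# with strptime formats in `timestamp_parsers`. A small sketch:
#
#   >>> import pyarrow as pa
#   >>> from pyarrow import csv
#   >>> schema = pa.schema([("n_legs", pa.int8())])
#   >>> opts = csv.ConvertOptions(column_types=schema,
#   ...                           timestamp_parsers=[csv.ISO8601, "%m/%d/%Y"])
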
cdef _get_reader(input_file, ReadOptions read_options,
                 shared_ptr[CInputStream]* out):
    use_memory_map = False
    get_input_stream(input_file, use_memory_map, out)
    if read_options is not None:
        out[0] = native_transcoding_input_stream(out[0],
                                                 read_options.encoding,
                                                 'utf8')


cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out):
    if read_options is None:
        out[0] = CCSVReadOptions.Defaults()
    else:
        out[0] = deref(read_options.options)


cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out):
    if parse_options is None:
        out[0] = CCSVParseOptions.Defaults()
    else:
        out[0] = deref(parse_options.options)


cdef _get_convert_options(ConvertOptions convert_options,
                          CCSVConvertOptions* out):
    if convert_options is None:
        out[0] = CCSVConvertOptions.Defaults()
    else:
        out[0] = deref(convert_options.options)

cdef class CSVStreamingReader(RecordBatchReader):
    """An object that reads record batches incrementally from a CSV file.

    Should not be instantiated directly by user code.
    """
    cdef readonly:
        Schema schema

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, "
                        "use pyarrow.csv.open_csv() instead."
                        .format(self.__class__.__name__))

    # Note about cancellation: we cannot create a SignalStopHandler
    # by default here, as several CSVStreamingReader instances may be
    # created (including by the same thread). Handling cancellation
    # would require having the user pass the SignalStopHandler.
    # (in addition to solving ARROW-11853)

    cdef _open(self, shared_ptr[CInputStream] stream,
               CCSVReadOptions c_read_options,
               CCSVParseOptions c_parse_options,
               CCSVConvertOptions c_convert_options,
               MemoryPool memory_pool):
        cdef:
            shared_ptr[CSchema] c_schema
            CIOContext io_context

        io_context = CIOContext(maybe_unbox_memory_pool(memory_pool))

        with nogil:
            self.reader = <shared_ptr[CRecordBatchReader]> GetResultValue(
                CCSVStreamingReader.Make(
                    io_context, stream,
                    move(c_read_options), move(c_parse_options),
                    move(c_convert_options)))
            c_schema = self.reader.get().schema()

        self.schema = pyarrow_wrap_schema(c_schema)

def read_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Read a Table from a stream of CSV data.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.Table`
        Contents of the CSV file as an in-memory table.

    Examples
    --------

    Defining an example file from a bytes object:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry\\n"
    ...     "Flamingo,2,2022-03-01\\n"
    ...     "Horse,4,2022-03-02\\n"
    ...     "Brittle stars,5,2022-03-03\\n"
    ...     "Centipede,100,2022-03-04"
    ... )
    >>> print(s)
    animals,n_legs,entry
    Flamingo,2,2022-03-01
    Horse,4,2022-03-02
    Brittle stars,5,2022-03-03
    Centipede,100,2022-03-04
    >>> source = io.BytesIO(s.encode())

    Reading from the file:

    >>> from pyarrow import csv
    >>> csv.read_csv(source)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CIOContext io_context
        SharedPtrNoGIL[CCSVReader] reader
        shared_ptr[CTable] table

    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    with SignalStopHandler() as stop_handler:
        io_context = CIOContext(
            maybe_unbox_memory_pool(memory_pool),
            (<StopToken> stop_handler.stop_token).stop_token)
        reader = GetResultValue(CCSVReader.Make(
            io_context, stream,
            c_read_options, c_parse_options, c_convert_options))

        with nogil:
            table = GetResultValue(reader.get().Read())

    return pyarrow_wrap_table(table)

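# As noted in the docstring above, a path ending in a recognized compression
# extension is decompressed on the fly. A one-line sketch, assuming a
# hypothetical gzip-compressed input file exists at that path:
#
#   >>> from pyarrow import csv
#   >>> table = csv.read_csv("animals.csv.gz")  # hypothetical path
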
def open_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Open a streaming reader of CSV data.

    Reading using this function is always single-threaded.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.csv.CSVStreamingReader`
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CSVStreamingReader reader

    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    reader = CSVStreamingReader.__new__(CSVStreamingReader)
    reader._open(stream, move(c_read_options), move(c_parse_options),
                 move(c_convert_options), memory_pool)
    return reader

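# open_csv() returns a CSVStreamingReader, which is a RecordBatchReader and
# can therefore be iterated batch by batch instead of materializing a whole
# Table. A sketch of incremental processing:
#
#   >>> import io
#   >>> from pyarrow import csv
#   >>> source = io.BytesIO(b"x\n1\n2\n")
#   >>> reader = csv.open_csv(source)
#   >>> n_rows = sum(batch.num_rows for batch in reader)
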
def _raise_invalid_function_option(value, description, *,
                                   exception_class=ValueError):
    raise exception_class(f"\"{value}\" is not a valid {description}")


cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *:
    if quoting_style == "needed":
        return CQuotingStyle_Needed
    elif quoting_style == "all_valid":
        return CQuotingStyle_AllValid
    elif quoting_style == "none":
        return CQuotingStyle_None
    _raise_invalid_function_option(quoting_style, "quoting style")


cdef wrap_quoting_style(quoting_style):
    if quoting_style == CQuotingStyle_Needed:
        return 'needed'
    elif quoting_style == CQuotingStyle_AllValid:
        return 'all_valid'
    elif quoting_style == CQuotingStyle_None:
        return 'none'

cdef class WriteOptions(_Weakrefable):
    """
    Options for writing CSV files.

    Parameters
    ----------
    include_header : bool, optional (default True)
        Whether to write an initial header line with column names
    batch_size : int, optional (default 1024)
        How many rows to process together when converting and writing
        CSV data
    delimiter : 1-character string, optional (default ",")
        The character delimiting individual cells in the CSV data.
    quoting_style : str, optional (default "needed")
        Whether to quote values, and if so, which quoting style to use.
        The following values are accepted:

        - "needed" (default): only enclose values in quotes when needed.
        - "all_valid": enclose all valid values in quotes; nulls are not quoted.
        - "none": do not enclose any values in quotes; values containing
          special characters (such as quotes, cell delimiters or line endings)
          will raise an error.
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, *, include_header=None, batch_size=None,
                 delimiter=None, quoting_style=None):
        self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults()))
        if include_header is not None:
            self.include_header = include_header
        if batch_size is not None:
            self.batch_size = batch_size
        if delimiter is not None:
            self.delimiter = delimiter
        if quoting_style is not None:
            self.quoting_style = quoting_style

    @property
    def include_header(self):
        """
        Whether to write an initial header line with column names.
        """
        return deref(self.options).include_header

    @include_header.setter
    def include_header(self, value):
        deref(self.options).include_header = value

    @property
    def batch_size(self):
        """
        How many rows to process together when converting and writing
        CSV data.
        """
        return deref(self.options).batch_size

    @batch_size.setter
    def batch_size(self, value):
        deref(self.options).batch_size = value

    @property
    def delimiter(self):
        """
        The character delimiting individual cells in the CSV data.
        """
        return chr(deref(self.options).delimiter)

    @delimiter.setter
    def delimiter(self, value):
        deref(self.options).delimiter = _single_char(value)

    @property
    def quoting_style(self):
        """
        Whether to quote values, and if so, which quoting style to use.
        The following values are accepted:

        - "needed" (default): only enclose values in quotes when needed.
        - "all_valid": enclose all valid values in quotes; nulls are not quoted.
        - "none": do not enclose any values in quotes; values containing
          special characters (such as quotes, cell delimiters or line endings)
          will raise an error.
        """
        return wrap_quoting_style(deref(self.options).quoting_style)

    @quoting_style.setter
    def quoting_style(self, value):
        deref(self.options).quoting_style = unwrap_quoting_style(value)

    @staticmethod
    cdef WriteOptions wrap(CCSVWriteOptions options):
        out = WriteOptions()
        out.options.reset(new CCSVWriteOptions(move(options)))
        return out

    def validate(self):
        check_status(self.options.get().Validate())

1447 |
+
cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out):
|
1448 |
+
if write_options is None:
|
1449 |
+
out[0] = CCSVWriteOptions.Defaults()
|
1450 |
+
else:
|
1451 |
+
out[0] = deref(write_options.options)
|
1452 |
+
|
1453 |
+
|
1454 |
+
def write_csv(data, output_file, write_options=None,
|
1455 |
+
MemoryPool memory_pool=None):
|
1456 |
+
"""
|
1457 |
+
Write record batch or table to a CSV file.
|
1458 |
+
|
1459 |
+
Parameters
|
1460 |
+
----------
|
1461 |
+
data : pyarrow.RecordBatch or pyarrow.Table
|
1462 |
+
The data to write.
|
1463 |
+
output_file : string, path, pyarrow.NativeFile, or file-like object
|
1464 |
+
The location where to write the CSV data.
|
1465 |
+
write_options : pyarrow.csv.WriteOptions
|
1466 |
+
Options to configure writing the CSV data.
|
1467 |
+
memory_pool : MemoryPool, optional
|
1468 |
+
Pool for temporary allocations.
|
1469 |
+
|
1470 |
+
Examples
|
1471 |
+
--------
|
1472 |
+
|
1473 |
+
>>> import pyarrow as pa
|
1474 |
+
>>> from pyarrow import csv
|
1475 |
+
|
1476 |
+
>>> legs = pa.array([2, 4, 5, 100])
|
1477 |
+
>>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"])
|
1478 |
+
>>> entry_date = pa.array(["01/03/2022", "02/03/2022",
|
1479 |
+
... "03/03/2022", "04/03/2022"])
|
1480 |
+
>>> table = pa.table([animals, legs, entry_date],
|
1481 |
+
... names=["animals", "n_legs", "entry"])
|
1482 |
+
|
1483 |
+
>>> csv.write_csv(table, "animals.csv")
|
1484 |
+
|
1485 |
+
>>> write_options = csv.WriteOptions(include_header=False)
|
1486 |
+
>>> csv.write_csv(table, "animals.csv", write_options=write_options)
|
1487 |
+
|
1488 |
+
>>> write_options = csv.WriteOptions(delimiter=";")
|
1489 |
+
>>> csv.write_csv(table, "animals.csv", write_options=write_options)
|
1490 |
+
"""
|
1491 |
+
cdef:
|
1492 |
+
shared_ptr[COutputStream] stream
|
1493 |
+
CCSVWriteOptions c_write_options
|
1494 |
+
CMemoryPool* c_memory_pool
|
1495 |
+
CRecordBatch* batch
|
1496 |
+
CTable* table
|
1497 |
+
_get_write_options(write_options, &c_write_options)
|
1498 |
+
|
1499 |
+
get_writer(output_file, &stream)
|
1500 |
+
c_memory_pool = maybe_unbox_memory_pool(memory_pool)
|
1501 |
+
c_write_options.io_context = CIOContext(c_memory_pool)
|
1502 |
+
if isinstance(data, RecordBatch):
|
1503 |
+
batch = pyarrow_unwrap_batch(data).get()
|
1504 |
+
with nogil:
|
1505 |
+
check_status(WriteCSV(deref(batch), c_write_options, stream.get()))
|
1506 |
+
elif isinstance(data, Table):
|
1507 |
+
table = pyarrow_unwrap_table(data).get()
|
1508 |
+
with nogil:
|
1509 |
+
check_status(WriteCSV(deref(table), c_write_options, stream.get()))
|
1510 |
+
else:
|
1511 |
+
raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'")
|
1512 |
+
|
1513 |
+
|
1514 |
+
cdef class CSVWriter(_CRecordBatchWriter):
|
1515 |
+
"""
|
1516 |
+
Writer to create a CSV file.
|
1517 |
+
|
1518 |
+
Parameters
|
1519 |
+
----------
|
1520 |
+
sink : str, path, pyarrow.OutputStream or file-like object
|
1521 |
+
The location where to write the CSV data.
|
1522 |
+
schema : pyarrow.Schema
|
1523 |
+
The schema of the data to be written.
|
1524 |
+
write_options : pyarrow.csv.WriteOptions
|
1525 |
+
Options to configure writing the CSV data.
|
1526 |
+
memory_pool : MemoryPool, optional
|
1527 |
+
Pool for temporary allocations.
|
1528 |
+
"""
|
1529 |
+
|
1530 |
+
def __init__(self, sink, Schema schema, *,
|
1531 |
+
WriteOptions write_options=None, MemoryPool memory_pool=None):
|
1532 |
+
cdef:
|
1533 |
+
shared_ptr[COutputStream] c_stream
|
1534 |
+
shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema)
|
1535 |
+
CCSVWriteOptions c_write_options
|
1536 |
+
CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool)
|
1537 |
+
_get_write_options(write_options, &c_write_options)
|
1538 |
+
c_write_options.io_context = CIOContext(c_memory_pool)
|
1539 |
+
get_writer(sink, &c_stream)
|
1540 |
+
with nogil:
|
1541 |
+
self.writer = GetResultValue(MakeCSVWriter(
|
1542 |
+
c_stream, c_schema, c_write_options))
|
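The hunk above adds both the one-shot `write_csv()` (whose docstring carries its own doctests) and the incremental `CSVWriter`. A minimal sketch of the incremental path, assuming a pyarrow build that includes the `quoting_style` option introduced above and the usual `write_batch()`/context-manager interface that `_CRecordBatchWriter` provides:

```python
import pyarrow as pa
from pyarrow import csv

schema = pa.schema([("animals", pa.string()), ("n_legs", pa.int64())])
batch = pa.record_batch([pa.array(["Flamingo", "Horse"]),
                         pa.array([2, 4])], schema=schema)

# CSVWriter keeps the sink open so batches can be appended one at a
# time, unlike the one-shot write_csv().
opts = csv.WriteOptions(delimiter=";", quoting_style="all_valid")
with csv.CSVWriter("animals.csv", schema, write_options=opts) as writer:
    for _ in range(3):
        writer.write_batch(batch)
```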
env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pxd
ADDED
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: language_level = 3
+
+from pyarrow.lib cimport *
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_cuda cimport *
+
+
+cdef class Context(_Weakrefable):
+    cdef:
+        shared_ptr[CCudaContext] context
+        int device_number
+
+    cdef void init(self, const shared_ptr[CCudaContext]& ctx)
+
+
+cdef class IpcMemHandle(_Weakrefable):
+    cdef:
+        shared_ptr[CCudaIpcMemHandle] handle
+
+    cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h)
+
+
+cdef class CudaBuffer(Buffer):
+    cdef:
+        shared_ptr[CCudaBuffer] cuda_buffer
+        object base
+
+    cdef void init_cuda(self,
+                        const shared_ptr[CCudaBuffer]& buffer,
+                        object base)
+
+
+cdef class HostBuffer(Buffer):
+    cdef:
+        shared_ptr[CCudaHostBuffer] host_buffer
+
+    cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer)
+
+
+cdef class BufferReader(NativeFile):
+    cdef:
+        CCudaBufferReader* reader
+        CudaBuffer buffer
+
+
+cdef class BufferWriter(NativeFile):
+    cdef:
+        CCudaBufferWriter* writer
+        CudaBuffer buffer
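This `.pxd` only declares the Cython-level wrappers; the behavior lives in the companion `_cuda.pyx`. A hedged sketch of how the public `pyarrow.cuda` module built on these declarations is typically used (requires a CUDA-enabled pyarrow build and a visible GPU):

```python
from pyarrow import cuda

ctx = cuda.Context(0)                       # wraps a CCudaContext for device 0
cbuf = ctx.buffer_from_data(b"some bytes")  # CudaBuffer allocated on the device
host = cbuf.copy_to_host()                  # copy back into a host pa.Buffer
assert host.to_pybytes() == b"some bytes"
```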
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (121 kB).
env-llmeval/lib/python3.10/site-packages/pyarrow/_dlpack.pxi
ADDED
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+cimport cpython
+from cpython.pycapsule cimport PyCapsule_New
+
+
+cdef void dlpack_pycapsule_deleter(object dltensor) noexcept:
+    cdef DLManagedTensor* dlm_tensor
+    cdef PyObject* err_type
+    cdef PyObject* err_value
+    cdef PyObject* err_traceback
+
+    # Do nothing if the capsule has been consumed
+    if cpython.PyCapsule_IsValid(dltensor, "used_dltensor"):
+        return
+
+    # An exception may be in-flight, we must save it in case
+    # we create another one
+    cpython.PyErr_Fetch(&err_type, &err_value, &err_traceback)
+
+    dlm_tensor = <DLManagedTensor*>cpython.PyCapsule_GetPointer(dltensor, 'dltensor')
+    if dlm_tensor == NULL:
+        cpython.PyErr_WriteUnraisable(dltensor)
+    # The deleter can be NULL if there is no way for the caller
+    # to provide a reasonable destructor
+    elif dlm_tensor.deleter:
+        dlm_tensor.deleter(dlm_tensor)
+        assert (not cpython.PyErr_Occurred())
+
+    # Set the error indicator from err_type, err_value, err_traceback
+    cpython.PyErr_Restore(err_type, err_value, err_traceback)
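The deleter above implements the producer side of the DLPack capsule protocol: a consumer that takes ownership renames the capsule from `"dltensor"` to `"used_dltensor"`, so the deleter only frees the `DLManagedTensor` when the capsule dies unconsumed. A consumer-side sketch, assuming versions where both libraries speak DLPack (roughly pyarrow >= 14 and numpy >= 1.23, for primitive arrays without nulls):

```python
import numpy as np
import pyarrow as pa

arr = pa.array([1, 2, 3], type=pa.int64())
view = np.from_dlpack(arr)  # consumes the "dltensor" capsule from arr.__dlpack__()
print(view)                 # [1 2 3]
```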
env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (505 kB).
env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (132 kB).
env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (131 kB).
env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.pyx
ADDED
@@ -0,0 +1,478 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# ----------------------------------------------------------------------
+# HDFS IO implementation
+
+# cython: language_level = 3
+
+import re
+
+from pyarrow.lib cimport check_status, _Weakrefable, NativeFile
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_fs cimport *
+from pyarrow.lib import frombytes, tobytes, ArrowIOError
+
+
+_HDFS_PATH_RE = re.compile(r'hdfs://(.*):(\d+)(.*)')
+
+
+def have_libhdfs():
+    try:
+        with nogil:
+            check_status(HaveLibHdfs())
+        return True
+    except Exception:
+        return False
+
+
+def strip_hdfs_abspath(path):
+    m = _HDFS_PATH_RE.match(path)
+    if m:
+        return m.group(3)
+    else:
+        return path
+
+
+cdef class HadoopFileSystem(_Weakrefable):
+    cdef:
+        shared_ptr[CIOHadoopFileSystem] client
+
+    cdef readonly:
+        bint is_open
+        object host
+        object user
+        object kerb_ticket
+        int port
+        dict extra_conf
+
+    def _connect(self, host, port, user, kerb_ticket, extra_conf):
+        cdef HdfsConnectionConfig conf
+
+        if host is not None:
+            conf.host = tobytes(host)
+        self.host = host
+
+        conf.port = port
+        self.port = port
+
+        if user is not None:
+            conf.user = tobytes(user)
+        self.user = user
+
+        if kerb_ticket is not None:
+            conf.kerb_ticket = tobytes(kerb_ticket)
+        self.kerb_ticket = kerb_ticket
+
+        with nogil:
+            check_status(HaveLibHdfs())
+
+        if extra_conf is not None and isinstance(extra_conf, dict):
+            conf.extra_conf = {tobytes(k): tobytes(v)
+                               for k, v in extra_conf.items()}
+        self.extra_conf = extra_conf
+
+        with nogil:
+            check_status(CIOHadoopFileSystem.Connect(&conf, &self.client))
+        self.is_open = True
+
+    @classmethod
+    def connect(cls, *args, **kwargs):
+        return cls(*args, **kwargs)
+
+    def __dealloc__(self):
+        if self.is_open:
+            self.close()
+
+    def close(self):
+        """
+        Disconnect from the HDFS cluster
+        """
+        self._ensure_client()
+        with nogil:
+            check_status(self.client.get().Disconnect())
+        self.is_open = False
+
+    cdef _ensure_client(self):
+        if self.client.get() == NULL:
+            raise IOError('HDFS client improperly initialized')
+        elif not self.is_open:
+            raise IOError('HDFS client is closed')
+
+    def exists(self, path):
+        """
+        Returns True if the path is known to the cluster, False if it is not
+        (or there is an RPC error)
+        """
+        self._ensure_client()
+
+        cdef c_string c_path = tobytes(path)
+        cdef c_bool result
+        with nogil:
+            result = self.client.get().Exists(c_path)
+        return result
+
+    def isdir(self, path):
+        cdef HdfsPathInfo info
+        try:
+            self._path_info(path, &info)
+        except ArrowIOError:
+            return False
+        return info.kind == ObjectType_DIRECTORY
+
+    def isfile(self, path):
+        cdef HdfsPathInfo info
+        try:
+            self._path_info(path, &info)
+        except ArrowIOError:
+            return False
+        return info.kind == ObjectType_FILE
+
+    def get_capacity(self):
+        """
+        Get reported total capacity of file system
+
+        Returns
+        -------
+        capacity : int
+        """
+        cdef int64_t capacity = 0
+        with nogil:
+            check_status(self.client.get().GetCapacity(&capacity))
+        return capacity
+
+    def get_space_used(self):
+        """
+        Get space used on file system
+
+        Returns
+        -------
+        space_used : int
+        """
+        cdef int64_t space_used = 0
+        with nogil:
+            check_status(self.client.get().GetUsed(&space_used))
+        return space_used
+
+    def df(self):
+        """
+        Return free space on disk, like the UNIX df command
+
+        Returns
+        -------
+        space : int
+        """
+        return self.get_capacity() - self.get_space_used()
+
+    def rename(self, path, new_path):
+        cdef c_string c_path = tobytes(path)
+        cdef c_string c_new_path = tobytes(new_path)
+        with nogil:
+            check_status(self.client.get().Rename(c_path, c_new_path))
+
+    def info(self, path):
+        """
+        Return detailed HDFS information for path
+
+        Parameters
+        ----------
+        path : string
+            Path to file or directory
+
+        Returns
+        -------
+        path_info : dict
+        """
+        cdef HdfsPathInfo info
+        self._path_info(path, &info)
+        return {
+            'path': frombytes(info.name),
+            'owner': frombytes(info.owner),
+            'group': frombytes(info.group),
+            'size': info.size,
+            'block_size': info.block_size,
+            'last_modified': info.last_modified_time,
+            'last_accessed': info.last_access_time,
+            'replication': info.replication,
+            'permissions': info.permissions,
+            'kind': ('directory' if info.kind == ObjectType_DIRECTORY
+                     else 'file')
+        }
+
+    def stat(self, path):
+        """
+        Return basic file system statistics about path
+
+        Parameters
+        ----------
+        path : string
+            Path to file or directory
+
+        Returns
+        -------
+        stat : dict
+        """
+        cdef FileStatistics info
+        cdef c_string c_path = tobytes(path)
+        with nogil:
+            check_status(self.client.get()
+                         .Stat(c_path, &info))
+        return {
+            'size': info.size,
+            'kind': ('directory' if info.kind == ObjectType_DIRECTORY
+                     else 'file')
+        }
+
+    cdef _path_info(self, path, HdfsPathInfo* info):
+        cdef c_string c_path = tobytes(path)
+
+        with nogil:
+            check_status(self.client.get()
+                         .GetPathInfo(c_path, info))
+
+    def ls(self, path, bint full_info):
+        cdef:
+            c_string c_path = tobytes(path)
+            vector[HdfsPathInfo] listing
+            list results = []
+            int i
+
+        self._ensure_client()
+
+        with nogil:
+            check_status(self.client.get()
+                         .ListDirectory(c_path, &listing))
+
+        cdef const HdfsPathInfo* info
+        for i in range(<int> listing.size()):
+            info = &listing[i]
+
+            # Try to trim off the hdfs://HOST:PORT piece
+            name = strip_hdfs_abspath(frombytes(info.name))
+
+            if full_info:
+                kind = ('file' if info.kind == ObjectType_FILE
+                        else 'directory')
+
+                results.append({
+                    'kind': kind,
+                    'name': name,
+                    'owner': frombytes(info.owner),
+                    'group': frombytes(info.group),
+                    'last_modified_time': info.last_modified_time,
+                    'last_access_time': info.last_access_time,
+                    'size': info.size,
+                    'replication': info.replication,
+                    'block_size': info.block_size,
+                    'permissions': info.permissions
+                })
+            else:
+                results.append(name)
+
+        return results
+
+    def chmod(self, path, mode):
+        """
+        Change file permissions
+
+        Parameters
+        ----------
+        path : string
+            absolute path to file or directory
+        mode : int
+            POSIX-like bitmask
+        """
+        self._ensure_client()
+        cdef c_string c_path = tobytes(path)
+        cdef int c_mode = mode
+        with nogil:
+            check_status(self.client.get()
+                         .Chmod(c_path, c_mode))
+
+    def chown(self, path, owner=None, group=None):
+        """
+        Change file owner and/or group
+
+        Parameters
+        ----------
+        path : string
+            absolute path to file or directory
+        owner : string, default None
+            New owner, None for no change
+        group : string, default None
+            New group, None for no change
+        """
+        cdef:
+            c_string c_path
+            c_string c_owner
+            c_string c_group
+            const char* c_owner_ptr = NULL
+            const char* c_group_ptr = NULL
+
+        self._ensure_client()
+
+        c_path = tobytes(path)
+        if owner is not None:
+            c_owner = tobytes(owner)
+            c_owner_ptr = c_owner.c_str()
+
+        if group is not None:
+            c_group = tobytes(group)
+            c_group_ptr = c_group.c_str()
+
+        with nogil:
+            check_status(self.client.get()
+                         .Chown(c_path, c_owner_ptr, c_group_ptr))
+
+    def mkdir(self, path):
+        """
+        Create indicated directory and any necessary parent directories
+        """
+        self._ensure_client()
+        cdef c_string c_path = tobytes(path)
+        with nogil:
+            check_status(self.client.get()
+                         .MakeDirectory(c_path))
+
+    def delete(self, path, bint recursive=False):
+        """
+        Delete the indicated file or directory
+
+        Parameters
+        ----------
+        path : string
+        recursive : boolean, default False
+            If True, also delete child paths for directories
+        """
+        self._ensure_client()
+
+        cdef c_string c_path = tobytes(path)
+        with nogil:
+            check_status(self.client.get()
+                         .Delete(c_path, recursive == 1))
+
+    def open(self, path, mode='rb', buffer_size=None, replication=None,
+             default_block_size=None):
+        """
+        Open HDFS file for reading or writing
+
+        Parameters
+        ----------
+        mode : string
+            Must be one of 'rb', 'wb', 'ab'
+
+        Returns
+        -------
+        handle : HdfsFile
+        """
+        self._ensure_client()
+
+        cdef HdfsFile out = HdfsFile()
+
+        if mode not in ('rb', 'wb', 'ab'):
+            raise Exception("Mode must be 'rb' (read), "
+                            "'wb' (write, new file), or 'ab' (append)")
+
+        cdef c_string c_path = tobytes(path)
+        cdef c_bool append = False
+
+        # 0 in libhdfs means "use the default"
+        cdef int32_t c_buffer_size = buffer_size or 0
+        cdef int16_t c_replication = replication or 0
+        cdef int64_t c_default_block_size = default_block_size or 0
+
+        cdef shared_ptr[HdfsOutputStream] wr_handle
+        cdef shared_ptr[HdfsReadableFile] rd_handle
+
+        if mode in ('wb', 'ab'):
+            if mode == 'ab':
+                append = True
+
+            with nogil:
+                check_status(
+                    self.client.get()
+                    .OpenWritable(c_path, append, c_buffer_size,
+                                  c_replication, c_default_block_size,
+                                  &wr_handle))
+
+            out.set_output_stream(<shared_ptr[COutputStream]> wr_handle)
+            out.is_writable = True
+        else:
+            with nogil:
+                check_status(self.client.get()
+                             .OpenReadable(c_path, &rd_handle))
+
+            out.set_random_access_file(
+                <shared_ptr[CRandomAccessFile]> rd_handle)
+            out.is_readable = True
+
+        assert not out.closed
+
+        if c_buffer_size == 0:
+            c_buffer_size = 2 ** 16
+
+        out.mode = mode
+        out.buffer_size = c_buffer_size
+        out.parent = _HdfsFileNanny(self, out)
+        out.own_file = True
+
+        return out
+
+    def download(self, path, stream, buffer_size=None):
+        with self.open(path, 'rb') as f:
+            f.download(stream, buffer_size=buffer_size)
+
+    def upload(self, path, stream, buffer_size=None):
+        """
+        Upload file-like object to HDFS path
+        """
+        with self.open(path, 'wb') as f:
+            f.upload(stream, buffer_size=buffer_size)
+
+
+# ARROW-404: Helper class to ensure that files are closed before the
+# client. During deallocation of the extension class, the attributes are
+# decref'd which can cause the client to get closed first if the file has the
+# last remaining reference
+cdef class _HdfsFileNanny(_Weakrefable):
+    cdef:
+        object client
+        object file_handle_ref
+
+    def __cinit__(self, client, file_handle):
+        import weakref
+        self.client = client
+        self.file_handle_ref = weakref.ref(file_handle)
+
+    def __dealloc__(self):
+        fh = self.file_handle_ref()
+        if fh:
+            fh.close()
+        # avoid cyclic GC
+        self.file_handle_ref = None
+        self.client = None
+
+
+cdef class HdfsFile(NativeFile):
+    cdef readonly:
+        int32_t buffer_size
+        object mode
+        object parent
+
+    def __dealloc__(self):
+        self.parent = None
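A hedged sketch of driving the legacy client defined above through its deprecated `pyarrow.hdfs.connect` wrapper; the host/port and exact wrapper signatures are assumptions (libhdfs must be loadable and a namenode reachable), and newer code should prefer `pyarrow.fs.HadoopFileSystem` instead:

```python
import pyarrow as pa

fs = pa.hdfs.connect(host="namenode", port=8020)  # ends up in _connect() above
print(fs.ls("/user"))                             # plain list of paths
with fs.open("/user/demo.txt", "wb") as f:        # OpenWritable() branch above
    f.write(b"hello hdfs\n")
fs.delete("/user/demo.txt")
```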
env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pxd
ADDED
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: language_level = 3
+
+from pyarrow.includes.libarrow cimport *
+from pyarrow.lib cimport _Weakrefable
+
+
+cdef class ParseOptions(_Weakrefable):
+    cdef:
+        CJSONParseOptions options
+
+    @staticmethod
+    cdef ParseOptions wrap(CJSONParseOptions options)
+
+cdef class ReadOptions(_Weakrefable):
+    cdef:
+        CJSONReadOptions options
+
+    @staticmethod
+    cdef ReadOptions wrap(CJSONReadOptions options)
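These declarations back the Python-level `pyarrow.json` options classes. A minimal sketch of the reader they configure, assuming newline-delimited JSON input:

```python
import io
import pyarrow as pa
from pyarrow import json

data = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
table = json.read_json(io.BytesIO(data),
                       read_options=json.ReadOptions(use_threads=True))
print(table.to_pydict())  # {'a': [1, 2], 'b': ['x', 'y']}
```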
env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (605 kB).
env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd
ADDED
@@ -0,0 +1,56 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+# cython: language_level = 3
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libparquet_encryption cimport *
+from pyarrow._parquet cimport (ParquetCipher,
+                               CFileEncryptionProperties,
+                               CFileDecryptionProperties,
+                               FileEncryptionProperties,
+                               FileDecryptionProperties,
+                               ParquetCipher_AES_GCM_V1,
+                               ParquetCipher_AES_GCM_CTR_V1)
+from pyarrow.lib cimport _Weakrefable
+
+cdef class CryptoFactory(_Weakrefable):
+    cdef shared_ptr[CPyCryptoFactory] factory
+    cdef init(self, callable_client_factory)
+    cdef inline shared_ptr[CPyCryptoFactory] unwrap(self)
+
+cdef class EncryptionConfiguration(_Weakrefable):
+    cdef shared_ptr[CEncryptionConfiguration] configuration
+    cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil
+
+cdef class DecryptionConfiguration(_Weakrefable):
+    cdef shared_ptr[CDecryptionConfiguration] configuration
+    cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil
+
+cdef class KmsConnectionConfig(_Weakrefable):
+    cdef shared_ptr[CKmsConnectionConfig] configuration
+    cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil
+
+    @staticmethod
+    cdef wrap(const CKmsConnectionConfig& config)
+
+
+cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *
+cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *
+cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *
+cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *
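These are the Cython declarations behind `pyarrow.parquet.encryption`. A hedged sketch of the intended flow; `MyKmsClient` is a hypothetical stand-in (real code subclasses `pyarrow.parquet.encryption.KmsClient` and implements `wrap_key()`/`unwrap_key()`):

```python
import pyarrow.parquet as pq
import pyarrow.parquet.encryption as pe

kms_config = pe.KmsConnectionConfig(
    custom_kms_conf={"footer": "0123456789012345"})
factory = pe.CryptoFactory(lambda config: MyKmsClient(config))  # hypothetical client
enc_config = pe.EncryptionConfiguration(
    footer_key="footer",
    column_keys={"footer": ["a", "b"]},
)
props = factory.file_encryption_properties(kms_config, enc_config)
# props is then passed to pq.ParquetWriter(..., encryption_properties=props)
```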
env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd
ADDED
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+# cython: language_level = 3
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport CStatus
+
+
+ctypedef CStatus cb_test_func()
+
+cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil:
+
+    cdef cppclass CTestCase "arrow::py::testing::TestCase":
+        c_string name
+        cb_test_func func
+
+    vector[CTestCase] GetCppTestCases()
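The `GetCppTestCases()` registry declared here pairs each embedded C++ test with a `CStatus`-returning callback. It is surfaced to Python by the companion `_pyarrow_cpp_tests.pyx`; the `get_cpp_tests` name below is taken from the pyarrow source tree, not from this diff, so treat it as an assumption:

```python
from pyarrow._pyarrow_cpp_tests import get_cpp_tests  # assumed companion API

for case in get_cpp_tests():  # each case wraps a CStatus-returning callback
    print("running", case)
    case()                    # raises if the underlying CStatus is an error
```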