applied-ai-018 committed on
Commit fa6fb51 · verified · 1 parent: 7c140d4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step80/zero/10.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/10.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step80/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step80/zero/13.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step80/zero/22.input_layernorm.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step80/zero/22.input_layernorm.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  8. ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  9. ckpts/universal/global_step80/zero/6.attention.query_key_value.weight/exp_avg.pt +3 -0
  10. ckpts/universal/global_step80/zero/6.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  11. ckpts/universal/global_step80/zero/6.attention.query_key_value.weight/fp32.pt +3 -0
  12. venv/lib/python3.10/site-packages/pyarrow/__init__.pxd +42 -0
  13. venv/lib/python3.10/site-packages/pyarrow/__init__.py +429 -0
  14. venv/lib/python3.10/site-packages/pyarrow/_acero.pyx +608 -0
  15. venv/lib/python3.10/site-packages/pyarrow/_azurefs.cpython-310-x86_64-linux-gnu.so +0 -0
  16. venv/lib/python3.10/site-packages/pyarrow/_compute.pyx +0 -0
  17. venv/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py +56 -0
  18. venv/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so +0 -0
  19. venv/lib/python3.10/site-packages/pyarrow/_csv.pxd +55 -0
  20. venv/lib/python3.10/site-packages/pyarrow/_cuda.pxd +67 -0
  21. venv/lib/python3.10/site-packages/pyarrow/_dataset.pxd +183 -0
  22. venv/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so +0 -0
  23. venv/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx +51 -0
  24. venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so +0 -0
  25. venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd +42 -0
  26. venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx +1023 -0
  27. venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx +170 -0
  28. venv/lib/python3.10/site-packages/pyarrow/_dlpack.pxi +46 -0
  29. venv/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so +0 -0
  30. venv/lib/python3.10/site-packages/pyarrow/_feather.pyx +117 -0
  31. venv/lib/python3.10/site-packages/pyarrow/_fs.pxd +94 -0
  32. venv/lib/python3.10/site-packages/pyarrow/_fs.pyx +1634 -0
  33. venv/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so +0 -0
  34. venv/lib/python3.10/site-packages/pyarrow/_generated_version.py +16 -0
  35. venv/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/pyarrow/_hdfs.pyx +160 -0
  37. venv/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so +0 -0
  38. venv/lib/python3.10/site-packages/pyarrow/_json.pxd +36 -0
  39. venv/lib/python3.10/site-packages/pyarrow/_json.pyx +310 -0
  40. venv/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so +0 -0
  41. venv/lib/python3.10/site-packages/pyarrow/_orc.pxd +134 -0
  42. venv/lib/python3.10/site-packages/pyarrow/_orc.pyx +445 -0
  43. venv/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so +0 -0
  44. venv/lib/python3.10/site-packages/pyarrow/_parquet.pxd +674 -0
  45. venv/lib/python3.10/site-packages/pyarrow/_parquet.pyx +2205 -0
  46. venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so +0 -0
  47. venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd +56 -0
  48. venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx +484 -0
  49. venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd +33 -0
  50. venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx +62 -0
ckpts/universal/global_step80/zero/10.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da0a9feef3a0b5310c5a298094c988f1dd05e4bdac1e0fd08ad4a6baeab514ed
+ size 50332828
ckpts/universal/global_step80/zero/10.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e13d91cdd295f7c1e9c37db6713016095a78675cff0731bee34caa4535257305
+ size 50332749
ckpts/universal/global_step80/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9115e564b01d1502a390dfd0ced1fbcb657caf6e6ff6871ea580f9b006e11659
+ size 33555612
ckpts/universal/global_step80/zero/13.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f67cb0b3505655ee512758d74d672bd73ac544e9e42bc5afd2380d38dc6cf412
+ size 33555533
ckpts/universal/global_step80/zero/22.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0baee2fc6d2832b74f9261b7d0f458b18c1f92161453d153fc0a7bd6fe60ab96
+ size 9372
ckpts/universal/global_step80/zero/22.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e81f1cb276ce6a9f094d1776a4d53d015381f3c850cd8ed40274a4d495a565d0
+ size 9293
ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5106655d9d1a64be4a18713b755fc7274da273e1015562a7224b920bdc74f8e0
+ size 33555612
ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93633f05e3a0a2e8e1f268254c86688a8a41f89a5912dd5cef5511909acd6611
+ size 33555533
ckpts/universal/global_step80/zero/6.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96166d1f74caa34496af48fd40128198a602c5c73507141ecef962145693bbe4
+ size 50332828
ckpts/universal/global_step80/zero/6.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e827e8fa3562aed07ca95d4148ad2ca2b6d65be270ec1ebd374c9e48a85dce49
+ size 50332843
ckpts/universal/global_step80/zero/6.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e0c007066a0cdb98b900cc9763049e003cbcb5e3e7c3ccc83ddb41b51f3b57b
+ size 50332749
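Each of the checkpoint `.pt` entries above is tracked with Git LFS, so the commit only adds a three-line pointer file (spec version, SHA-256 oid, size in bytes) rather than the tensor data itself. Below is a minimal sketch of reading such a pointer in plain Python; the helper name and the example path are illustrative, not part of this repository's tooling.

```python
from pathlib import Path

def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict with 'version', 'oid', 'size'."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    # 'size' is the byte count of the real object; 'oid' carries the hash algorithm prefix.
    fields["size"] = int(fields["size"])
    return fields

# Hypothetical usage against a local checkout:
# ptr = read_lfs_pointer("ckpts/universal/global_step80/zero/10.attention.query_key_value.weight/exp_avg.pt")
# print(ptr["oid"], ptr["size"])
```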
venv/lib/python3.10/site-packages/pyarrow/__init__.pxd ADDED
@@ -0,0 +1,42 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ from libcpp.memory cimport shared_ptr
+ from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType,
+ CField, CRecordBatch, CSchema,
+ CTable, CTensor, CSparseCOOTensor,
+ CSparseCSRMatrix, CSparseCSCMatrix,
+ CSparseCSFTensor)
+
+ cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py":
+ cdef int import_pyarrow() except -1
+ cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer)
+ cdef object wrap_data_type(const shared_ptr[CDataType]& type)
+ cdef object wrap_field(const shared_ptr[CField]& field)
+ cdef object wrap_schema(const shared_ptr[CSchema]& schema)
+ cdef object wrap_array(const shared_ptr[CArray]& sp_array)
+ cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor)
+ cdef object wrap_sparse_tensor_coo(
+ const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor)
+ cdef object wrap_sparse_tensor_csr(
+ const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor)
+ cdef object wrap_sparse_tensor_csc(
+ const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor)
+ cdef object wrap_sparse_tensor_csf(
+ const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor)
+ cdef object wrap_table(const shared_ptr[CTable]& ctable)
+ cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch)
venv/lib/python3.10/site-packages/pyarrow/__init__.py ADDED
@@ -0,0 +1,429 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # flake8: noqa
+
+ """
+ PyArrow is the python implementation of Apache Arrow.
+
+ Apache Arrow is a cross-language development platform for in-memory data.
+ It specifies a standardized language-independent columnar memory format for
+ flat and hierarchical data, organized for efficient analytic operations on
+ modern hardware. It also provides computational libraries and zero-copy
+ streaming messaging and interprocess communication.
+
+ For more information see the official page at https://arrow.apache.org
+ """
+
+ import gc as _gc
+ import importlib as _importlib
+ import os as _os
+ import platform as _platform
+ import sys as _sys
+ import warnings as _warnings
+
+ try:
+ from ._generated_version import version as __version__
+ except ImportError:
+ # Package is not installed, parse git tag at runtime
+ try:
+ import setuptools_scm
+ # Code duplicated from setup.py to avoid a dependency on each other
+
+ def parse_git(root, **kwargs):
+ """
+ Parse function for setuptools_scm that ignores tags for non-C++
+ subprojects, e.g. apache-arrow-js-XXX tags.
+ """
+ from setuptools_scm.git import parse
+ kwargs['describe_command'] = \
+ "git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'"
+ return parse(root, **kwargs)
+ __version__ = setuptools_scm.get_version('../',
+ parse=parse_git)
+ except ImportError:
+ __version__ = None
+
+ # ARROW-8684: Disable GC while initializing Cython extension module,
+ # to workaround Cython bug in https://github.com/cython/cython/issues/3603
+ _gc_enabled = _gc.isenabled()
+ _gc.disable()
+ import pyarrow.lib as _lib
+ if _gc_enabled:
+ _gc.enable()
+
+ from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path,
+ MonthDayNano, VersionInfo, cpp_build_info,
+ cpp_version, cpp_version_info, runtime_info,
+ cpu_count, set_cpu_count, enable_signal_handlers,
+ io_thread_count, set_io_thread_count)
+
+
+ def show_versions():
+ """
+ Print various version information, to help with error reporting.
+ """
+ def print_entry(label, value):
+ print(f"{label: <26}: {value: <8}")
+
+ print("pyarrow version info\n--------------------")
+ print_entry("Package kind", cpp_build_info.package_kind
+ if len(cpp_build_info.package_kind) > 0
+ else "not indicated")
+ print_entry("Arrow C++ library version", cpp_build_info.version)
+ print_entry("Arrow C++ compiler",
+ f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}")
+ print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags)
+ print_entry("Arrow C++ git revision", cpp_build_info.git_id)
+ print_entry("Arrow C++ git description", cpp_build_info.git_description)
+ print_entry("Arrow C++ build type", cpp_build_info.build_type)
+
+
+ def _module_is_available(module):
+ try:
+ _importlib.import_module(f'pyarrow.{module}')
+ except ImportError:
+ return False
+ else:
+ return True
+
+
+ def _filesystem_is_available(fs):
+ try:
+ import pyarrow.fs
+ except ImportError:
+ return False
+
+ try:
+ getattr(pyarrow.fs, fs)
+ except (ImportError, AttributeError):
+ return False
+ else:
+ return True
+
+
+ def show_info():
+ """
+ Print detailed version and platform information, for error reporting
+ """
+ show_versions()
+
+ def print_entry(label, value):
+ print(f" {label: <20}: {value: <8}")
+
+ print("\nPlatform:")
+ print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}")
+ print_entry("SIMD Level", runtime_info().simd_level)
+ print_entry("Detected SIMD Level", runtime_info().detected_simd_level)
+
+ pool = default_memory_pool()
+ print("\nMemory:")
+ print_entry("Default backend", pool.backend_name)
+ print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes")
+ print_entry("Max memory", f"{pool.max_memory()} bytes")
+ print_entry("Supported Backends", ', '.join(supported_memory_backends()))
+
+ print("\nOptional modules:")
+ modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json",
+ "orc", "parquet"]
+ for module in modules:
+ status = "Enabled" if _module_is_available(module) else "-"
+ print(f" {module: <20}: {status: <8}")
+
+ print("\nFilesystems:")
+ filesystems = ["AzureFileSystem", "GcsFileSystem",
+ "HadoopFileSystem", "S3FileSystem"]
+ for fs in filesystems:
+ status = "Enabled" if _filesystem_is_available(fs) else "-"
+ print(f" {fs: <20}: {status: <8}")
+
+ print("\nCompression Codecs:")
+ codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"]
+ for codec in codecs:
+ status = "Enabled" if Codec.is_available(codec) else "-"
+ print(f" {codec: <20}: {status: <8}")
+
+
+ from pyarrow.lib import (null, bool_,
+ int8, int16, int32, int64,
+ uint8, uint16, uint32, uint64,
+ time32, time64, timestamp, date32, date64, duration,
+ month_day_nano_interval,
+ float16, float32, float64,
+ binary, string, utf8, binary_view, string_view,
+ large_binary, large_string, large_utf8,
+ decimal128, decimal256,
+ list_, large_list, list_view, large_list_view,
+ map_, struct,
+ union, sparse_union, dense_union,
+ dictionary,
+ run_end_encoded,
+ fixed_shape_tensor,
+ field,
+ type_for_alias,
+ DataType, DictionaryType, StructType,
+ ListType, LargeListType, FixedSizeListType,
+ ListViewType, LargeListViewType,
+ MapType, UnionType, SparseUnionType, DenseUnionType,
+ TimestampType, Time32Type, Time64Type, DurationType,
+ FixedSizeBinaryType, Decimal128Type, Decimal256Type,
+ BaseExtensionType, ExtensionType,
+ RunEndEncodedType, FixedShapeTensorType,
+ PyExtensionType, UnknownExtensionType,
+ register_extension_type, unregister_extension_type,
+ DictionaryMemo,
+ KeyValueMetadata,
+ Field,
+ Schema,
+ schema,
+ unify_schemas,
+ Array, Tensor,
+ array, chunked_array, record_batch, nulls, repeat,
+ SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix,
+ SparseCSFTensor,
+ infer_type, from_numpy_dtype,
+ NullArray,
+ NumericArray, IntegerArray, FloatingPointArray,
+ BooleanArray,
+ Int8Array, UInt8Array,
+ Int16Array, UInt16Array,
+ Int32Array, UInt32Array,
+ Int64Array, UInt64Array,
+ HalfFloatArray, FloatArray, DoubleArray,
+ ListArray, LargeListArray, FixedSizeListArray,
+ ListViewArray, LargeListViewArray,
+ MapArray, UnionArray,
+ BinaryArray, StringArray,
+ LargeBinaryArray, LargeStringArray,
+ BinaryViewArray, StringViewArray,
+ FixedSizeBinaryArray,
+ DictionaryArray,
+ Date32Array, Date64Array, TimestampArray,
+ Time32Array, Time64Array, DurationArray,
+ MonthDayNanoIntervalArray,
+ Decimal128Array, Decimal256Array, StructArray, ExtensionArray,
+ RunEndEncodedArray, FixedShapeTensorArray,
+ scalar, NA, _NULL as NULL, Scalar,
+ NullScalar, BooleanScalar,
+ Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar,
+ UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar,
+ HalfFloatScalar, FloatScalar, DoubleScalar,
+ Decimal128Scalar, Decimal256Scalar,
+ ListScalar, LargeListScalar, FixedSizeListScalar,
+ ListViewScalar, LargeListViewScalar,
+ Date32Scalar, Date64Scalar,
+ Time32Scalar, Time64Scalar,
+ TimestampScalar, DurationScalar,
+ MonthDayNanoIntervalScalar,
+ BinaryScalar, LargeBinaryScalar, BinaryViewScalar,
+ StringScalar, LargeStringScalar, StringViewScalar,
+ FixedSizeBinaryScalar, DictionaryScalar,
+ MapScalar, StructScalar, UnionScalar,
+ RunEndEncodedScalar, ExtensionScalar)
+
+ # Buffers, allocation
+ from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
+ Codec, compress, decompress, allocate_buffer)
+
+ from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
+ total_allocated_bytes, set_memory_pool,
+ default_memory_pool, system_memory_pool,
+ jemalloc_memory_pool, mimalloc_memory_pool,
+ logging_memory_pool, proxy_memory_pool,
+ log_memory_allocations, jemalloc_set_decay_ms,
+ supported_memory_backends)
+
+ # I/O
+ from pyarrow.lib import (NativeFile, PythonFile,
+ BufferedInputStream, BufferedOutputStream, CacheOptions,
+ CompressedInputStream, CompressedOutputStream,
+ TransformInputStream, transcoding_input_stream,
+ FixedSizeBufferWriter,
+ BufferReader, BufferOutputStream,
+ OSFile, MemoryMappedFile, memory_map,
+ create_memory_map, MockOutputStream,
+ input_stream, output_stream,
+ have_libhdfs)
+
+ from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table,
+ concat_arrays, concat_tables, TableGroupBy,
+ RecordBatchReader)
+
+ # Exceptions
+ from pyarrow.lib import (ArrowCancelled,
+ ArrowCapacityError,
+ ArrowException,
+ ArrowKeyError,
+ ArrowIndexError,
+ ArrowInvalid,
+ ArrowIOError,
+ ArrowMemoryError,
+ ArrowNotImplementedError,
+ ArrowTypeError,
+ ArrowSerializationError)
+
+ from pyarrow.ipc import serialize_pandas, deserialize_pandas
+ import pyarrow.ipc as ipc
+
+ import pyarrow.types as types
+
+
+ # ----------------------------------------------------------------------
+ # Deprecations
+
+ from pyarrow.util import _deprecate_api, _deprecate_class
+
+
+ # TODO: Deprecate these somehow in the pyarrow namespace
+ from pyarrow.ipc import (Message, MessageReader, MetadataVersion,
+ RecordBatchFileReader, RecordBatchFileWriter,
+ RecordBatchStreamReader, RecordBatchStreamWriter)
+
+ # ----------------------------------------------------------------------
+ # Returning absolute path to the pyarrow include directory (if bundled, e.g. in
+ # wheels)
+
+
+ def get_include():
+ """
+ Return absolute path to directory containing Arrow C++ include
+ headers. Similar to numpy.get_include
+ """
+ return _os.path.join(_os.path.dirname(__file__), 'include')
+
+
+ def _get_pkg_config_executable():
+ return _os.environ.get('PKG_CONFIG', 'pkg-config')
+
+
+ def _has_pkg_config(pkgname):
+ import subprocess
+ try:
+ return subprocess.call([_get_pkg_config_executable(),
+ '--exists', pkgname]) == 0
+ except FileNotFoundError:
+ return False
+
+
+ def _read_pkg_config_variable(pkgname, cli_args):
+ import subprocess
+ cmd = [_get_pkg_config_executable(), pkgname] + cli_args
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = proc.communicate()
+ if proc.returncode != 0:
+ raise RuntimeError("pkg-config failed: " + err.decode('utf8'))
+ return out.rstrip().decode('utf8')
+
+
+ def get_libraries():
+ """
+ Return list of library names to include in the `libraries` argument for C
+ or Cython extensions using pyarrow
+ """
+ return ['arrow_python', 'arrow']
+
+
+ def create_library_symlinks():
+ """
+ With Linux and macOS wheels, the bundled shared libraries have an embedded
+ ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them
+ with -larrow won't work unless we create symlinks at locations like
+ site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses
+ prior problems we had with shipping two copies of the shared libraries to
+ permit third party projects like turbodbc to build their C++ extensions
+ against the pyarrow wheels.
+
+ This function must only be invoked once and only when the shared libraries
+ are bundled with the Python package, which should only apply to wheel-based
+ installs. It requires write access to the site-packages/pyarrow directory
+ and so depending on your system may need to be run with root.
+ """
+ import glob
+ if _sys.platform == 'win32':
+ return
+ package_cwd = _os.path.dirname(__file__)
+
+ if _sys.platform == 'linux':
+ bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*'))
+
+ def get_symlink_path(hard_path):
+ return hard_path.rsplit('.', 1)[0]
+ else:
+ bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib'))
+
+ def get_symlink_path(hard_path):
+ return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib'))
+
+ for lib_hard_path in bundled_libs:
+ symlink_path = get_symlink_path(lib_hard_path)
+ if _os.path.exists(symlink_path):
+ continue
+ try:
+ _os.symlink(lib_hard_path, symlink_path)
+ except PermissionError:
+ print("Tried creating symlink {}. If you need to link to "
+ "bundled shared libraries, run "
+ "pyarrow.create_library_symlinks() as root")
+
+
+ def get_library_dirs():
+ """
+ Return lists of directories likely to contain Arrow C++ libraries for
+ linking C or Cython extensions using pyarrow
+ """
+ package_cwd = _os.path.dirname(__file__)
+ library_dirs = [package_cwd]
+
+ def append_library_dir(library_dir):
+ if library_dir not in library_dirs:
+ library_dirs.append(library_dir)
+
+ # Search library paths via pkg-config. This is necessary if the user
+ # installed libarrow and the other shared libraries manually and they
+ # are not shipped inside the pyarrow package (see also ARROW-2976).
+ pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config'
+ for pkgname in ["arrow", "arrow_python"]:
+ if _has_pkg_config(pkgname):
+ library_dir = _read_pkg_config_variable(pkgname,
+ ["--libs-only-L"])
+ # pkg-config output could be empty if Arrow is installed
+ # as a system package.
+ if library_dir:
+ if not library_dir.startswith("-L"):
+ raise ValueError(
+ "pkg-config --libs-only-L returned unexpected "
+ "value {!r}".format(library_dir))
+ append_library_dir(library_dir[2:])
+
+ if _sys.platform == 'win32':
+ # TODO(wesm): Is this necessary, or does setuptools within a conda
+ # installation add Library\lib to the linker path for MSVC?
+ python_base_install = _os.path.dirname(_sys.executable)
+ library_dir = _os.path.join(python_base_install, 'Library', 'lib')
+
+ if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')):
+ append_library_dir(library_dir)
+
+ # ARROW-4074: Allow for ARROW_HOME to be set to some other directory
+ if _os.environ.get('ARROW_HOME'):
+ append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
+ else:
+ # Python wheels bundle the Arrow libraries in the pyarrow directory.
+ append_library_dir(_os.path.dirname(_os.path.abspath(__file__)))
+
+ return library_dirs
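The `get_include()`, `get_libraries()`, and `get_library_dirs()` helpers added above exist so that third-party C or Cython extensions can compile and link against the Arrow headers and shared libraries bundled with the wheel. The following is a minimal setuptools sketch using them; the extension module and source names are hypothetical.

```python
# setup.py sketch for an extension that builds against pyarrow
# (module name and source path below are illustrative only)
import pyarrow as pa
from setuptools import Extension, setup

ext = Extension(
    "myproj._arrow_ext",                 # hypothetical extension module
    sources=["myproj/_arrow_ext.pyx"],   # hypothetical Cython source
    include_dirs=[pa.get_include()],     # bundled Arrow C++ headers
    libraries=pa.get_libraries(),        # ['arrow_python', 'arrow']
    library_dirs=pa.get_library_dirs(),  # pkg-config results plus the pyarrow package dir
)

setup(name="myproj", ext_modules=[ext])
```

On Linux and macOS wheel installs, `pa.create_library_symlinks()` may need to run first so that plain `-larrow` resolves, as described in the docstring above.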
venv/lib/python3.10/site-packages/pyarrow/_acero.pyx ADDED
@@ -0,0 +1,608 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # ---------------------------------------------------------------------
+ # Low-level Acero bindings
+
+ # cython: profile=False
+ # distutils: language = c++
+ # cython: language_level = 3
+
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.includes.libarrow_acero cimport *
+ from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table,
+ RecordBatchReader)
+ from pyarrow.lib import frombytes, tobytes
+ from pyarrow._compute cimport (
+ Expression, FunctionOptions, _ensure_field_ref, _true,
+ unwrap_null_placement, unwrap_sort_order
+ )
+
+
+ cdef class ExecNodeOptions(_Weakrefable):
+ """
+ Base class for the node options.
+
+ Use one of the subclasses to construct an options object.
+ """
+ __slots__ = () # avoid mistakingly creating attributes
+
+ cdef void init(self, const shared_ptr[CExecNodeOptions]& sp):
+ self.wrapped = sp
+
+ cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil:
+ return self.wrapped
+
+
+ cdef class _TableSourceNodeOptions(ExecNodeOptions):
+
+ def _set_options(self, Table table):
+ cdef:
+ shared_ptr[CTable] c_table
+
+ c_table = pyarrow_unwrap_table(table)
+ self.wrapped.reset(
+ new CTableSourceNodeOptions(c_table)
+ )
+
+
+ class TableSourceNodeOptions(_TableSourceNodeOptions):
+ """
+ A Source node which accepts a table.
+
+ This is the option class for the "table_source" node factory.
+
+ Parameters
+ ----------
+ table : pyarrow.Table
+ The table which acts as the data source.
+ """
+
+ def __init__(self, Table table):
+ self._set_options(table)
+
+
+ cdef class _FilterNodeOptions(ExecNodeOptions):
+
+ def _set_options(self, Expression filter_expression not None):
+ self.wrapped.reset(
+ new CFilterNodeOptions(<CExpression>filter_expression.unwrap())
+ )
+
+
+ class FilterNodeOptions(_FilterNodeOptions):
+ """
+ Make a node which excludes some rows from batches passed through it.
+
+ This is the option class for the "filter" node factory.
+
+ The "filter" operation provides an option to define data filtering
+ criteria. It selects rows where the given expression evaluates to true.
+ Filters can be written using pyarrow.compute.Expression, and the
+ expression must have a return type of boolean.
+
+ Parameters
+ ----------
+ filter_expression : pyarrow.compute.Expression
+ """
+
+ def __init__(self, Expression filter_expression):
+ self._set_options(filter_expression)
+
+
+ cdef class _ProjectNodeOptions(ExecNodeOptions):
+
+ def _set_options(self, expressions, names=None):
+ cdef:
+ Expression expr
+ vector[CExpression] c_expressions
+ vector[c_string] c_names
+
+ for expr in expressions:
+ c_expressions.push_back(expr.unwrap())
+
+ if names is not None:
+ if len(names) != len(expressions):
+ raise ValueError(
+ "The number of names should be equal to the number of expressions"
+ )
+
+ for name in names:
+ c_names.push_back(<c_string>tobytes(name))
+
+ self.wrapped.reset(
+ new CProjectNodeOptions(c_expressions, c_names)
+ )
+ else:
+ self.wrapped.reset(
+ new CProjectNodeOptions(c_expressions)
+ )
+
+
+ class ProjectNodeOptions(_ProjectNodeOptions):
+ """
+ Make a node which executes expressions on input batches,
+ producing batches of the same length with new columns.
+
+ This is the option class for the "project" node factory.
+
+ The "project" operation rearranges, deletes, transforms, and
+ creates columns. Each output column is computed by evaluating
+ an expression against the source record batch. These must be
+ scalar expressions (expressions consisting of scalar literals,
+ field references and scalar functions, i.e. elementwise functions
+ that return one value for each input row independent of the value
+ of all other rows).
+
+ Parameters
+ ----------
+ expressions : list of pyarrow.compute.Expression
+ List of expressions to evaluate against the source batch. This must
+ be scalar expressions.
+ names : list of str, optional
+ List of names for each of the output columns (same length as
+ `expressions`). If `names` is not provided, the string
+ representations of exprs will be used.
+ """
+
+ def __init__(self, expressions, names=None):
+ self._set_options(expressions, names)
+
+
+ cdef class _AggregateNodeOptions(ExecNodeOptions):
+
+ def _set_options(self, aggregates, keys=None):
+ cdef:
+ CAggregate c_aggr
+ vector[CAggregate] c_aggregations
+ vector[CFieldRef] c_keys
+
+ for arg_names, func_name, opts, name in aggregates:
+ c_aggr.function = tobytes(func_name)
+ if opts is not None:
+ c_aggr.options = (<FunctionOptions?>opts).wrapped
+ else:
+ c_aggr.options = <shared_ptr[CFunctionOptions]>nullptr
+ if not isinstance(arg_names, (list, tuple)):
+ arg_names = [arg_names]
+ for arg in arg_names:
+ c_aggr.target.push_back(_ensure_field_ref(arg))
+ c_aggr.name = tobytes(name)
+
+ c_aggregations.push_back(move(c_aggr))
+
+ if keys is None:
+ keys = []
+ for name in keys:
+ c_keys.push_back(_ensure_field_ref(name))
+
+ self.wrapped.reset(
+ new CAggregateNodeOptions(c_aggregations, c_keys)
+ )
+
+
+ class AggregateNodeOptions(_AggregateNodeOptions):
+ """
+ Make a node which aggregates input batches, optionally grouped by keys.
+
+ This is the option class for the "aggregate" node factory.
+
+ Acero supports two types of aggregates: "scalar" aggregates,
+ and "hash" aggregates. Scalar aggregates reduce an array or scalar
+ input to a single scalar output (e.g. computing the mean of a column).
+ Hash aggregates act like GROUP BY in SQL and first partition data
+ based on one or more key columns, then reduce the data in each partition.
+ The aggregate node supports both types of computation, and can compute
+ any number of aggregations at once.
+
+ Parameters
+ ----------
+ aggregates : list of tuples
+ Aggregations which will be applied to the targeted fields.
+ Specified as a list of tuples, where each tuple is one aggregation
+ specification and consists of: aggregation target column(s) followed
+ by function name, aggregation function options object and the
+ output field name.
+ The target column(s) specification can be a single field reference,
+ an empty list or a list of fields unary, nullary and n-ary aggregation
+ functions respectively. Each field reference can be a string
+ column name or expression.
+ keys : list of field references, optional
+ Keys by which aggregations will be grouped. Each key can reference
+ a field using a string name or expression.
+ """
+
+ def __init__(self, aggregates, keys=None):
+ self._set_options(aggregates, keys)
+
+
+ cdef class _OrderByNodeOptions(ExecNodeOptions):
+
+ def _set_options(self, sort_keys, null_placement):
+ cdef:
+ vector[CSortKey] c_sort_keys
+
+ for name, order in sort_keys:
+ c_sort_keys.push_back(
+ CSortKey(_ensure_field_ref(name), unwrap_sort_order(order))
+ )
+
+ self.wrapped.reset(
+ new COrderByNodeOptions(
+ COrdering(c_sort_keys, unwrap_null_placement(null_placement))
+ )
+ )
+
+
+ class OrderByNodeOptions(_OrderByNodeOptions):
+ """
+ Make a node which applies a new ordering to the data.
+
+ Currently this node works by accumulating all data, sorting, and then
+ emitting the new data with an updated batch index.
+ Larger-than-memory sort is not currently supported.
+
+ This is the option class for the "order_by" node factory.
+
+ Parameters
+ ----------
+ sort_keys : sequence of (name, order) tuples
+ Names of field/column keys to sort the input on,
+ along with the order each field/column is sorted in.
+ Accepted values for `order` are "ascending", "descending".
+ Each field reference can be a string column name or expression.
+ null_placement : str, default "at_end"
+ Where nulls in input should be sorted, only applying to
+ columns/fields mentioned in `sort_keys`.
+ Accepted values are "at_start", "at_end".
+ """
+
+ def __init__(self, sort_keys=(), *, null_placement="at_end"):
+ self._set_options(sort_keys, null_placement)
+
+
+ cdef class _HashJoinNodeOptions(ExecNodeOptions):
+
+ def _set_options(
+ self, join_type, left_keys, right_keys, left_output=None, right_output=None,
+ output_suffix_for_left="", output_suffix_for_right="",
+ ):
+ cdef:
+ CJoinType c_join_type
+ vector[CFieldRef] c_left_keys
+ vector[CFieldRef] c_right_keys
+ vector[CFieldRef] c_left_output
+ vector[CFieldRef] c_right_output
+
+ # join type
+ if join_type == "left semi":
+ c_join_type = CJoinType_LEFT_SEMI
+ elif join_type == "right semi":
+ c_join_type = CJoinType_RIGHT_SEMI
+ elif join_type == "left anti":
+ c_join_type = CJoinType_LEFT_ANTI
+ elif join_type == "right anti":
+ c_join_type = CJoinType_RIGHT_ANTI
+ elif join_type == "inner":
+ c_join_type = CJoinType_INNER
+ elif join_type == "left outer":
+ c_join_type = CJoinType_LEFT_OUTER
+ elif join_type == "right outer":
+ c_join_type = CJoinType_RIGHT_OUTER
+ elif join_type == "full outer":
+ c_join_type = CJoinType_FULL_OUTER
+ else:
+ raise ValueError("Unsupported join type")
+
+ # left/right keys
+ if not isinstance(left_keys, (list, tuple)):
+ left_keys = [left_keys]
+ for key in left_keys:
+ c_left_keys.push_back(_ensure_field_ref(key))
+ if not isinstance(right_keys, (list, tuple)):
+ right_keys = [right_keys]
+ for key in right_keys:
+ c_right_keys.push_back(_ensure_field_ref(key))
+
+ # left/right output fields
+ if left_output is not None and right_output is not None:
+ for colname in left_output:
+ c_left_output.push_back(_ensure_field_ref(colname))
+ for colname in right_output:
+ c_right_output.push_back(_ensure_field_ref(colname))
+
+ self.wrapped.reset(
+ new CHashJoinNodeOptions(
+ c_join_type, c_left_keys, c_right_keys,
+ c_left_output, c_right_output,
+ _true,
+ <c_string>tobytes(output_suffix_for_left),
+ <c_string>tobytes(output_suffix_for_right)
+ )
+ )
+ else:
+ self.wrapped.reset(
+ new CHashJoinNodeOptions(
+ c_join_type, c_left_keys, c_right_keys,
+ _true,
+ <c_string>tobytes(output_suffix_for_left),
+ <c_string>tobytes(output_suffix_for_right)
+ )
+ )
+
+
+ class HashJoinNodeOptions(_HashJoinNodeOptions):
+ """
+ Make a node which implements join operation using hash join strategy.
+
+ This is the option class for the "hashjoin" node factory.
+
+ Parameters
+ ----------
+ join_type : str
+ Type of join. One of "left semi", "right semi", "left anti",
+ "right anti", "inner", "left outer", "right outer", "full outer".
+ left_keys : str, Expression or list
+ Key fields from left input. Each key can be a string column name
+ or a field expression, or a list of such field references.
+ right_keys : str, Expression or list
+ Key fields from right input. See `left_keys` for details.
+ left_output : list, optional
+ List of output fields passed from left input. If left and right
+ output fields are not specified, all valid fields from both left and
+ right input will be output. Each field can be a string column name
+ or a field expression.
+ right_output : list, optional
+ List of output fields passed from right input. If left and right
+ output fields are not specified, all valid fields from both left and
+ right input will be output. Each field can be a string column name
+ or a field expression.
+ output_suffix_for_left : str
+ Suffix added to names of output fields coming from left input
+ (used to distinguish, if necessary, between fields of the same
+ name in left and right input and can be left empty if there are
+ no name collisions).
+ output_suffix_for_right : str
+ Suffix added to names of output fields coming from right input,
+ see `output_suffix_for_left` for details.
+ """
+
+ def __init__(
+ self, join_type, left_keys, right_keys, left_output=None, right_output=None,
+ output_suffix_for_left="", output_suffix_for_right=""
+ ):
+ self._set_options(
+ join_type, left_keys, right_keys, left_output, right_output,
+ output_suffix_for_left, output_suffix_for_right
+ )
+
+
+ cdef class _AsofJoinNodeOptions(ExecNodeOptions):
+
+ def _set_options(self, left_on, left_by, right_on, right_by, tolerance):
+ cdef:
+ vector[CFieldRef] c_left_by
+ vector[CFieldRef] c_right_by
+ CAsofJoinKeys c_left_keys
+ CAsofJoinKeys c_right_keys
+ vector[CAsofJoinKeys] c_input_keys
+
+ # Prepare left AsofJoinNodeOption::Keys
+ if not isinstance(left_by, (list, tuple)):
+ left_by = [left_by]
+ for key in left_by:
+ c_left_by.push_back(_ensure_field_ref(key))
+
+ c_left_keys.on_key = _ensure_field_ref(left_on)
+ c_left_keys.by_key = c_left_by
+
+ c_input_keys.push_back(c_left_keys)
+
+ # Prepare right AsofJoinNodeOption::Keys
+ if not isinstance(right_by, (list, tuple)):
+ right_by = [right_by]
+ for key in right_by:
+ c_right_by.push_back(_ensure_field_ref(key))
+
+ c_right_keys.on_key = _ensure_field_ref(right_on)
+ c_right_keys.by_key = c_right_by
+
+ c_input_keys.push_back(c_right_keys)
+
+ self.wrapped.reset(
+ new CAsofJoinNodeOptions(
+ c_input_keys,
+ tolerance,
+ )
+ )
+
+
+ class AsofJoinNodeOptions(_AsofJoinNodeOptions):
+ """
+ Make a node which implements 'as of join' operation.
+
+ This is the option class for the "asofjoin" node factory.
+
+ Parameters
+ ----------
+ left_on : str, Expression
+ The left key on which the join operation should be performed.
+ Can be a string column name or a field expression.
+
+ An inexact match is used on the "on" key, i.e. a row is considered a
+ match if and only if left_on - tolerance <= right_on <= left_on.
+
+ The input dataset must be sorted by the "on" key. Must be a single
+ field of a common type.
+
+ Currently, the "on" key must be an integer, date, or timestamp type.
+ left_by: str, Expression or list
+ The left keys on which the join operation should be performed.
+ Exact equality is used for each field of the "by" keys.
+ Each key can be a string column name or a field expression,
+ or a list of such field references.
+ right_on : str, Expression
+ The right key on which the join operation should be performed.
+ See `left_on` for details.
+ right_by: str, Expression or list
+ The right keys on which the join operation should be performed.
+ See `left_by` for details.
+ tolerance : int
+ The tolerance to use for the asof join. The tolerance is interpreted in
+ the same units as the "on" key.
+ """
+
+ def __init__(self, left_on, left_by, right_on, right_by, tolerance):
+ self._set_options(left_on, left_by, right_on, right_by, tolerance)
+
+
+ cdef class Declaration(_Weakrefable):
+ """
+ Helper class for declaring the nodes of an ExecPlan.
+
+ A Declaration represents an unconstructed ExecNode, and potentially
+ more since its inputs may also be Declarations or when constructed
+ with ``from_sequence``.
+
+ The possible ExecNodes to use are registered with a name,
+ the "factory name", and need to be specified using this name, together
+ with its corresponding ExecNodeOptions subclass.
+
+ Parameters
+ ----------
+ factory_name : str
+ The ExecNode factory name, such as "table_source", "filter",
+ "project" etc. See the ExecNodeOptions subclasses for the exact
+ factory names to use.
+ options : ExecNodeOptions
+ Corresponding ExecNodeOptions subclass (matching the factory name).
+ inputs : list of Declaration, optional
+ Input nodes for this declaration. Optional if the node is a source
+ node, or when the declaration gets combined later with
+ ``from_sequence``.
+
+ Returns
+ -------
+ Declaration
+ """
+ cdef void init(self, const CDeclaration& c_decl):
+ self.decl = c_decl
+
+ @staticmethod
+ cdef wrap(const CDeclaration& c_decl):
+ cdef Declaration self = Declaration.__new__(Declaration)
+ self.init(c_decl)
+ return self
+
+ cdef inline CDeclaration unwrap(self) nogil:
+ return self.decl
+
+ def __init__(self, factory_name, ExecNodeOptions options, inputs=None):
+ cdef:
+ c_string c_factory_name
+ CDeclaration c_decl
+ vector[CDeclaration.Input] c_inputs
+
+ c_factory_name = tobytes(factory_name)
+
+ if inputs is not None:
+ for ipt in inputs:
+ c_inputs.push_back(
+ CDeclaration.Input((<Declaration>ipt).unwrap())
+ )
+
+ c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap())
+ self.init(c_decl)
+
+ @staticmethod
+ def from_sequence(decls):
+ """
+ Convenience factory for the common case of a simple sequence of nodes.
+
+ Each of the declarations will be appended to the inputs of the
+ subsequent declaration, and the final modified declaration will
+ be returned.
+
+ Parameters
+ ----------
+ decls : list of Declaration
+
+ Returns
+ -------
+ Declaration
+ """
+ cdef:
+ vector[CDeclaration] c_decls
+ CDeclaration c_decl
+
+ for decl in decls:
+ c_decls.push_back((<Declaration> decl).unwrap())
+
+ c_decl = CDeclaration.Sequence(c_decls)
+ return Declaration.wrap(c_decl)
+
+ def __str__(self):
+ return frombytes(GetResultValue(DeclarationToString(self.decl)))
+
+ def __repr__(self):
+ return "<pyarrow.acero.Declaration>\n{0}".format(str(self))
+
+ def to_table(self, bint use_threads=True):
+ """
+ Run the declaration and collect the results into a table.
+
+ This method will implicitly add a sink node to the declaration
+ to collect results into a table. It will then create an ExecPlan
+ from the declaration, start the exec plan, block until the plan
+ has finished, and return the created table.
+
+ Parameters
+ ----------
+ use_threads : bool, default True
+ If set to False, then all CPU work will be done on the calling
+ thread. I/O tasks will still happen on the I/O executor
+ and may be multi-threaded (but should not use significant CPU
+ resources).
+
+ Returns
+ -------
+ pyarrow.Table
+ """
+ cdef:
+ shared_ptr[CTable] c_table
+
+ with nogil:
+ c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads))
+ return pyarrow_wrap_table(c_table)
+
+ def to_reader(self, bint use_threads=True):
+ """Run the declaration and return results as a RecordBatchReader.
+
+ For details about the parameters, see `to_table`.
+
+ Returns
+ -------
+ pyarrow.RecordBatchReader
+ """
+ cdef:
+ RecordBatchReader reader
+ reader = RecordBatchReader.__new__(RecordBatchReader)
+ reader.reader.reset(
+ GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release()
+ )
+ return reader
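The `Declaration` and `*NodeOptions` classes in `_acero.pyx` back the public `pyarrow.acero` module. The following is a small sketch (assuming a pyarrow release that exposes `pyarrow.acero`) of chaining a table source, a filter, and a projection into a plan and collecting the result; the sample data is illustrative.

```python
import pyarrow as pa
import pyarrow.compute as pc
from pyarrow import acero

# Illustrative in-memory input
table = pa.table({"a": [1, 2, 3, 4], "b": ["x", "y", "x", "y"]})

# Build the plan as a sequence of declarations: source -> filter -> project
decl = acero.Declaration.from_sequence([
    acero.Declaration("table_source", acero.TableSourceNodeOptions(table)),
    acero.Declaration("filter", acero.FilterNodeOptions(pc.field("a") > 1)),
    acero.Declaration("project", acero.ProjectNodeOptions(
        [pc.field("a"), pc.field("b")], names=["a", "b"])),
])

result = decl.to_table()  # runs the plan and collects the output table
print(result)
```

`Declaration.to_reader()` can be used instead of `to_table()` when the output should be streamed as a `RecordBatchReader` rather than materialized at once.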
venv/lib/python3.10/site-packages/pyarrow/_azurefs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (105 kB).
venv/lib/python3.10/site-packages/pyarrow/_compute.pyx ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py ADDED
@@ -0,0 +1,56 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ """
+ Custom documentation additions for compute functions.
+ """
+
+ function_doc_additions = {}
+
+ function_doc_additions["filter"] = """
+ Examples
+ --------
+ >>> import pyarrow as pa
+ >>> arr = pa.array(["a", "b", "c", None, "e"])
+ >>> mask = pa.array([True, False, None, False, True])
+ >>> arr.filter(mask)
+ <pyarrow.lib.StringArray object at ...>
+ [
+ "a",
+ "e"
+ ]
+ >>> arr.filter(mask, null_selection_behavior='emit_null')
+ <pyarrow.lib.StringArray object at ...>
+ [
+ "a",
+ null,
+ "e"
+ ]
+ """
+
+ function_doc_additions["mode"] = """
+ Examples
+ --------
+ >>> import pyarrow as pa
+ >>> import pyarrow.compute as pc
+ >>> arr = pa.array([1, 1, 2, 2, 3, 2, 2, 2])
+ >>> modes = pc.mode(arr, 2)
+ >>> modes[0]
+ <pyarrow.StructScalar: [('mode', 2), ('count', 5)]>
+ >>> modes[1]
+ <pyarrow.StructScalar: [('mode', 1), ('count', 2)]>
+ """
venv/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (361 kB).
venv/lib/python3.10/site-packages/pyarrow/_csv.pxd ADDED
@@ -0,0 +1,55 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.lib cimport _Weakrefable
+
+
+ cdef class ConvertOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVConvertOptions] options
+
+ @staticmethod
+ cdef ConvertOptions wrap(CCSVConvertOptions options)
+
+
+ cdef class ParseOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVParseOptions] options
+ object _invalid_row_handler
+
+ @staticmethod
+ cdef ParseOptions wrap(CCSVParseOptions options)
+
+
+ cdef class ReadOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVReadOptions] options
+ public object encoding
+
+ @staticmethod
+ cdef ReadOptions wrap(CCSVReadOptions options)
+
+
+ cdef class WriteOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVWriteOptions] options
+
+ @staticmethod
+ cdef WriteOptions wrap(CCSVWriteOptions options)
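These `ReadOptions`, `ParseOptions`, `ConvertOptions`, and `WriteOptions` declarations are the Cython side of the option classes exposed by `pyarrow.csv`. A brief sketch of how they are typically passed to `pyarrow.csv.read_csv` follows; the sample data is illustrative.

```python
import io
import pyarrow as pa
from pyarrow import csv

# Illustrative semicolon-separated input held in memory
data = io.BytesIO(b"id;name\n1;alice\n2;bob\n")

table = csv.read_csv(
    data,
    read_options=csv.ReadOptions(block_size=1 << 20),   # read in 1 MiB blocks
    parse_options=csv.ParseOptions(delimiter=";"),        # non-default delimiter
    convert_options=csv.ConvertOptions(
        column_types={"id": pa.int32()}                    # force 'id' to int32
    ),
)
print(table.schema)
```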
venv/lib/python3.10/site-packages/pyarrow/_cuda.pxd ADDED
@@ -0,0 +1,67 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ from pyarrow.lib cimport *
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.includes.libarrow_cuda cimport *
+
+
+ cdef class Context(_Weakrefable):
+ cdef:
+ shared_ptr[CCudaContext] context
+ int device_number
+
+ cdef void init(self, const shared_ptr[CCudaContext]& ctx)
+
+
+ cdef class IpcMemHandle(_Weakrefable):
+ cdef:
+ shared_ptr[CCudaIpcMemHandle] handle
+
+ cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h)
+
+
+ cdef class CudaBuffer(Buffer):
+ cdef:
+ shared_ptr[CCudaBuffer] cuda_buffer
+ object base
+
+ cdef void init_cuda(self,
+ const shared_ptr[CCudaBuffer]& buffer,
+ object base)
+
+
+ cdef class HostBuffer(Buffer):
+ cdef:
+ shared_ptr[CCudaHostBuffer] host_buffer
+
+ cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer)
+
+
+ cdef class BufferReader(NativeFile):
+ cdef:
+ CCudaBufferReader* reader
+ CudaBuffer buffer
+
+
+ cdef class BufferWriter(NativeFile):
+ cdef:
+ CCudaBufferWriter* writer
+ CudaBuffer buffer
venv/lib/python3.10/site-packages/pyarrow/_dataset.pxd ADDED
@@ -0,0 +1,183 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ """Dataset is currently unstable. APIs subject to change without notice."""
+
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow_dataset cimport *
+ from pyarrow.lib cimport *
+ from pyarrow._fs cimport FileSystem, FileInfo
+
+
+ cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*)
+
+ cdef class DatasetFactory(_Weakrefable):
+
+ cdef:
+ SharedPtrNoGIL[CDatasetFactory] wrapped
+ CDatasetFactory* factory
+
+ cdef init(self, const shared_ptr[CDatasetFactory]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CDatasetFactory]& sp)
+
+ cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil
+
+
+ cdef class Dataset(_Weakrefable):
+
+ cdef:
+ SharedPtrNoGIL[CDataset] wrapped
+ CDataset* dataset
+ public dict _scan_options
+
+ cdef void init(self, const shared_ptr[CDataset]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CDataset]& sp)
+
+ cdef shared_ptr[CDataset] unwrap(self) nogil
+
+
+ cdef class Scanner(_Weakrefable):
+ cdef:
+ SharedPtrNoGIL[CScanner] wrapped
+ CScanner* scanner
+
+ cdef void init(self, const shared_ptr[CScanner]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CScanner]& sp)
+
+ cdef shared_ptr[CScanner] unwrap(self)
+
+ @staticmethod
+ cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *
+
+
+ cdef class FragmentScanOptions(_Weakrefable):
+
+ cdef:
+ shared_ptr[CFragmentScanOptions] wrapped
+
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFragmentScanOptions]& sp)
+
+
+ cdef class FileFormat(_Weakrefable):
+
+ cdef:
+ shared_ptr[CFileFormat] wrapped
+ CFileFormat* format
+
+ cdef void init(self, const shared_ptr[CFileFormat]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFileFormat]& sp)
+
+ cdef inline shared_ptr[CFileFormat] unwrap(self)
+
+ cdef _set_default_fragment_scan_options(self, FragmentScanOptions options)
+
+ # Return a WrittenFile after a file was written.
+ # May be overridden by subclasses, e.g. to add metadata.
+ cdef WrittenFile _finish_write(self, path, base_dir,
+ CFileWriter* file_writer)
+
+
+ cdef class FileWriteOptions(_Weakrefable):
+
+ cdef:
+ shared_ptr[CFileWriteOptions] wrapped
+ CFileWriteOptions* c_options
+
+ cdef void init(self, const shared_ptr[CFileWriteOptions]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFileWriteOptions]& sp)
+
+ cdef inline shared_ptr[CFileWriteOptions] unwrap(self)
+
+
+ cdef class Fragment(_Weakrefable):
+
+ cdef:
+ SharedPtrNoGIL[CFragment] wrapped
+ CFragment* fragment
+
+ cdef void init(self, const shared_ptr[CFragment]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFragment]& sp)
+
+ cdef inline shared_ptr[CFragment] unwrap(self)
+
+
+ cdef class FileFragment(Fragment):
+
+ cdef:
+ CFileFragment* file_fragment
+
+ cdef void init(self, const shared_ptr[CFragment]& sp)
+
+
+ cdef class Partitioning(_Weakrefable):
+
+ cdef:
+ shared_ptr[CPartitioning] wrapped
+ CPartitioning* partitioning
+
+ cdef init(self, const shared_ptr[CPartitioning]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CPartitioning]& sp)
+
+ cdef inline shared_ptr[CPartitioning] unwrap(self)
+
+
+ cdef class PartitioningFactory(_Weakrefable):
+
+ cdef:
+ shared_ptr[CPartitioningFactory] wrapped
161
+ CPartitioningFactory* factory
162
+ object constructor
163
+ object options
164
+
165
+ cdef init(self, const shared_ptr[CPartitioningFactory]& sp)
166
+
167
+ @staticmethod
168
+ cdef wrap(const shared_ptr[CPartitioningFactory]& sp,
169
+ object constructor, object options)
170
+
171
+ cdef inline shared_ptr[CPartitioningFactory] unwrap(self)
172
+
173
+
174
+ cdef class WrittenFile(_Weakrefable):
175
+
176
+ # The full path to the created file
177
+ cdef public str path
178
+ # Optional Parquet metadata
179
+ # This metadata will have the file path attribute set to the path of
180
+ # the written file.
181
+ cdef public object metadata
182
+ # The size of the file in bytes
183
+ cdef public int64_t size
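These cdef declarations back the public pyarrow.dataset API (DatasetFactory discovers files, Dataset owns the fragments, Scanner executes the read). A minimal usage sketch, assuming a hypothetical "data/" directory of Parquet files with columns "id" and "value":

    import pyarrow.dataset as ds

    dataset = ds.dataset("data/", format="parquet")   # discovery via a DatasetFactory
    table = dataset.to_table(
        columns=["id", "value"],                      # projection
        filter=ds.field("value") > 0,                 # predicate pushed down to fragments
    )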
venv/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (78.6 kB).
 
venv/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx ADDED
@@ -0,0 +1,51 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for ORC file format."""
21
+
22
+ from pyarrow.lib cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.includes.libarrow_dataset cimport *
25
+
26
+ from pyarrow._dataset cimport FileFormat
27
+
28
+
29
+ cdef class OrcFileFormat(FileFormat):
30
+
31
+ def __init__(self):
32
+ self.init(shared_ptr[CFileFormat](new COrcFileFormat()))
33
+
34
+ def equals(self, OrcFileFormat other):
35
+ """
36
+ Parameters
37
+ ----------
38
+ other : pyarrow.dataset.OrcFileFormat
39
+
40
+ Returns
41
+ -------
42
+ True
43
+ """
44
+ return True
45
+
46
+ @property
47
+ def default_extname(self):
48
+ return "orc"
49
+
50
+ def __reduce__(self):
51
+ return OrcFileFormat, tuple()
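OrcFileFormat carries no configurable options, so an ORC dataset is usually opened just by naming the format. A short sketch, assuming pyarrow was built with ORC support and that the hypothetical "data/" directory holds .orc files:

    import pyarrow.dataset as ds

    orc_ds = ds.dataset("data/", format="orc")   # equivalent to format=ds.OrcFileFormat()
    table = orc_ds.to_table()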
venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (357 kB).
 
venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd ADDED
@@ -0,0 +1,42 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet file format."""
21
+
22
+ from pyarrow.includes.libarrow_dataset cimport *
23
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
24
+
25
+ from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions
26
+
27
+
28
+ cdef class ParquetFragmentScanOptions(FragmentScanOptions):
29
+ cdef:
30
+ CParquetFragmentScanOptions* parquet_options
31
+ object _parquet_decryption_config
32
+
33
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)
34
+ cdef CReaderProperties* reader_properties(self)
35
+ cdef ArrowReaderProperties* arrow_reader_properties(self)
36
+
37
+
38
+ cdef class ParquetFileWriteOptions(FileWriteOptions):
39
+
40
+ cdef:
41
+ CParquetFileWriteOptions* parquet_options
42
+ object _properties
venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx ADDED
@@ -0,0 +1,1023 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet file format."""
21
+
22
+ from cython cimport binding
23
+ from cython.operator cimport dereference as deref
24
+
25
+ import os
26
+ import warnings
27
+
28
+ import pyarrow as pa
29
+ from pyarrow.lib cimport *
30
+ from pyarrow.lib import frombytes, tobytes
31
+ from pyarrow.includes.libarrow cimport *
32
+ from pyarrow.includes.libarrow_dataset cimport *
33
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
34
+ from pyarrow._fs cimport FileSystem
35
+
36
+ from pyarrow._compute cimport Expression, _bind
37
+ from pyarrow._dataset cimport (
38
+ _make_file_source,
39
+ DatasetFactory,
40
+ FileFormat,
41
+ FileFragment,
42
+ FileWriteOptions,
43
+ Fragment,
44
+ FragmentScanOptions,
45
+ CacheOptions,
46
+ Partitioning,
47
+ PartitioningFactory,
48
+ WrittenFile
49
+ )
50
+
51
+ from pyarrow._parquet cimport (
52
+ _create_writer_properties, _create_arrow_writer_properties,
53
+ FileMetaData,
54
+ )
55
+
56
+
57
+ try:
58
+ from pyarrow._dataset_parquet_encryption import (
59
+ set_encryption_config, set_decryption_config
60
+ )
61
+ parquet_encryption_enabled = True
62
+ except ImportError:
63
+ parquet_encryption_enabled = False
64
+
65
+
66
+ cdef Expression _true = Expression._scalar(True)
67
+
68
+ ctypedef CParquetFileWriter* _CParquetFileWriterPtr
69
+
70
+
71
+ cdef class ParquetFileFormat(FileFormat):
72
+ """
73
+ FileFormat for Parquet
74
+
75
+ Parameters
76
+ ----------
77
+ read_options : ParquetReadOptions
78
+ Read options for the file.
79
+ default_fragment_scan_options : ParquetFragmentScanOptions
80
+ Scan Options for the file.
81
+ **kwargs : dict
82
+ Additional options for read option or scan option
83
+ """
84
+
85
+ cdef:
86
+ CParquetFileFormat* parquet_format
87
+
88
+ def __init__(self, read_options=None,
89
+ default_fragment_scan_options=None,
90
+ **kwargs):
91
+ cdef:
92
+ shared_ptr[CParquetFileFormat] wrapped
93
+ CParquetFileFormatReaderOptions* options
94
+
95
+ # Read/scan options
96
+ read_options_args = {option: kwargs[option] for option in kwargs
97
+ if option in _PARQUET_READ_OPTIONS}
98
+ scan_args = {option: kwargs[option] for option in kwargs
99
+ if option not in _PARQUET_READ_OPTIONS}
100
+ if read_options and read_options_args:
101
+ duplicates = ', '.join(sorted(read_options_args))
102
+ raise ValueError(f'If `read_options` is given, '
103
+ f'cannot specify {duplicates}')
104
+ if default_fragment_scan_options and scan_args:
105
+ duplicates = ', '.join(sorted(scan_args))
106
+ raise ValueError(f'If `default_fragment_scan_options` is given, '
107
+ f'cannot specify {duplicates}')
108
+
109
+ if read_options is None:
110
+ read_options = ParquetReadOptions(**read_options_args)
111
+ elif isinstance(read_options, dict):
112
+ # For backwards compatibility
113
+ duplicates = []
114
+ for option, value in read_options.items():
115
+ if option in _PARQUET_READ_OPTIONS:
116
+ read_options_args[option] = value
117
+ else:
118
+ duplicates.append(option)
119
+ scan_args[option] = value
120
+ if duplicates:
121
+ duplicates = ", ".join(duplicates)
122
+ warnings.warn(f'The scan options {duplicates} should be '
123
+ 'specified directly as keyword arguments')
124
+ read_options = ParquetReadOptions(**read_options_args)
125
+ elif not isinstance(read_options, ParquetReadOptions):
126
+ raise TypeError('`read_options` must be either a dictionary or an '
127
+ 'instance of ParquetReadOptions')
128
+
129
+ if default_fragment_scan_options is None:
130
+ default_fragment_scan_options = ParquetFragmentScanOptions(
131
+ **scan_args)
132
+ elif isinstance(default_fragment_scan_options, dict):
133
+ default_fragment_scan_options = ParquetFragmentScanOptions(
134
+ **default_fragment_scan_options)
135
+ elif not isinstance(default_fragment_scan_options,
136
+ ParquetFragmentScanOptions):
137
+ raise TypeError('`default_fragment_scan_options` must be either a '
138
+ 'dictionary or an instance of '
139
+ 'ParquetFragmentScanOptions')
140
+
141
+ wrapped = make_shared[CParquetFileFormat]()
142
+
143
+ options = &(wrapped.get().reader_options)
144
+ if read_options.dictionary_columns is not None:
145
+ for column in read_options.dictionary_columns:
146
+ options.dict_columns.insert(tobytes(column))
147
+ options.coerce_int96_timestamp_unit = \
148
+ read_options._coerce_int96_timestamp_unit
149
+
150
+ self.init(<shared_ptr[CFileFormat]> wrapped)
151
+ self.default_fragment_scan_options = default_fragment_scan_options
152
+
153
+ cdef void init(self, const shared_ptr[CFileFormat]& sp):
154
+ FileFormat.init(self, sp)
155
+ self.parquet_format = <CParquetFileFormat*> sp.get()
156
+
157
+ cdef WrittenFile _finish_write(self, path, base_dir,
158
+ CFileWriter* file_writer):
159
+ cdef:
160
+ FileMetaData parquet_metadata
161
+ CParquetFileWriter* parquet_file_writer
162
+
163
+ parquet_metadata = None
164
+ parquet_file_writer = dynamic_cast[_CParquetFileWriterPtr](file_writer)
165
+ with nogil:
166
+ metadata = deref(
167
+ deref(parquet_file_writer).parquet_writer()).metadata()
168
+ if metadata:
169
+ parquet_metadata = FileMetaData()
170
+ parquet_metadata.init(metadata)
171
+ parquet_metadata.set_file_path(os.path.relpath(path, base_dir))
172
+
173
+ size = GetResultValue(file_writer.GetBytesWritten())
174
+
175
+ return WrittenFile(path, parquet_metadata, size)
176
+
177
+ @property
178
+ def read_options(self):
179
+ cdef CParquetFileFormatReaderOptions* options
180
+ options = &self.parquet_format.reader_options
181
+ parquet_read_options = ParquetReadOptions(
182
+ dictionary_columns={frombytes(col)
183
+ for col in options.dict_columns},
184
+ )
185
+ # Read options getter/setter works with strings so setting
186
+ # the private property which uses the C Type
187
+ parquet_read_options._coerce_int96_timestamp_unit = \
188
+ options.coerce_int96_timestamp_unit
189
+ return parquet_read_options
190
+
191
+ def make_write_options(self, **kwargs):
192
+ """
193
+ Parameters
194
+ ----------
195
+ **kwargs : dict
196
+
197
+ Returns
198
+ -------
199
+ pyarrow.dataset.FileWriteOptions
200
+ """
201
+ # Safeguard from calling make_write_options as a static class method
202
+ if not isinstance(self, ParquetFileFormat):
203
+ raise TypeError("make_write_options() should be called on "
204
+ "an instance of ParquetFileFormat")
205
+ opts = FileFormat.make_write_options(self)
206
+ (<ParquetFileWriteOptions> opts).update(**kwargs)
207
+ return opts
208
+
209
+ cdef _set_default_fragment_scan_options(self, FragmentScanOptions options):
210
+ if options.type_name == 'parquet':
211
+ self.parquet_format.default_fragment_scan_options = options.wrapped
212
+ else:
213
+ super()._set_default_fragment_scan_options(options)
214
+
215
+ def equals(self, ParquetFileFormat other):
216
+ """
217
+ Parameters
218
+ ----------
219
+ other : pyarrow.dataset.ParquetFileFormat
220
+
221
+ Returns
222
+ -------
223
+ bool
224
+ """
225
+ return (
226
+ self.read_options.equals(other.read_options) and
227
+ self.default_fragment_scan_options ==
228
+ other.default_fragment_scan_options
229
+ )
230
+
231
+ @property
232
+ def default_extname(self):
233
+ return "parquet"
234
+
235
+ def __reduce__(self):
236
+ return ParquetFileFormat, (self.read_options,
237
+ self.default_fragment_scan_options)
238
+
239
+ def __repr__(self):
240
+ return f"<ParquetFileFormat read_options={self.read_options}>"
241
+
242
+ def make_fragment(self, file, filesystem=None,
243
+ Expression partition_expression=None, row_groups=None, *, file_size=None):
244
+ """
245
+ Make a FileFragment from a given file.
246
+
247
+ Parameters
248
+ ----------
249
+ file : file-like object, path-like or str
250
+ The file or file path to make a fragment from.
251
+ filesystem : Filesystem, optional
252
+ If `filesystem` is given, `file` must be a string and specifies
253
+ the path of the file to read from the filesystem.
254
+ partition_expression : Expression, optional
255
+ An expression that is guaranteed true for all rows in the fragment. Allows
256
+ the fragment to be potentially skipped while scanning with a filter.
257
+ row_groups : Iterable, optional
258
+ The indices of the row groups to include
259
+ file_size : int, optional
260
+ The size of the file in bytes. Can improve performance with high-latency filesystems
261
+ when file size needs to be known before reading.
262
+
263
+ Returns
264
+ -------
265
+ fragment : Fragment
266
+ The file fragment
267
+ """
268
+ cdef:
269
+ vector[int] c_row_groups
270
+ if partition_expression is None:
271
+ partition_expression = _true
272
+ if row_groups is None:
273
+ return super().make_fragment(file, filesystem,
274
+ partition_expression, file_size=file_size)
275
+
276
+ c_source = _make_file_source(file, filesystem, file_size)
277
+ c_row_groups = [<int> row_group for row_group in set(row_groups)]
278
+
279
+ c_fragment = <shared_ptr[CFragment]> GetResultValue(
280
+ self.parquet_format.MakeFragment(move(c_source),
281
+ partition_expression.unwrap(),
282
+ <shared_ptr[CSchema]>nullptr,
283
+ move(c_row_groups)))
284
+ return Fragment.wrap(move(c_fragment))
285
+
286
+
287
+ class RowGroupInfo:
288
+ """
289
+ A wrapper class for RowGroup information
290
+
291
+ Parameters
292
+ ----------
293
+ id : integer
294
+ The group ID.
295
+ metadata : FileMetaData
296
+ The rowgroup metadata.
297
+ schema : Schema
298
+ Schema of the rows.
299
+ """
300
+
301
+ def __init__(self, id, metadata, schema):
302
+ self.id = id
303
+ self.metadata = metadata
304
+ self.schema = schema
305
+
306
+ @property
307
+ def num_rows(self):
308
+ return self.metadata.num_rows
309
+
310
+ @property
311
+ def total_byte_size(self):
312
+ return self.metadata.total_byte_size
313
+
314
+ @property
315
+ def statistics(self):
316
+ def name_stats(i):
317
+ col = self.metadata.column(i)
318
+
319
+ stats = col.statistics
320
+ if stats is None or not stats.has_min_max:
321
+ return None, None
322
+
323
+ name = col.path_in_schema
324
+ field_index = self.schema.get_field_index(name)
325
+ if field_index < 0:
326
+ return None, None
327
+
328
+ typ = self.schema.field(field_index).type
329
+ return col.path_in_schema, {
330
+ 'min': pa.scalar(stats.min, type=typ).as_py(),
331
+ 'max': pa.scalar(stats.max, type=typ).as_py()
332
+ }
333
+
334
+ return {
335
+ name: stats for name, stats
336
+ in map(name_stats, range(self.metadata.num_columns))
337
+ if stats is not None
338
+ }
339
+
340
+ def __repr__(self):
341
+ return "RowGroupInfo({})".format(self.id)
342
+
343
+ def __eq__(self, other):
344
+ if isinstance(other, int):
345
+ return self.id == other
346
+ if not isinstance(other, RowGroupInfo):
347
+ return False
348
+ return self.id == other.id
349
+
350
+
351
+ cdef class ParquetFileFragment(FileFragment):
352
+ """A Fragment representing a parquet file."""
353
+
354
+ cdef:
355
+ CParquetFileFragment* parquet_file_fragment
356
+
357
+ cdef void init(self, const shared_ptr[CFragment]& sp):
358
+ FileFragment.init(self, sp)
359
+ self.parquet_file_fragment = <CParquetFileFragment*> sp.get()
360
+
361
+ def __reduce__(self):
362
+ buffer = self.buffer
363
+ # parquet_file_fragment.row_groups() is empty if the metadata
364
+ # information of the file is not yet populated
365
+ if not bool(self.parquet_file_fragment.row_groups()):
366
+ row_groups = None
367
+ else:
368
+ row_groups = [row_group.id for row_group in self.row_groups]
369
+
370
+ return self.format.make_fragment, (
371
+ self.path if buffer is None else buffer,
372
+ self.filesystem,
373
+ self.partition_expression,
374
+ row_groups
375
+ )
376
+
377
+ def ensure_complete_metadata(self):
378
+ """
379
+ Ensure that all metadata (statistics, physical schema, ...) have
380
+ been read and cached in this fragment.
381
+ """
382
+ with nogil:
383
+ check_status(self.parquet_file_fragment.EnsureCompleteMetadata())
384
+
385
+ @property
386
+ def row_groups(self):
387
+ metadata = self.metadata
388
+ cdef vector[int] row_groups = self.parquet_file_fragment.row_groups()
389
+ return [RowGroupInfo(i, metadata.row_group(i), self.physical_schema)
390
+ for i in row_groups]
391
+
392
+ @property
393
+ def metadata(self):
394
+ self.ensure_complete_metadata()
395
+ cdef FileMetaData metadata = FileMetaData()
396
+ metadata.init(self.parquet_file_fragment.metadata())
397
+ return metadata
398
+
399
+ @property
400
+ def num_row_groups(self):
401
+ """
402
+ Return the number of row groups viewed by this fragment (not the
403
+ number of row groups in the origin file).
404
+ """
405
+ self.ensure_complete_metadata()
406
+ return self.parquet_file_fragment.row_groups().size()
407
+
408
+ def split_by_row_group(self, Expression filter=None,
409
+ Schema schema=None):
410
+ """
411
+ Split the fragment into multiple fragments.
412
+
413
+ Yield a Fragment wrapping each row group in this ParquetFileFragment.
414
+ Row groups will be excluded whose metadata contradicts the optional
415
+ filter.
416
+
417
+ Parameters
418
+ ----------
419
+ filter : Expression, default None
420
+ Only include the row groups which satisfy this predicate (using
421
+ the Parquet RowGroup statistics).
422
+ schema : Schema, default None
423
+ Schema to use when filtering row groups. Defaults to the
424
+ Fragment's physical schema
425
+
426
+ Returns
427
+ -------
428
+ A list of Fragments
429
+ """
430
+ cdef:
431
+ vector[shared_ptr[CFragment]] c_fragments
432
+ CExpression c_filter
433
+ shared_ptr[CFragment] c_fragment
434
+
435
+ schema = schema or self.physical_schema
436
+ c_filter = _bind(filter, schema)
437
+ with nogil:
438
+ c_fragments = move(GetResultValue(
439
+ self.parquet_file_fragment.SplitByRowGroup(move(c_filter))))
440
+
441
+ return [Fragment.wrap(c_fragment) for c_fragment in c_fragments]
442
+
443
+ def subset(self, Expression filter=None, Schema schema=None,
444
+ object row_group_ids=None):
445
+ """
446
+ Create a subset of the fragment (viewing a subset of the row groups).
447
+
448
+ Subset can be specified by either a filter predicate (with optional
449
+ schema) or by a list of row group IDs. Note that when using a filter,
450
+ the resulting fragment can be empty (viewing no row groups).
451
+
452
+ Parameters
453
+ ----------
454
+ filter : Expression, default None
455
+ Only include the row groups which satisfy this predicate (using
456
+ the Parquet RowGroup statistics).
457
+ schema : Schema, default None
458
+ Schema to use when filtering row groups. Defaults to the
459
+ Fragment's physical schema
460
+ row_group_ids : list of ints
461
+ The row group IDs to include in the subset. Can only be specified
462
+ if `filter` is None.
463
+
464
+ Returns
465
+ -------
466
+ ParquetFileFragment
467
+ """
468
+ cdef:
469
+ CExpression c_filter
470
+ vector[int] c_row_group_ids
471
+ shared_ptr[CFragment] c_fragment
472
+
473
+ if filter is not None and row_group_ids is not None:
474
+ raise ValueError(
475
+ "Cannot specify both 'filter' and 'row_group_ids'."
476
+ )
477
+
478
+ if filter is not None:
479
+ schema = schema or self.physical_schema
480
+ c_filter = _bind(filter, schema)
481
+ with nogil:
482
+ c_fragment = move(GetResultValue(
483
+ self.parquet_file_fragment.SubsetWithFilter(
484
+ move(c_filter))))
485
+ elif row_group_ids is not None:
486
+ c_row_group_ids = [
487
+ <int> row_group for row_group in sorted(set(row_group_ids))
488
+ ]
489
+ with nogil:
490
+ c_fragment = move(GetResultValue(
491
+ self.parquet_file_fragment.SubsetWithIds(
492
+ move(c_row_group_ids))))
493
+ else:
494
+ raise ValueError(
495
+ "Need to specify one of 'filter' or 'row_group_ids'"
496
+ )
497
+
498
+ return Fragment.wrap(c_fragment)
499
+
500
+
501
+ cdef class ParquetReadOptions(_Weakrefable):
502
+ """
503
+ Parquet format specific options for reading.
504
+
505
+ Parameters
506
+ ----------
507
+ dictionary_columns : list of string, default None
508
+ Names of columns which should be dictionary encoded as
509
+ they are read
510
+ coerce_int96_timestamp_unit : str, default None
511
+ Cast timestamps that are stored in INT96 format to a particular
512
+ resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
513
+ and therefore INT96 timestamps will be inferred as timestamps
514
+ in nanoseconds
515
+ """
516
+
517
+ cdef public:
518
+ set dictionary_columns
519
+ TimeUnit _coerce_int96_timestamp_unit
520
+
521
+ # Also see _PARQUET_READ_OPTIONS
522
+ def __init__(self, dictionary_columns=None,
523
+ coerce_int96_timestamp_unit=None):
524
+ self.dictionary_columns = set(dictionary_columns or set())
525
+ self.coerce_int96_timestamp_unit = coerce_int96_timestamp_unit
526
+
527
+ @property
528
+ def coerce_int96_timestamp_unit(self):
529
+ return timeunit_to_string(self._coerce_int96_timestamp_unit)
530
+
531
+ @coerce_int96_timestamp_unit.setter
532
+ def coerce_int96_timestamp_unit(self, unit):
533
+ if unit is not None:
534
+ self._coerce_int96_timestamp_unit = string_to_timeunit(unit)
535
+ else:
536
+ self._coerce_int96_timestamp_unit = TimeUnit_NANO
537
+
538
+ def equals(self, ParquetReadOptions other):
539
+ """
540
+ Parameters
541
+ ----------
542
+ other : pyarrow.dataset.ParquetReadOptions
543
+
544
+ Returns
545
+ -------
546
+ bool
547
+ """
548
+ return (self.dictionary_columns == other.dictionary_columns and
549
+ self.coerce_int96_timestamp_unit ==
550
+ other.coerce_int96_timestamp_unit)
551
+
552
+ def __eq__(self, other):
553
+ try:
554
+ return self.equals(other)
555
+ except TypeError:
556
+ return False
557
+
558
+ def __repr__(self):
559
+ return (
560
+ f"<ParquetReadOptions"
561
+ f" dictionary_columns={self.dictionary_columns}"
562
+ f" coerce_int96_timestamp_unit={self.coerce_int96_timestamp_unit}>"
563
+ )
564
+
565
+
566
+ cdef class ParquetFileWriteOptions(FileWriteOptions):
567
+
568
+ def update(self, **kwargs):
569
+ """
570
+ Parameters
571
+ ----------
572
+ **kwargs : dict
573
+ """
574
+ arrow_fields = {
575
+ "use_deprecated_int96_timestamps",
576
+ "coerce_timestamps",
577
+ "allow_truncated_timestamps",
578
+ "use_compliant_nested_type",
579
+ }
580
+
581
+ setters = set()
582
+ for name, value in kwargs.items():
583
+ if name not in self._properties:
584
+ raise TypeError("unexpected parquet write option: " + name)
585
+ self._properties[name] = value
586
+ if name in arrow_fields:
587
+ setters.add(self._set_arrow_properties)
588
+ elif name == "encryption_config" and value is not None:
589
+ setters.add(self._set_encryption_config)
590
+ else:
591
+ setters.add(self._set_properties)
592
+
593
+ for setter in setters:
594
+ setter()
595
+
596
+ def _set_properties(self):
597
+ cdef CParquetFileWriteOptions* opts = self.parquet_options
598
+
599
+ opts.writer_properties = _create_writer_properties(
600
+ use_dictionary=self._properties["use_dictionary"],
601
+ compression=self._properties["compression"],
602
+ version=self._properties["version"],
603
+ write_statistics=self._properties["write_statistics"],
604
+ data_page_size=self._properties["data_page_size"],
605
+ compression_level=self._properties["compression_level"],
606
+ use_byte_stream_split=(
607
+ self._properties["use_byte_stream_split"]
608
+ ),
609
+ column_encoding=self._properties["column_encoding"],
610
+ data_page_version=self._properties["data_page_version"],
611
+ encryption_properties=self._properties["encryption_properties"],
612
+ write_batch_size=self._properties["write_batch_size"],
613
+ dictionary_pagesize_limit=self._properties["dictionary_pagesize_limit"],
614
+ write_page_index=self._properties["write_page_index"],
615
+ write_page_checksum=self._properties["write_page_checksum"],
616
+ sorting_columns=self._properties["sorting_columns"],
617
+ )
618
+
619
+ def _set_arrow_properties(self):
620
+ cdef CParquetFileWriteOptions* opts = self.parquet_options
621
+
622
+ opts.arrow_writer_properties = _create_arrow_writer_properties(
623
+ use_deprecated_int96_timestamps=(
624
+ self._properties["use_deprecated_int96_timestamps"]
625
+ ),
626
+ coerce_timestamps=self._properties["coerce_timestamps"],
627
+ allow_truncated_timestamps=(
628
+ self._properties["allow_truncated_timestamps"]
629
+ ),
630
+ writer_engine_version="V2",
631
+ use_compliant_nested_type=(
632
+ self._properties["use_compliant_nested_type"]
633
+ )
634
+ )
635
+
636
+ def _set_encryption_config(self):
637
+ if not parquet_encryption_enabled:
638
+ raise NotImplementedError(
639
+ "Encryption is not enabled in your installation of pyarrow, but an "
640
+ "encryption_config was provided."
641
+ )
642
+ set_encryption_config(self, self._properties["encryption_config"])
643
+
644
+ cdef void init(self, const shared_ptr[CFileWriteOptions]& sp):
645
+ FileWriteOptions.init(self, sp)
646
+ self.parquet_options = <CParquetFileWriteOptions*> sp.get()
647
+ self._properties = dict(
648
+ use_dictionary=True,
649
+ compression="snappy",
650
+ version="2.6",
651
+ write_statistics=None,
652
+ data_page_size=None,
653
+ compression_level=None,
654
+ use_byte_stream_split=False,
655
+ column_encoding=None,
656
+ data_page_version="1.0",
657
+ use_deprecated_int96_timestamps=False,
658
+ coerce_timestamps=None,
659
+ allow_truncated_timestamps=False,
660
+ use_compliant_nested_type=True,
661
+ encryption_properties=None,
662
+ write_batch_size=None,
663
+ dictionary_pagesize_limit=None,
664
+ write_page_index=False,
665
+ encryption_config=None,
666
+ write_page_checksum=False,
667
+ sorting_columns=None,
668
+ )
669
+
670
+ self._set_properties()
671
+ self._set_arrow_properties()
672
+
673
+ def __repr__(self):
674
+ return "<pyarrow.dataset.ParquetFileWriteOptions {0}>".format(
675
+ " ".join([f"{key}={value}" for key, value in self._properties.items()])
676
+ )
677
+
678
+
679
+ cdef set _PARQUET_READ_OPTIONS = {
680
+ 'dictionary_columns', 'coerce_int96_timestamp_unit'
681
+ }
682
+
683
+
684
+ cdef class ParquetFragmentScanOptions(FragmentScanOptions):
685
+ """
686
+ Scan-specific options for Parquet fragments.
687
+
688
+ Parameters
689
+ ----------
690
+ use_buffered_stream : bool, default False
691
+ Read files through buffered input streams rather than loading entire
692
+ row groups at once. This may be enabled to reduce memory overhead.
693
+ Disabled by default.
694
+ buffer_size : int, default 8192
695
+ Size of buffered stream, if enabled. Default is 8KB.
696
+ pre_buffer : bool, default True
697
+ If enabled, pre-buffer the raw Parquet data instead of issuing one
698
+ read per column chunk. This can improve performance on high-latency
699
+ filesystems (e.g. S3, GCS) by coalescing and issuing file reads in
700
+ parallel using a background I/O thread pool.
701
+ Set to False if you want to prioritize minimal memory usage
702
+ over maximum speed.
703
+ cache_options : pyarrow.CacheOptions, default None
704
+ Cache options used when pre_buffer is enabled. The default values should
705
+ be good for most use cases. You may want to adjust these for example if
706
+ you have exceptionally high latency to the file system.
707
+ thrift_string_size_limit : int, default None
708
+ If not None, override the maximum total string size allocated
709
+ when decoding Thrift structures. The default limit should be
710
+ sufficient for most Parquet files.
711
+ thrift_container_size_limit : int, default None
712
+ If not None, override the maximum total size of containers allocated
713
+ when decoding Thrift structures. The default limit should be
714
+ sufficient for most Parquet files.
715
+ decryption_config : pyarrow.dataset.ParquetDecryptionConfig, default None
716
+ If not None, use the provided ParquetDecryptionConfig to decrypt the
717
+ Parquet file.
718
+ page_checksum_verification : bool, default False
719
+ If True, verify the page checksum for each page read from the file.
720
+ """
721
+
722
+ # Avoid mistakenly creating attributes
723
+ __slots__ = ()
724
+
725
+ def __init__(self, *, bint use_buffered_stream=False,
726
+ buffer_size=8192,
727
+ bint pre_buffer=True,
728
+ cache_options=None,
729
+ thrift_string_size_limit=None,
730
+ thrift_container_size_limit=None,
731
+ decryption_config=None,
732
+ bint page_checksum_verification=False):
733
+ self.init(shared_ptr[CFragmentScanOptions](
734
+ new CParquetFragmentScanOptions()))
735
+ self.use_buffered_stream = use_buffered_stream
736
+ self.buffer_size = buffer_size
737
+ self.pre_buffer = pre_buffer
738
+ if cache_options is not None:
739
+ self.cache_options = cache_options
740
+ if thrift_string_size_limit is not None:
741
+ self.thrift_string_size_limit = thrift_string_size_limit
742
+ if thrift_container_size_limit is not None:
743
+ self.thrift_container_size_limit = thrift_container_size_limit
744
+ if decryption_config is not None:
745
+ self.parquet_decryption_config = decryption_config
746
+ self.page_checksum_verification = page_checksum_verification
747
+
748
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp):
749
+ FragmentScanOptions.init(self, sp)
750
+ self.parquet_options = <CParquetFragmentScanOptions*> sp.get()
751
+
752
+ cdef CReaderProperties* reader_properties(self):
753
+ return self.parquet_options.reader_properties.get()
754
+
755
+ cdef ArrowReaderProperties* arrow_reader_properties(self):
756
+ return self.parquet_options.arrow_reader_properties.get()
757
+
758
+ @property
759
+ def use_buffered_stream(self):
760
+ return self.reader_properties().is_buffered_stream_enabled()
761
+
762
+ @use_buffered_stream.setter
763
+ def use_buffered_stream(self, bint use_buffered_stream):
764
+ if use_buffered_stream:
765
+ self.reader_properties().enable_buffered_stream()
766
+ else:
767
+ self.reader_properties().disable_buffered_stream()
768
+
769
+ @property
770
+ def buffer_size(self):
771
+ return self.reader_properties().buffer_size()
772
+
773
+ @buffer_size.setter
774
+ def buffer_size(self, buffer_size):
775
+ if buffer_size <= 0:
776
+ raise ValueError("Buffer size must be larger than zero")
777
+ self.reader_properties().set_buffer_size(buffer_size)
778
+
779
+ @property
780
+ def pre_buffer(self):
781
+ return self.arrow_reader_properties().pre_buffer()
782
+
783
+ @pre_buffer.setter
784
+ def pre_buffer(self, bint pre_buffer):
785
+ self.arrow_reader_properties().set_pre_buffer(pre_buffer)
786
+
787
+ @property
788
+ def cache_options(self):
789
+ return CacheOptions.wrap(self.arrow_reader_properties().cache_options())
790
+
791
+ @cache_options.setter
792
+ def cache_options(self, CacheOptions options):
793
+ self.arrow_reader_properties().set_cache_options(options.unwrap())
794
+
795
+ @property
796
+ def thrift_string_size_limit(self):
797
+ return self.reader_properties().thrift_string_size_limit()
798
+
799
+ @thrift_string_size_limit.setter
800
+ def thrift_string_size_limit(self, size):
801
+ if size <= 0:
802
+ raise ValueError("size must be larger than zero")
803
+ self.reader_properties().set_thrift_string_size_limit(size)
804
+
805
+ @property
806
+ def thrift_container_size_limit(self):
807
+ return self.reader_properties().thrift_container_size_limit()
808
+
809
+ @thrift_container_size_limit.setter
810
+ def thrift_container_size_limit(self, size):
811
+ if size <= 0:
812
+ raise ValueError("size must be larger than zero")
813
+ self.reader_properties().set_thrift_container_size_limit(size)
814
+
815
+ @property
816
+ def parquet_decryption_config(self):
817
+ if not parquet_encryption_enabled:
818
+ raise NotImplementedError(
819
+ "Unable to access encryption features. "
820
+ "Encryption is not enabled in your installation of pyarrow."
821
+ )
822
+ return self._parquet_decryption_config
823
+
824
+ @parquet_decryption_config.setter
825
+ def parquet_decryption_config(self, config):
826
+ if not parquet_encryption_enabled:
827
+ raise NotImplementedError(
828
+ "Encryption is not enabled in your installation of pyarrow, but a "
829
+ "decryption_config was provided."
830
+ )
831
+ set_decryption_config(self, config)
832
+ self._parquet_decryption_config = config
833
+
834
+ @property
835
+ def page_checksum_verification(self):
836
+ return self.reader_properties().page_checksum_verification()
837
+
838
+ @page_checksum_verification.setter
839
+ def page_checksum_verification(self, bint page_checksum_verification):
840
+ self.reader_properties().set_page_checksum_verification(page_checksum_verification)
841
+
842
+ def equals(self, ParquetFragmentScanOptions other):
843
+ """
844
+ Parameters
845
+ ----------
846
+ other : pyarrow.dataset.ParquetFragmentScanOptions
847
+
848
+ Returns
849
+ -------
850
+ bool
851
+ """
852
+ attrs = (
853
+ self.use_buffered_stream, self.buffer_size, self.pre_buffer, self.cache_options,
854
+ self.thrift_string_size_limit, self.thrift_container_size_limit,
855
+ self.page_checksum_verification)
856
+ other_attrs = (
857
+ other.use_buffered_stream, other.buffer_size, other.pre_buffer, other.cache_options,
858
+ other.thrift_string_size_limit,
859
+ other.thrift_container_size_limit, other.page_checksum_verification)
860
+ return attrs == other_attrs
861
+
862
+ @staticmethod
863
+ @binding(True) # Required for Cython < 3
864
+ def _reconstruct(kwargs):
865
+ # __reduce__ doesn't allow passing named arguments directly to the
866
+ # reconstructor, hence this wrapper.
867
+ return ParquetFragmentScanOptions(**kwargs)
868
+
869
+ def __reduce__(self):
870
+ kwargs = dict(
871
+ use_buffered_stream=self.use_buffered_stream,
872
+ buffer_size=self.buffer_size,
873
+ pre_buffer=self.pre_buffer,
874
+ cache_options=self.cache_options,
875
+ thrift_string_size_limit=self.thrift_string_size_limit,
876
+ thrift_container_size_limit=self.thrift_container_size_limit,
877
+ page_checksum_verification=self.page_checksum_verification
878
+ )
879
+ return ParquetFragmentScanOptions._reconstruct, (kwargs,)
880
+
881
+
882
+ cdef class ParquetFactoryOptions(_Weakrefable):
883
+ """
884
+ Influences the discovery of a parquet dataset.
885
+
886
+ Parameters
887
+ ----------
888
+ partition_base_dir : str, optional
889
+ For the purposes of applying the partitioning, paths will be
890
+ stripped of the partition_base_dir. Files not matching the
891
+ partition_base_dir prefix will be skipped for partitioning discovery.
892
+ The ignored files will still be part of the Dataset, but will not
893
+ have partition information.
894
+ partitioning : Partitioning, PartitioningFactory, optional
895
+ The partitioning scheme applied to fragments, see ``Partitioning``.
896
+ validate_column_chunk_paths : bool, default False
897
+ Assert that all ColumnChunk paths are consistent. The parquet spec
898
+ allows for ColumnChunk data to be stored in multiple files, but
899
+ ParquetDatasetFactory supports only a single file with all ColumnChunk
900
+ data. If this flag is set construction of a ParquetDatasetFactory will
901
+ raise an error if ColumnChunk data is not resident in a single file.
902
+ """
903
+
904
+ cdef:
905
+ CParquetFactoryOptions options
906
+
907
+ __slots__ = () # avoid mistakenly creating attributes
908
+
909
+ def __init__(self, partition_base_dir=None, partitioning=None,
910
+ validate_column_chunk_paths=False):
911
+ if isinstance(partitioning, PartitioningFactory):
912
+ self.partitioning_factory = partitioning
913
+ elif isinstance(partitioning, Partitioning):
914
+ self.partitioning = partitioning
915
+
916
+ if partition_base_dir is not None:
917
+ self.partition_base_dir = partition_base_dir
918
+
919
+ self.options.validate_column_chunk_paths = validate_column_chunk_paths
920
+
921
+ cdef inline CParquetFactoryOptions unwrap(self):
922
+ return self.options
923
+
924
+ @property
925
+ def partitioning(self):
926
+ """Partitioning to apply to discovered files.
927
+
928
+ NOTE: setting this property will overwrite partitioning_factory.
929
+ """
930
+ c_partitioning = self.options.partitioning.partitioning()
931
+ if c_partitioning.get() == nullptr:
932
+ return None
933
+ return Partitioning.wrap(c_partitioning)
934
+
935
+ @partitioning.setter
936
+ def partitioning(self, Partitioning value):
937
+ self.options.partitioning = (<Partitioning> value).unwrap()
938
+
939
+ @property
940
+ def partitioning_factory(self):
941
+ """PartitioningFactory to apply to discovered files and
942
+ discover a Partitioning.
943
+
944
+ NOTE: setting this property will overwrite partitioning.
945
+ """
946
+ c_factory = self.options.partitioning.factory()
947
+ if c_factory.get() == nullptr:
948
+ return None
949
+ return PartitioningFactory.wrap(c_factory, None, None)
950
+
951
+ @partitioning_factory.setter
952
+ def partitioning_factory(self, PartitioningFactory value):
953
+ self.options.partitioning = (<PartitioningFactory> value).unwrap()
954
+
955
+ @property
956
+ def partition_base_dir(self):
957
+ """
958
+ Base directory to strip paths before applying the partitioning.
959
+ """
960
+ return frombytes(self.options.partition_base_dir)
961
+
962
+ @partition_base_dir.setter
963
+ def partition_base_dir(self, value):
964
+ self.options.partition_base_dir = tobytes(value)
965
+
966
+ @property
967
+ def validate_column_chunk_paths(self):
968
+ """
969
+ Base directory to strip paths before applying the partitioning.
970
+ """
971
+ return self.options.validate_column_chunk_paths
972
+
973
+ @validate_column_chunk_paths.setter
974
+ def validate_column_chunk_paths(self, value):
975
+ self.options.validate_column_chunk_paths = value
976
+
977
+
978
+ cdef class ParquetDatasetFactory(DatasetFactory):
979
+ """
980
+ Create a ParquetDatasetFactory from a Parquet `_metadata` file.
981
+
982
+ Parameters
983
+ ----------
984
+ metadata_path : str
985
+ Path to the `_metadata` parquet metadata-only file generated with
986
+ `pyarrow.parquet.write_metadata`.
987
+ filesystem : pyarrow.fs.FileSystem
988
+ Filesystem to read the metadata_path from, and subsequent parquet
989
+ files.
990
+ format : ParquetFileFormat
991
+ Parquet format options.
992
+ options : ParquetFactoryOptions, optional
993
+ Various flags influencing the discovery of filesystem paths.
994
+ """
995
+
996
+ cdef:
997
+ CParquetDatasetFactory* parquet_factory
998
+
999
+ def __init__(self, metadata_path, FileSystem filesystem not None,
1000
+ FileFormat format not None,
1001
+ ParquetFactoryOptions options=None):
1002
+ cdef:
1003
+ c_string c_path
1004
+ shared_ptr[CFileSystem] c_filesystem
1005
+ shared_ptr[CParquetFileFormat] c_format
1006
+ CResult[shared_ptr[CDatasetFactory]] result
1007
+ CParquetFactoryOptions c_options
1008
+
1009
+ c_path = tobytes(metadata_path)
1010
+ c_filesystem = filesystem.unwrap()
1011
+ c_format = static_pointer_cast[CParquetFileFormat, CFileFormat](
1012
+ format.unwrap())
1013
+ options = options or ParquetFactoryOptions()
1014
+ c_options = options.unwrap()
1015
+
1016
+ with nogil:
1017
+ result = CParquetDatasetFactory.MakeFromMetaDataPath(
1018
+ c_path, c_filesystem, c_format, c_options)
1019
+ self.init(GetResultValue(result))
1020
+
1021
+ cdef init(self, shared_ptr[CDatasetFactory]& sp):
1022
+ DatasetFactory.init(self, sp)
1023
+ self.parquet_factory = <CParquetDatasetFactory*> sp.get()
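Tying the classes above together, a hedged usage sketch (directory names and the "category" column are hypothetical): ParquetFileFormat holds the read options, ParquetFragmentScanOptions tunes scanning, and make_write_options() yields the ParquetFileWriteOptions consumed by ds.write_dataset():

    import pyarrow.dataset as ds

    fmt = ds.ParquetFileFormat(
        dictionary_columns=["category"],       # ParquetReadOptions field
        coerce_int96_timestamp_unit="ms",      # ParquetReadOptions field
    )
    scan_opts = ds.ParquetFragmentScanOptions(pre_buffer=True, buffer_size=64 * 1024)

    dataset = ds.dataset("in/", format=fmt)
    table = dataset.to_table(fragment_scan_options=scan_opts)

    write_opts = fmt.make_write_options(compression="zstd", write_page_index=True)
    ds.write_dataset(table, "out/", format=fmt, file_options=write_opts)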
venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx ADDED
@@ -0,0 +1,170 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet encryption."""
21
+
22
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
23
+ from pyarrow._parquet_encryption cimport *
24
+ from pyarrow._dataset_parquet cimport ParquetFragmentScanOptions, ParquetFileWriteOptions
25
+
26
+
27
+ cdef class ParquetEncryptionConfig(_Weakrefable):
28
+ """
29
+ Core configuration class encapsulating parameters for high-level encryption
30
+ within the Parquet framework.
31
+
32
+ The ParquetEncryptionConfig class serves as a bridge for passing encryption-related
33
+ parameters to the appropriate components within the Parquet library. It maintains references
34
+ to objects that define the encryption strategy, Key Management Service (KMS) configuration,
35
+ and specific encryption configurations for Parquet data.
36
+
37
+ Parameters
38
+ ----------
39
+ crypto_factory : pyarrow.parquet.encryption.CryptoFactory
40
+ Shared pointer to a `CryptoFactory` object. The `CryptoFactory` is responsible for
41
+ creating cryptographic components, such as encryptors and decryptors.
42
+ kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
43
+ Shared pointer to a `KmsConnectionConfig` object. This object holds the configuration
44
+ parameters necessary for connecting to a Key Management Service (KMS).
45
+ encryption_config : pyarrow.parquet.encryption.EncryptionConfiguration
46
+ Shared pointer to an `EncryptionConfiguration` object. This object defines specific
47
+ encryption settings for Parquet data, including the keys assigned to different columns.
48
+
49
+ Raises
50
+ ------
51
+ ValueError
52
+ Raised if `encryption_config` is None.
53
+ """
54
+ cdef:
55
+ shared_ptr[CParquetEncryptionConfig] c_config
56
+
57
+ # Avoid mistakenly creating attributes
58
+ __slots__ = ()
59
+
60
+ def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
61
+ EncryptionConfiguration encryption_config):
62
+
63
+ cdef shared_ptr[CEncryptionConfiguration] c_encryption_config
64
+
65
+ if crypto_factory is None:
66
+ raise ValueError("crypto_factory cannot be None")
67
+
68
+ if kms_connection_config is None:
69
+ raise ValueError("kms_connection_config cannot be None")
70
+
71
+ if encryption_config is None:
72
+ raise ValueError("encryption_config cannot be None")
73
+
74
+ self.c_config.reset(new CParquetEncryptionConfig())
75
+
76
+ c_encryption_config = pyarrow_unwrap_encryptionconfig(
77
+ encryption_config)
78
+
79
+ self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
80
+ self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
81
+ kms_connection_config)
82
+ self.c_config.get().encryption_config = c_encryption_config
83
+
84
+ @staticmethod
85
+ cdef wrap(shared_ptr[CParquetEncryptionConfig] c_config):
86
+ cdef ParquetEncryptionConfig python_config = ParquetEncryptionConfig.__new__(ParquetEncryptionConfig)
87
+ python_config.c_config = c_config
88
+ return python_config
89
+
90
+ cdef shared_ptr[CParquetEncryptionConfig] unwrap(self):
91
+ return self.c_config
92
+
93
+
94
+ cdef class ParquetDecryptionConfig(_Weakrefable):
95
+ """
96
+ Core configuration class encapsulating parameters for high-level decryption
97
+ within the Parquet framework.
98
+
99
+ ParquetDecryptionConfig is designed to pass decryption-related parameters to
100
+ the appropriate decryption components within the Parquet library. It holds references to
101
+ objects that define the decryption strategy, Key Management Service (KMS) configuration,
102
+ and specific decryption configurations for reading encrypted Parquet data.
103
+
104
+ Parameters
105
+ ----------
106
+ crypto_factory : pyarrow.parquet.encryption.CryptoFactory
107
+ Shared pointer to a `CryptoFactory` object, pivotal in creating cryptographic
108
+ components for the decryption process.
109
+ kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
110
+ Shared pointer to a `KmsConnectionConfig` object, containing parameters necessary
111
+ for connecting to a Key Management Service (KMS) during decryption.
112
+ decryption_config : pyarrow.parquet.encryption.DecryptionConfiguration
113
+ Shared pointer to a `DecryptionConfiguration` object, specifying decryption settings
114
+ for reading encrypted Parquet data.
115
+
116
+ Raises
117
+ ------
118
+ ValueError
119
+ Raised if `decryption_config` is None.
120
+ """
121
+
122
+ cdef:
123
+ shared_ptr[CParquetDecryptionConfig] c_config
124
+
125
+ # Avoid mistakenly creating attributes
126
+ __slots__ = ()
127
+
128
+ def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
129
+ DecryptionConfiguration decryption_config):
130
+
131
+ cdef shared_ptr[CDecryptionConfiguration] c_decryption_config
132
+
133
+ if decryption_config is None:
134
+ raise ValueError(
135
+ "decryption_config cannot be None")
136
+
137
+ self.c_config.reset(new CParquetDecryptionConfig())
138
+
139
+ c_decryption_config = pyarrow_unwrap_decryptionconfig(
140
+ decryption_config)
141
+
142
+ self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
143
+ self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
144
+ kms_connection_config)
145
+ self.c_config.get().decryption_config = c_decryption_config
146
+
147
+ @staticmethod
148
+ cdef wrap(shared_ptr[CParquetDecryptionConfig] c_config):
149
+ cdef ParquetDecryptionConfig python_config = ParquetDecryptionConfig.__new__(ParquetDecryptionConfig)
150
+ python_config.c_config = c_config
151
+ return python_config
152
+
153
+ cdef shared_ptr[CParquetDecryptionConfig] unwrap(self):
154
+ return self.c_config
155
+
156
+
157
+ def set_encryption_config(
158
+ ParquetFileWriteOptions opts not None,
159
+ ParquetEncryptionConfig config not None
160
+ ):
161
+ cdef shared_ptr[CParquetEncryptionConfig] c_config = config.unwrap()
162
+ opts.parquet_options.parquet_encryption_config = c_config
163
+
164
+
165
+ def set_decryption_config(
166
+ ParquetFragmentScanOptions opts not None,
167
+ ParquetDecryptionConfig config not None
168
+ ):
169
+ cdef shared_ptr[CParquetDecryptionConfig] c_config = config.unwrap()
170
+ opts.parquet_options.parquet_decryption_config = c_config
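set_encryption_config() and set_decryption_config() are the hooks reached through ParquetFileWriteOptions.update(encryption_config=...) and ParquetFragmentScanOptions(decryption_config=...). A hedged end-to-end sketch; the KMS client factory, key ids, and column name are placeholders, and an encryption-enabled pyarrow build is assumed:

    import pyarrow.dataset as ds
    import pyarrow.parquet.encryption as pe

    def kms_client_factory(kms_connection_config):
        # Placeholder: a real factory returns a pe.KmsClient implementation.
        raise NotImplementedError("supply a KmsClient for your key management service")

    crypto_factory = pe.CryptoFactory(kms_client_factory)
    kms_config = pe.KmsConnectionConfig()
    enc_config = pe.EncryptionConfiguration(
        footer_key="footer_key_id",
        column_keys={"column_key_id": ["secret_column"]},
    )

    enc = ds.ParquetEncryptionConfig(crypto_factory, kms_config, enc_config)
    fmt = ds.ParquetFileFormat()
    write_opts = fmt.make_write_options(encryption_config=enc)   # routed to set_encryption_config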
venv/lib/python3.10/site-packages/pyarrow/_dlpack.pxi ADDED
@@ -0,0 +1,46 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ cimport cpython
19
+ from cpython.pycapsule cimport PyCapsule_New
20
+
21
+
22
+ cdef void dlpack_pycapsule_deleter(object dltensor) noexcept:
23
+ cdef DLManagedTensor* dlm_tensor
24
+ cdef PyObject* err_type
25
+ cdef PyObject* err_value
26
+ cdef PyObject* err_traceback
27
+
28
+ # Do nothing if the capsule has been consumed
29
+ if cpython.PyCapsule_IsValid(dltensor, "used_dltensor"):
30
+ return
31
+
32
+ # An exception may be in-flight, we must save it in case
33
+ # we create another one
34
+ cpython.PyErr_Fetch(&err_type, &err_value, &err_traceback)
35
+
36
+ dlm_tensor = <DLManagedTensor*>cpython.PyCapsule_GetPointer(dltensor, 'dltensor')
37
+ if dlm_tensor == NULL:
38
+ cpython.PyErr_WriteUnraisable(dltensor)
39
+ # The deleter can be NULL if there is no way for the caller
40
+ # to provide a reasonable destructor
41
+ elif dlm_tensor.deleter:
42
+ dlm_tensor.deleter(dlm_tensor)
43
+ assert (not cpython.PyErr_Occurred())
44
+
45
+ # Set the error indicator from err_type, err_value, err_traceback
46
+ cpython.PyErr_Restore(err_type, err_value, err_traceback)
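The capsule deleter above is the cleanup half of Arrow's DLPack export; consumers drive the protocol through Array.__dlpack__. A minimal sketch, assuming a recent pyarrow and NumPy (the export only works for fixed-width primitive arrays without nulls):

    import numpy as np
    import pyarrow as pa

    arr = pa.array([1, 2, 3], type=pa.int64())   # fixed-width type, no nulls
    view = np.from_dlpack(arr)                   # zero-copy import via the DLPack capsule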
venv/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (114 kB).
 
venv/lib/python3.10/site-packages/pyarrow/_feather.pyx ADDED
@@ -0,0 +1,117 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # ---------------------------------------------------------------------
19
+ # Implement Feather file format
20
+
21
+ # cython: profile=False
22
+ # distutils: language = c++
23
+ # cython: language_level=3
24
+
25
+ from cython.operator cimport dereference as deref
26
+ from pyarrow.includes.common cimport *
27
+ from pyarrow.includes.libarrow cimport *
28
+ from pyarrow.includes.libarrow_feather cimport *
29
+ from pyarrow.lib cimport (check_status, Table, _Weakrefable,
30
+ get_writer, get_reader, pyarrow_wrap_table)
31
+ from pyarrow.lib import tobytes
32
+
33
+
34
+ class FeatherError(Exception):
35
+ pass
36
+
37
+
38
+ def write_feather(Table table, object dest, compression=None,
39
+ compression_level=None, chunksize=None, version=2):
40
+ cdef shared_ptr[COutputStream] sink
41
+ get_writer(dest, &sink)
42
+
43
+ cdef CFeatherProperties properties
44
+ if version == 2:
45
+ properties.version = kFeatherV2Version
46
+ else:
47
+ properties.version = kFeatherV1Version
48
+
49
+ if compression == 'zstd':
50
+ properties.compression = CCompressionType_ZSTD
51
+ elif compression == 'lz4':
52
+ properties.compression = CCompressionType_LZ4_FRAME
53
+ else:
54
+ properties.compression = CCompressionType_UNCOMPRESSED
55
+
56
+ if chunksize is not None:
57
+ properties.chunksize = chunksize
58
+
59
+ if compression_level is not None:
60
+ properties.compression_level = compression_level
61
+
62
+ with nogil:
63
+ check_status(WriteFeather(deref(table.table), sink.get(),
64
+ properties))
65
+
66
+
67
+ cdef class FeatherReader(_Weakrefable):
68
+ cdef:
69
+ shared_ptr[CFeatherReader] reader
70
+
71
+ def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads):
72
+ cdef:
73
+ shared_ptr[CRandomAccessFile] reader
74
+ CIpcReadOptions options = CIpcReadOptions.Defaults()
75
+ options.use_threads = use_threads
76
+
77
+ get_reader(source, use_memory_map, &reader)
78
+ with nogil:
79
+ self.reader = GetResultValue(CFeatherReader.Open(reader, options))
80
+
81
+ @property
82
+ def version(self):
83
+ return self.reader.get().version()
84
+
85
+ def read(self):
86
+ cdef shared_ptr[CTable] sp_table
87
+ with nogil:
88
+ check_status(self.reader.get()
89
+ .Read(&sp_table))
90
+
91
+ return pyarrow_wrap_table(sp_table)
92
+
93
+ def read_indices(self, indices):
94
+ cdef:
95
+ shared_ptr[CTable] sp_table
96
+ vector[int] c_indices
97
+
98
+ for index in indices:
99
+ c_indices.push_back(index)
100
+ with nogil:
101
+ check_status(self.reader.get()
102
+ .Read(c_indices, &sp_table))
103
+
104
+ return pyarrow_wrap_table(sp_table)
105
+
106
+ def read_names(self, names):
107
+ cdef:
108
+ shared_ptr[CTable] sp_table
109
+ vector[c_string] c_names
110
+
111
+ for name in names:
112
+ c_names.push_back(tobytes(name))
113
+ with nogil:
114
+ check_status(self.reader.get()
115
+ .Read(c_names, &sp_table))
116
+
117
+ return pyarrow_wrap_table(sp_table)
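This Cython module backs the public pyarrow.feather API. A brief usage sketch; compression="zstd" assumes the pyarrow build includes zstd support:

    import pyarrow as pa
    import pyarrow.feather as feather

    table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    feather.write_feather(table, "example.feather", compression="zstd")  # Feather V2 by default
    subset = feather.read_table("example.feather", columns=["a"])        # read a column subset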
venv/lib/python3.10/site-packages/pyarrow/_fs.pxd ADDED
@@ -0,0 +1,94 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.common cimport *
21
+ from pyarrow.includes.libarrow_fs cimport *
22
+ from pyarrow.lib import _detect_compression, frombytes, tobytes
23
+ from pyarrow.lib cimport *
24
+
25
+
26
+ cpdef enum FileType:
27
+ NotFound = <int8_t> CFileType_NotFound
28
+ Unknown = <int8_t> CFileType_Unknown
29
+ File = <int8_t> CFileType_File
30
+ Directory = <int8_t> CFileType_Directory
31
+
32
+
33
+ cdef class FileInfo(_Weakrefable):
34
+ cdef:
35
+ CFileInfo info
36
+
37
+ @staticmethod
38
+ cdef wrap(CFileInfo info)
39
+
40
+ cdef inline CFileInfo unwrap(self) nogil
41
+
42
+ @staticmethod
43
+ cdef CFileInfo unwrap_safe(obj)
44
+
45
+
46
+ cdef class FileSelector(_Weakrefable):
47
+ cdef:
48
+ CFileSelector selector
49
+
50
+ @staticmethod
51
+ cdef FileSelector wrap(CFileSelector selector)
52
+
53
+ cdef inline CFileSelector unwrap(self) nogil
54
+
55
+
56
+ cdef class FileSystem(_Weakrefable):
57
+ cdef:
58
+ shared_ptr[CFileSystem] wrapped
59
+ CFileSystem* fs
60
+
61
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
62
+
63
+ @staticmethod
64
+ cdef wrap(const shared_ptr[CFileSystem]& sp)
65
+
66
+ cdef inline shared_ptr[CFileSystem] unwrap(self) nogil
67
+
68
+
69
+ cdef class LocalFileSystem(FileSystem):
70
+ cdef:
71
+ CLocalFileSystem* localfs
72
+
73
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
74
+
75
+
76
+ cdef class SubTreeFileSystem(FileSystem):
77
+ cdef:
78
+ CSubTreeFileSystem* subtreefs
79
+
80
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
81
+
82
+
83
+ cdef class _MockFileSystem(FileSystem):
84
+ cdef:
85
+ CMockFileSystem* mockfs
86
+
87
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
88
+
89
+
90
+ cdef class PyFileSystem(FileSystem):
91
+ cdef:
92
+ CPyFileSystem* pyfs
93
+
94
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
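The declarations above (FileInfo, FileSelector, FileSystem and its subclasses, PyFileSystem) are implemented in _fs.pyx below. As a quick orientation, a hedged sketch of wrapping an fsspec filesystem in PyFileSystem via the public pyarrow.fs module, assuming the optional fsspec package is installed:

    import fsspec
    from pyarrow.fs import FSSpecHandler, PyFileSystem

    mem_fs = fsspec.filesystem("memory")          # in-memory fsspec filesystem
    pa_fs = PyFileSystem(FSSpecHandler(mem_fs))   # expose it through the Arrow FileSystem API

    with pa_fs.open_output_stream("/example.dat") as out:
        out.write(b"data")
    print(pa_fs.get_file_info("/example.dat"))    # <FileInfo ... type=FileType.File, size=4>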
venv/lib/python3.10/site-packages/pyarrow/_fs.pyx ADDED
@@ -0,0 +1,1634 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cpython.datetime cimport datetime, PyDateTime_DateTime
21
+ from cython cimport binding
22
+
23
+ from pyarrow.includes.common cimport *
24
+ from pyarrow.includes.libarrow_python cimport PyDateTime_to_TimePoint
25
+ from pyarrow.lib import _detect_compression, frombytes, tobytes
26
+ from pyarrow.lib cimport *
27
+ from pyarrow.util import _stringify_path
28
+
29
+ from abc import ABC, abstractmethod
30
+ from datetime import datetime, timezone
31
+ import os
32
+ import pathlib
33
+ import sys
34
+
35
+
36
+ cdef _init_ca_paths():
37
+ cdef CFileSystemGlobalOptions options
38
+
39
+ import ssl
40
+ paths = ssl.get_default_verify_paths()
41
+ if paths.cafile:
42
+ options.tls_ca_file_path = os.fsencode(paths.cafile)
43
+ if paths.capath:
44
+ options.tls_ca_dir_path = os.fsencode(paths.capath)
45
+ check_status(CFileSystemsInitialize(options))
46
+
47
+
48
+ if sys.platform == 'linux':
49
+ # ARROW-9261: On Linux, we may need to fixup the paths to TLS CA certs
50
+ # (especially in manylinux packages) since the values hardcoded at
51
+ # compile-time in libcurl may be wrong.
52
+ _init_ca_paths()
53
+
54
+
55
+ cdef inline c_string _path_as_bytes(path) except *:
56
+ # handle only abstract paths, not bound to any filesystem like pathlib is,
57
+ # so we only accept plain strings
58
+ if not isinstance(path, (bytes, str)):
59
+ raise TypeError('Path must be a string')
60
+ # tobytes always uses utf-8, which is more or less ok, at least on Windows
61
+ # since the C++ side then decodes from utf-8. On Unix, os.fsencode may be
62
+ # better.
63
+ return tobytes(path)
64
+
65
+
66
+ cdef object _wrap_file_type(CFileType ty):
67
+ return FileType(<int8_t> ty)
68
+
69
+
70
+ cdef CFileType _unwrap_file_type(FileType ty) except *:
71
+ if ty == FileType.Unknown:
72
+ return CFileType_Unknown
73
+ elif ty == FileType.NotFound:
74
+ return CFileType_NotFound
75
+ elif ty == FileType.File:
76
+ return CFileType_File
77
+ elif ty == FileType.Directory:
78
+ return CFileType_Directory
79
+ assert 0
80
+
81
+
82
+ def _file_type_to_string(ty):
83
+ # Python 3.11 changed str(IntEnum) to return the string representation
84
+ # of the integer value: https://github.com/python/cpython/issues/94763
85
+ return f"{ty.__class__.__name__}.{ty._name_}"
86
+
87
+
88
+ cdef class FileInfo(_Weakrefable):
89
+ """
90
+ FileSystem entry info.
91
+
92
+ Parameters
93
+ ----------
94
+ path : str
95
+ The full path to the filesystem entry.
96
+ type : FileType
97
+ The type of the filesystem entry.
98
+ mtime : datetime or float, default None
99
+ If given, the modification time of the filesystem entry.
100
+ If a float is given, it is the number of seconds since the
101
+ Unix epoch.
102
+ mtime_ns : int, default None
103
+ If given, the modification time of the filesystem entry,
104
+ in nanoseconds since the Unix epoch.
105
+ `mtime` and `mtime_ns` are mutually exclusive.
106
+ size : int, default None
107
+ If given, the filesystem entry size in bytes. This should only
108
+ be given if `type` is `FileType.File`.
109
+
110
+ Examples
111
+ --------
112
+ Generate a file:
113
+
114
+ >>> from pyarrow import fs
115
+ >>> local = fs.LocalFileSystem()
116
+ >>> path_fs = local_path + '/pyarrow-fs-example.dat'
117
+ >>> with local.open_output_stream(path_fs) as stream:
118
+ ... stream.write(b'data')
119
+ 4
120
+
121
+ Get FileInfo object using ``get_file_info()``:
122
+
123
+ >>> file_info = local.get_file_info(path_fs)
124
+ >>> file_info
125
+ <FileInfo for '.../pyarrow-fs-example.dat': type=FileType.File, size=4>
126
+
127
+ Inspect FileInfo attributes:
128
+
129
+ >>> file_info.type
130
+ <FileType.File: 2>
131
+
132
+ >>> file_info.is_file
133
+ True
134
+
135
+ >>> file_info.path
136
+ '/.../pyarrow-fs-example.dat'
137
+
138
+ >>> file_info.base_name
139
+ 'pyarrow-fs-example.dat'
140
+
141
+ >>> file_info.size
142
+ 4
143
+
144
+ >>> file_info.extension
145
+ 'dat'
146
+
147
+ >>> file_info.mtime # doctest: +SKIP
148
+ datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
149
+
150
+ >>> file_info.mtime_ns # doctest: +SKIP
151
+ 1656489370873922073
152
+ """
153
+
154
+ def __init__(self, path, FileType type=FileType.Unknown, *,
155
+ mtime=None, mtime_ns=None, size=None):
156
+ self.info.set_path(tobytes(path))
157
+ self.info.set_type(_unwrap_file_type(type))
158
+ if mtime is not None:
159
+ if mtime_ns is not None:
160
+ raise TypeError("Only one of mtime and mtime_ns "
161
+ "can be given")
162
+ if isinstance(mtime, datetime):
163
+ self.info.set_mtime(PyDateTime_to_TimePoint(
164
+ <PyDateTime_DateTime*> mtime))
165
+ else:
166
+ self.info.set_mtime(TimePoint_from_s(mtime))
167
+ elif mtime_ns is not None:
168
+ self.info.set_mtime(TimePoint_from_ns(mtime_ns))
169
+ if size is not None:
170
+ self.info.set_size(size)
171
+
172
+ @staticmethod
173
+ cdef wrap(CFileInfo info):
174
+ cdef FileInfo self = FileInfo.__new__(FileInfo)
175
+ self.info = move(info)
176
+ return self
177
+
178
+ cdef inline CFileInfo unwrap(self) nogil:
179
+ return self.info
180
+
181
+ @staticmethod
182
+ cdef CFileInfo unwrap_safe(obj):
183
+ if not isinstance(obj, FileInfo):
184
+ raise TypeError("Expected FileInfo instance, got {0}"
185
+ .format(type(obj)))
186
+ return (<FileInfo> obj).unwrap()
187
+
188
+ def __repr__(self):
189
+ def getvalue(attr):
190
+ try:
191
+ return getattr(self, attr)
192
+ except ValueError:
193
+ return ''
194
+
195
+ s = (f'<FileInfo for {self.path!r}: '
196
+ f'type={_file_type_to_string(self.type)}')
197
+ if self.is_file:
198
+ s += f', size={self.size}'
199
+ s += '>'
200
+ return s
201
+
202
+ @property
203
+ def type(self):
204
+ """
205
+ Type of the file.
206
+
207
+ The returned enum values can be the following:
208
+
209
+ - FileType.NotFound: target does not exist
210
+ - FileType.Unknown: target exists but its type is unknown (could be a
211
+ special file such as a Unix socket or character device, or
212
+ Windows NUL / CON / ...)
213
+ - FileType.File: target is a regular file
214
+ - FileType.Directory: target is a regular directory
215
+
216
+ Returns
217
+ -------
218
+ type : FileType
219
+ """
220
+ return _wrap_file_type(self.info.type())
221
+
222
+ @property
223
+ def is_file(self):
224
+ """
225
+ """
226
+ return self.type == FileType.File
227
+
228
+ @property
229
+ def path(self):
230
+ """
231
+ The full file path in the filesystem.
232
+
233
+ Examples
234
+ --------
235
+ >>> file_info = local.get_file_info(path)
236
+ >>> file_info.path
237
+ '/.../pyarrow-fs-example.dat'
238
+ """
239
+ return frombytes(self.info.path())
240
+
241
+ @property
242
+ def base_name(self):
243
+ """
244
+ The file base name.
245
+
246
+ Component after the last directory separator.
247
+
248
+ Examples
249
+ --------
250
+ >>> file_info = local.get_file_info(path)
251
+ >>> file_info.base_name
252
+ 'pyarrow-fs-example.dat'
253
+ """
254
+ return frombytes(self.info.base_name())
255
+
256
+ @property
257
+ def size(self):
258
+ """
259
+ The size in bytes, if available.
260
+
261
+ Only regular files are guaranteed to have a size.
262
+
263
+ Returns
264
+ -------
265
+ size : int or None
266
+ """
267
+ cdef int64_t size
268
+ size = self.info.size()
269
+ return (size if size != -1 else None)
270
+
271
+ @property
272
+ def extension(self):
273
+ """
274
+ The file extension.
275
+
276
+ Examples
277
+ --------
278
+ >>> file_info = local.get_file_info(path)
279
+ >>> file_info.extension
280
+ 'dat'
281
+ """
282
+ return frombytes(self.info.extension())
283
+
284
+ @property
285
+ def mtime(self):
286
+ """
287
+ The time of last modification, if available.
288
+
289
+ Returns
290
+ -------
291
+ mtime : datetime.datetime or None
292
+
293
+ Examples
294
+ --------
295
+ >>> file_info = local.get_file_info(path)
296
+ >>> file_info.mtime # doctest: +SKIP
297
+ datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
298
+ """
299
+ cdef int64_t nanoseconds
300
+ nanoseconds = TimePoint_to_ns(self.info.mtime())
301
+ return (datetime.fromtimestamp(nanoseconds / 1.0e9, timezone.utc)
302
+ if nanoseconds != -1 else None)
303
+
304
+ @property
305
+ def mtime_ns(self):
306
+ """
307
+ The time of last modification, if available, expressed in nanoseconds
308
+ since the Unix epoch.
309
+
310
+ Returns
311
+ -------
312
+ mtime_ns : int or None
313
+
314
+ Examples
315
+ --------
316
+ >>> file_info = local.get_file_info(path)
317
+ >>> file_info.mtime_ns # doctest: +SKIP
318
+ 1656489370873922073
319
+ """
320
+ cdef int64_t nanoseconds
321
+ nanoseconds = TimePoint_to_ns(self.info.mtime())
322
+ return (nanoseconds if nanoseconds != -1 else None)
323
+
324
+
325
+ cdef class FileSelector(_Weakrefable):
326
+ """
327
+ File and directory selector.
328
+
329
+ It contains a set of options that describes how to search for files and
330
+ directories.
331
+
332
+ Parameters
333
+ ----------
334
+ base_dir : str
335
+ The directory in which to select files. Relative paths also work, use
336
+ '.' for the current directory and '..' for the parent.
337
+ allow_not_found : bool, default False
338
+ The behavior if `base_dir` doesn't exist in the filesystem.
339
+ If false, an error is returned.
340
+ If true, an empty selection is returned.
341
+ recursive : bool, default False
342
+ Whether to recurse into subdirectories.
343
+
344
+ Examples
345
+ --------
346
+ List the contents of a directory and subdirectories:
347
+
348
+ >>> selector_1 = fs.FileSelector(local_path, recursive=True)
349
+ >>> local.get_file_info(selector_1) # doctest: +SKIP
350
+ [<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
351
+ <FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>,
352
+ <FileInfo for 'tmp/alphabet/subdir/example_copy.dat': type=FileType.File, size=4>]
353
+
354
+ List only the contents of the base directory:
355
+
356
+ >>> selector_2 = fs.FileSelector(local_path)
357
+ >>> local.get_file_info(selector_2) # doctest: +SKIP
358
+ [<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
359
+ <FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>]
360
+
361
+ Return empty selection if the directory doesn't exist:
362
+
363
+ >>> selector_not_found = fs.FileSelector(local_path + '/missing',
364
+ ... recursive=True,
365
+ ... allow_not_found=True)
366
+ >>> local.get_file_info(selector_not_found)
367
+ []
368
+ """
369
+
370
+ def __init__(self, base_dir, bint allow_not_found=False,
371
+ bint recursive=False):
372
+ self.base_dir = base_dir
373
+ self.recursive = recursive
374
+ self.allow_not_found = allow_not_found
375
+
376
+ @staticmethod
377
+ cdef FileSelector wrap(CFileSelector wrapped):
378
+ cdef FileSelector self = FileSelector.__new__(FileSelector)
379
+ self.selector = move(wrapped)
380
+ return self
381
+
382
+ cdef inline CFileSelector unwrap(self) nogil:
383
+ return self.selector
384
+
385
+ @property
386
+ def base_dir(self):
387
+ return frombytes(self.selector.base_dir)
388
+
389
+ @base_dir.setter
390
+ def base_dir(self, base_dir):
391
+ self.selector.base_dir = _path_as_bytes(base_dir)
392
+
393
+ @property
394
+ def allow_not_found(self):
395
+ return self.selector.allow_not_found
396
+
397
+ @allow_not_found.setter
398
+ def allow_not_found(self, bint allow_not_found):
399
+ self.selector.allow_not_found = allow_not_found
400
+
401
+ @property
402
+ def recursive(self):
403
+ return self.selector.recursive
404
+
405
+ @recursive.setter
406
+ def recursive(self, bint recursive):
407
+ self.selector.recursive = recursive
408
+
409
+ def __repr__(self):
410
+ return ("<FileSelector base_dir={0.base_dir!r} "
411
+ "recursive={0.recursive}>".format(self))
412
+
413
+
414
+ cdef class FileSystem(_Weakrefable):
415
+ """
416
+ Abstract file system API.
417
+ """
418
+
419
+ def __init__(self):
420
+ raise TypeError("FileSystem is an abstract class, instantiate one of "
421
+ "the subclasses instead: LocalFileSystem or "
422
+ "SubTreeFileSystem")
423
+
424
+ @staticmethod
425
+ def from_uri(uri):
426
+ """
427
+ Create a new FileSystem from URI or Path.
428
+
429
+ Recognized URI schemes are "file", "mock", "s3", "gs", "gcs", "hdfs" and "viewfs".
430
+ In addition, the argument can be a pathlib.Path object, or a string
431
+ describing an absolute local path.
432
+
433
+ Parameters
434
+ ----------
435
+ uri : string
436
+ URI-based path, for example: file:///some/local/path.
437
+
438
+ Returns
439
+ -------
440
+ tuple of (FileSystem, str path)
441
+ With (filesystem, path) tuple where path is the abstract path
442
+ inside the FileSystem instance.
443
+
444
+ Examples
445
+ --------
446
+ Create a new FileSystem subclass from a URI:
447
+
448
+ >>> uri = 'file:///{}/pyarrow-fs-example.dat'.format(local_path)
449
+ >>> local_new, path_new = fs.FileSystem.from_uri(uri)
450
+ >>> local_new
451
+ <pyarrow._fs.LocalFileSystem object at ...
452
+ >>> path_new
453
+ '/.../pyarrow-fs-example.dat'
454
+
455
+ Or from a s3 bucket:
456
+
457
+ >>> fs.FileSystem.from_uri("s3://usgs-landsat/collection02/")
458
+ (<pyarrow._s3fs.S3FileSystem object at ...>, 'usgs-landsat/collection02')
459
+ """
460
+ cdef:
461
+ c_string c_path
462
+ c_string c_uri
463
+ CResult[shared_ptr[CFileSystem]] result
464
+
465
+ if isinstance(uri, pathlib.Path):
466
+ # Make absolute
467
+ uri = uri.resolve().absolute()
468
+ c_uri = tobytes(_stringify_path(uri))
469
+ with nogil:
470
+ result = CFileSystemFromUriOrPath(c_uri, &c_path)
471
+ return FileSystem.wrap(GetResultValue(result)), frombytes(c_path)
472
+
473
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
474
+ self.wrapped = wrapped
475
+ self.fs = wrapped.get()
476
+
477
+ @staticmethod
478
+ cdef wrap(const shared_ptr[CFileSystem]& sp):
479
+ cdef FileSystem self
480
+
481
+ typ = frombytes(sp.get().type_name())
482
+ if typ == 'local':
483
+ self = LocalFileSystem.__new__(LocalFileSystem)
484
+ elif typ == 'mock':
485
+ self = _MockFileSystem.__new__(_MockFileSystem)
486
+ elif typ == 'subtree':
487
+ self = SubTreeFileSystem.__new__(SubTreeFileSystem)
488
+ elif typ == 's3':
489
+ from pyarrow._s3fs import S3FileSystem
490
+ self = S3FileSystem.__new__(S3FileSystem)
491
+ elif typ == 'gcs':
492
+ from pyarrow._gcsfs import GcsFileSystem
493
+ self = GcsFileSystem.__new__(GcsFileSystem)
494
+ elif typ == 'abfs':
495
+ from pyarrow._azurefs import AzureFileSystem
496
+ self = AzureFileSystem.__new__(AzureFileSystem)
497
+ elif typ == 'hdfs':
498
+ from pyarrow._hdfs import HadoopFileSystem
499
+ self = HadoopFileSystem.__new__(HadoopFileSystem)
500
+ elif typ.startswith('py::'):
501
+ self = PyFileSystem.__new__(PyFileSystem)
502
+ else:
503
+ raise TypeError('Cannot wrap FileSystem pointer')
504
+
505
+ self.init(sp)
506
+ return self
507
+
508
+ cdef inline shared_ptr[CFileSystem] unwrap(self) nogil:
509
+ return self.wrapped
510
+
511
+ def equals(self, FileSystem other not None):
512
+ """
513
+ Parameters
514
+ ----------
515
+ other : pyarrow.fs.FileSystem
516
+
517
+ Returns
518
+ -------
519
+ bool
520
+ """
521
+ return self.fs.Equals(other.unwrap())
522
+
523
+ def __eq__(self, other):
524
+ try:
525
+ return self.equals(other)
526
+ except TypeError:
527
+ return NotImplemented
528
+
529
+ @property
530
+ def type_name(self):
531
+ """
532
+ The filesystem's type name.
533
+ """
534
+ return frombytes(self.fs.type_name())
535
+
536
+ def get_file_info(self, paths_or_selector):
537
+ """
538
+ Get info for the given files.
539
+
540
+ Any symlink is automatically dereferenced, recursively. A non-existing
541
+ or unreachable file returns a FileInfo object and has a FileType of
542
+ value NotFound. An exception indicates a truly exceptional condition
543
+ (low-level I/O error, etc.).
544
+
545
+ Parameters
546
+ ----------
547
+ paths_or_selector : FileSelector, path-like or list of path-likes
548
+ Either a selector object, a path-like object or a list of
549
+ path-like objects. The selector's base directory will not be
550
+ part of the results, even if it exists. If it doesn't exist,
551
+ use `allow_not_found`.
552
+
553
+ Returns
554
+ -------
555
+ FileInfo or list of FileInfo
556
+ Single FileInfo object is returned for a single path, otherwise
557
+ a list of FileInfo objects is returned.
558
+
559
+ Examples
560
+ --------
561
+ >>> local
562
+ <pyarrow._fs.LocalFileSystem object at ...>
563
+ >>> local.get_file_info("/{}/pyarrow-fs-example.dat".format(local_path))
564
+ <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
565
+ """
566
+ cdef:
567
+ CFileInfo info
568
+ c_string path
569
+ vector[CFileInfo] infos
570
+ vector[c_string] paths
571
+ CFileSelector selector
572
+
573
+ if isinstance(paths_or_selector, FileSelector):
574
+ with nogil:
575
+ selector = (<FileSelector>paths_or_selector).selector
576
+ infos = GetResultValue(self.fs.GetFileInfo(selector))
577
+ elif isinstance(paths_or_selector, (list, tuple)):
578
+ paths = [_path_as_bytes(s) for s in paths_or_selector]
579
+ with nogil:
580
+ infos = GetResultValue(self.fs.GetFileInfo(paths))
581
+ elif isinstance(paths_or_selector, (bytes, str)):
582
+ path = _path_as_bytes(paths_or_selector)
583
+ with nogil:
584
+ info = GetResultValue(self.fs.GetFileInfo(path))
585
+ return FileInfo.wrap(info)
586
+ else:
587
+ raise TypeError('Must pass either path(s) or a FileSelector')
588
+
589
+ return [FileInfo.wrap(info) for info in infos]
590
+
591
+ def create_dir(self, path, *, bint recursive=True):
592
+ """
593
+ Create a directory and subdirectories.
594
+
595
+ This function succeeds if the directory already exists.
596
+
597
+ Parameters
598
+ ----------
599
+ path : str
600
+ The path of the new directory.
601
+ recursive : bool, default True
602
+ Create nested directories as well.
603
+ """
604
+ cdef c_string directory = _path_as_bytes(path)
605
+ with nogil:
606
+ check_status(self.fs.CreateDir(directory, recursive=recursive))
607
+
608
+ def delete_dir(self, path):
609
+ """
610
+ Delete a directory and its contents, recursively.
611
+
612
+ Parameters
613
+ ----------
614
+ path : str
615
+ The path of the directory to be deleted.
616
+ """
617
+ cdef c_string directory = _path_as_bytes(path)
618
+ with nogil:
619
+ check_status(self.fs.DeleteDir(directory))
620
+
621
+ def delete_dir_contents(self, path, *,
622
+ bint accept_root_dir=False,
623
+ bint missing_dir_ok=False):
624
+ """
625
+ Delete a directory's contents, recursively.
626
+
627
+ Like delete_dir, but doesn't delete the directory itself.
628
+
629
+ Parameters
630
+ ----------
631
+ path : str
632
+ The path of the directory to be deleted.
633
+ accept_root_dir : boolean, default False
634
+ Allow deleting the root directory's contents
635
+ (if path is empty or "/")
636
+ missing_dir_ok : boolean, default False
637
+ If False then an error is raised if path does
638
+ not exist
639
+ """
640
+ cdef c_string directory = _path_as_bytes(path)
641
+ if accept_root_dir and directory.strip(b"/") == b"":
642
+ with nogil:
643
+ check_status(self.fs.DeleteRootDirContents())
644
+ else:
645
+ with nogil:
646
+ check_status(self.fs.DeleteDirContents(directory,
647
+ missing_dir_ok))
648
+
649
+ def move(self, src, dest):
650
+ """
651
+ Move / rename a file or directory.
652
+
653
+ If the destination exists:
654
+ - if it is a non-empty directory, an error is returned
655
+ - otherwise, if it has the same type as the source, it is replaced
656
+ - otherwise, behavior is unspecified (implementation-dependent).
657
+
658
+ Parameters
659
+ ----------
660
+ src : str
661
+ The path of the file or the directory to be moved.
662
+ dest : str
663
+ The destination path where the file or directory is moved to.
664
+
665
+ Examples
666
+ --------
667
+ Create a new folder with a file:
668
+
669
+ >>> local.create_dir('/tmp/other_dir')
670
+ >>> local.copy_file(path,'/tmp/move_example.dat')
671
+
672
+ Move the file:
673
+
674
+ >>> local.move('/tmp/move_example.dat',
675
+ ... '/tmp/other_dir/move_example_2.dat')
676
+
677
+ Inspect the file info:
678
+
679
+ >>> local.get_file_info('/tmp/other_dir/move_example_2.dat')
680
+ <FileInfo for '/tmp/other_dir/move_example_2.dat': type=FileType.File, size=4>
681
+ >>> local.get_file_info('/tmp/move_example.dat')
682
+ <FileInfo for '/tmp/move_example.dat': type=FileType.NotFound>
683
+
684
+ Delete the folder:
685
+ >>> local.delete_dir('/tmp/other_dir')
686
+ """
687
+ cdef:
688
+ c_string source = _path_as_bytes(src)
689
+ c_string destination = _path_as_bytes(dest)
690
+ with nogil:
691
+ check_status(self.fs.Move(source, destination))
692
+
693
+ def copy_file(self, src, dest):
694
+ """
695
+ Copy a file.
696
+
697
+ If the destination exists and is a directory, an error is returned.
698
+ Otherwise, it is replaced.
699
+
700
+ Parameters
701
+ ----------
702
+ src : str
703
+ The path of the file to be copied from.
704
+ dest : str
705
+ The destination path where the file is copied to.
706
+
707
+ Examples
708
+ --------
709
+ >>> local.copy_file(path,
710
+ ... local_path + '/pyarrow-fs-example_copy.dat')
711
+
712
+ Inspect the file info:
713
+
714
+ >>> local.get_file_info(local_path + '/pyarrow-fs-example_copy.dat')
715
+ <FileInfo for '/.../pyarrow-fs-example_copy.dat': type=FileType.File, size=4>
716
+ >>> local.get_file_info(path)
717
+ <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
718
+ """
719
+ cdef:
720
+ c_string source = _path_as_bytes(src)
721
+ c_string destination = _path_as_bytes(dest)
722
+ with nogil:
723
+ check_status(self.fs.CopyFile(source, destination))
724
+
725
+ def delete_file(self, path):
726
+ """
727
+ Delete a file.
728
+
729
+ Parameters
730
+ ----------
731
+ path : str
732
+ The path of the file to be deleted.
733
+ """
734
+ cdef c_string file = _path_as_bytes(path)
735
+ with nogil:
736
+ check_status(self.fs.DeleteFile(file))
737
+
738
+ def _wrap_input_stream(self, stream, path, compression, buffer_size):
739
+ if buffer_size is not None and buffer_size != 0:
740
+ stream = BufferedInputStream(stream, buffer_size)
741
+ if compression == 'detect':
742
+ compression = _detect_compression(path)
743
+ if compression is not None:
744
+ stream = CompressedInputStream(stream, compression)
745
+ return stream
746
+
747
+ def _wrap_output_stream(self, stream, path, compression, buffer_size):
748
+ if buffer_size is not None and buffer_size != 0:
749
+ stream = BufferedOutputStream(stream, buffer_size)
750
+ if compression == 'detect':
751
+ compression = _detect_compression(path)
752
+ if compression is not None:
753
+ stream = CompressedOutputStream(stream, compression)
754
+ return stream
755
+
756
+ def open_input_file(self, path):
757
+ """
758
+ Open an input file for random access reading.
759
+
760
+ Parameters
761
+ ----------
762
+ path : str
763
+ The source to open for reading.
764
+
765
+ Returns
766
+ -------
767
+ stream : NativeFile
768
+
769
+ Examples
770
+ --------
771
+ Print the data from the file with `open_input_file()`:
772
+
773
+ >>> with local.open_input_file(path) as f:
774
+ ... print(f.readall())
775
+ b'data'
776
+ """
777
+ cdef:
778
+ c_string pathstr = _path_as_bytes(path)
779
+ NativeFile stream = NativeFile()
780
+ shared_ptr[CRandomAccessFile] in_handle
781
+
782
+ with nogil:
783
+ in_handle = GetResultValue(self.fs.OpenInputFile(pathstr))
784
+
785
+ stream.set_random_access_file(in_handle)
786
+ stream.is_readable = True
787
+ return stream
788
+
789
+ def open_input_stream(self, path, compression='detect', buffer_size=None):
790
+ """
791
+ Open an input stream for sequential reading.
792
+
793
+ Parameters
794
+ ----------
795
+ path : str
796
+ The source to open for reading.
797
+ compression : str optional, default 'detect'
798
+ The compression algorithm to use for on-the-fly decompression.
799
+ If "detect" and source is a file path, then compression will be
800
+ chosen based on the file extension.
801
+ If None, no compression will be applied. Otherwise, a well-known
802
+ algorithm name must be supplied (e.g. "gzip").
803
+ buffer_size : int optional, default None
804
+ If None or 0, no buffering will happen. Otherwise the size of the
805
+ temporary read buffer.
806
+
807
+ Returns
808
+ -------
809
+ stream : NativeFile
810
+
811
+ Examples
812
+ --------
813
+ Print the data from the file with `open_input_stream()`:
814
+
815
+ >>> with local.open_input_stream(path) as f:
816
+ ... print(f.readall())
817
+ b'data'
818
+ """
819
+ cdef:
820
+ c_string pathstr = _path_as_bytes(path)
821
+ NativeFile stream = NativeFile()
822
+ shared_ptr[CInputStream] in_handle
823
+
824
+ with nogil:
825
+ in_handle = GetResultValue(self.fs.OpenInputStream(pathstr))
826
+
827
+ stream.set_input_stream(in_handle)
828
+ stream.is_readable = True
829
+
830
+ return self._wrap_input_stream(
831
+ stream, path=path, compression=compression, buffer_size=buffer_size
832
+ )
833
+
834
+ def open_output_stream(self, path, compression='detect',
835
+ buffer_size=None, metadata=None):
836
+ """
837
+ Open an output stream for sequential writing.
838
+
839
+ If the target already exists, existing data is truncated.
840
+
841
+ Parameters
842
+ ----------
843
+ path : str
844
+ The source to open for writing.
845
+ compression : str optional, default 'detect'
846
+ The compression algorithm to use for on-the-fly compression.
847
+ If "detect" and source is a file path, then compression will be
848
+ chosen based on the file extension.
849
+ If None, no compression will be applied. Otherwise, a well-known
850
+ algorithm name must be supplied (e.g. "gzip").
851
+ buffer_size : int optional, default None
852
+ If None or 0, no buffering will happen. Otherwise the size of the
853
+ temporary write buffer.
854
+ metadata : dict optional, default None
855
+ If not None, a mapping of string keys to string values.
856
+ Some filesystems support storing metadata along the file
857
+ (such as "Content-Type").
858
+ Unsupported metadata keys will be ignored.
859
+
860
+ Returns
861
+ -------
862
+ stream : NativeFile
863
+
864
+ Examples
865
+ --------
866
+ >>> local = fs.LocalFileSystem()
867
+ >>> with local.open_output_stream(path) as stream:
868
+ ... stream.write(b'data')
869
+ 4
870
+ """
871
+ cdef:
872
+ c_string pathstr = _path_as_bytes(path)
873
+ NativeFile stream = NativeFile()
874
+ shared_ptr[COutputStream] out_handle
875
+ shared_ptr[const CKeyValueMetadata] c_metadata
876
+
877
+ if metadata is not None:
878
+ c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))
879
+
880
+ with nogil:
881
+ out_handle = GetResultValue(
882
+ self.fs.OpenOutputStream(pathstr, c_metadata))
883
+
884
+ stream.set_output_stream(out_handle)
885
+ stream.is_writable = True
886
+
887
+ return self._wrap_output_stream(
888
+ stream, path=path, compression=compression, buffer_size=buffer_size
889
+ )
890
+
891
+ def open_append_stream(self, path, compression='detect',
892
+ buffer_size=None, metadata=None):
893
+ """
894
+ Open an output stream for appending.
895
+
896
+ If the target doesn't exist, a new empty file is created.
897
+
898
+ .. note::
899
+ Some filesystem implementations do not support efficient
900
+ appending to an existing file, in which case this method will
901
+ raise NotImplementedError.
902
+ Consider writing to multiple files (using e.g. the dataset layer)
903
+ instead.
904
+
905
+ Parameters
906
+ ----------
907
+ path : str
908
+ The source to open for writing.
909
+ compression : str optional, default 'detect'
910
+ The compression algorithm to use for on-the-fly compression.
911
+ If "detect" and source is a file path, then compression will be
912
+ chosen based on the file extension.
913
+ If None, no compression will be applied. Otherwise, a well-known
914
+ algorithm name must be supplied (e.g. "gzip").
915
+ buffer_size : int optional, default None
916
+ If None or 0, no buffering will happen. Otherwise the size of the
917
+ temporary write buffer.
918
+ metadata : dict optional, default None
919
+ If not None, a mapping of string keys to string values.
920
+ Some filesystems support storing metadata along the file
921
+ (such as "Content-Type").
922
+ Unsupported metadata keys will be ignored.
923
+
924
+ Returns
925
+ -------
926
+ stream : NativeFile
927
+
928
+ Examples
929
+ --------
930
+ Append new data to an existing, non-empty file:
931
+
932
+ >>> with local.open_append_stream(path) as f:
933
+ ... f.write(b'+newly added')
934
+ 12
935
+
936
+ Print out the content of the file:
937
+
938
+ >>> with local.open_input_file(path) as f:
939
+ ... print(f.readall())
940
+ b'data+newly added'
941
+ """
942
+ cdef:
943
+ c_string pathstr = _path_as_bytes(path)
944
+ NativeFile stream = NativeFile()
945
+ shared_ptr[COutputStream] out_handle
946
+ shared_ptr[const CKeyValueMetadata] c_metadata
947
+
948
+ if metadata is not None:
949
+ c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))
950
+
951
+ with nogil:
952
+ out_handle = GetResultValue(
953
+ self.fs.OpenAppendStream(pathstr, c_metadata))
954
+
955
+ stream.set_output_stream(out_handle)
956
+ stream.is_writable = True
957
+
958
+ return self._wrap_output_stream(
959
+ stream, path=path, compression=compression, buffer_size=buffer_size
960
+ )
961
+
962
+ def normalize_path(self, path):
963
+ """
964
+ Normalize filesystem path.
965
+
966
+ Parameters
967
+ ----------
968
+ path : str
969
+ The path to normalize
970
+
971
+ Returns
972
+ -------
973
+ normalized_path : str
974
+ The normalized path
975
+ """
976
+ cdef:
977
+ c_string c_path = _path_as_bytes(path)
978
+ c_string c_path_normalized
979
+
980
+ c_path_normalized = GetResultValue(self.fs.NormalizePath(c_path))
981
+ return frombytes(c_path_normalized)
982
+
983
+
984
+ cdef class LocalFileSystem(FileSystem):
985
+ """
986
+ A FileSystem implementation accessing files on the local machine.
987
+
988
+ Details such as symlinks are abstracted away (symlinks are always followed,
989
+ except when deleting an entry).
990
+
991
+ Parameters
992
+ ----------
993
+ use_mmap : bool, default False
994
+ Whether open_input_stream and open_input_file should return
995
+ a mmap'ed file or a regular file.
996
+
997
+ Examples
998
+ --------
999
+ Create a FileSystem object with LocalFileSystem constructor:
1000
+
1001
+ >>> from pyarrow import fs
1002
+ >>> local = fs.LocalFileSystem()
1003
+ >>> local
1004
+ <pyarrow._fs.LocalFileSystem object at ...>
1005
+
1006
+ and write data to a file:
1007
+
1008
+ >>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
1009
+ ... stream.write(b'data')
1010
+ 4
1011
+ >>> with local.open_input_stream('/tmp/local_fs.dat') as stream:
1012
+ ... print(stream.readall())
1013
+ b'data'
1014
+
1015
+ Create a FileSystem object inferred from a URI of the saved file:
1016
+
1017
+ >>> local_new, path = fs.LocalFileSystem().from_uri('/tmp/local_fs.dat')
1018
+ >>> local_new
1019
+ <pyarrow._fs.LocalFileSystem object at ...
1020
+ >>> path
1021
+ '/tmp/local_fs.dat'
1022
+
1023
+ Check if FileSystems `local` and `local_new` are equal:
1024
+
1025
+ >>> local.equals(local_new)
1026
+ True
1027
+
1028
+ Compare two different FileSystems:
1029
+
1030
+ >>> local2 = fs.LocalFileSystem(use_mmap=True)
1031
+ >>> local.equals(local2)
1032
+ False
1033
+
1034
+ Copy a file and print out the data:
1035
+
1036
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/local_fs-copy.dat')
1037
+ >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as stream:
1038
+ ... print(stream.readall())
1039
+ ...
1040
+ b'data'
1041
+
1042
+ Open an output stream for appending, add text and print the new data:
1043
+
1044
+ >>> with local.open_append_stream('/tmp/local_fs-copy.dat') as f:
1045
+ ... f.write(b'+newly added')
1046
+ 12
1047
+
1048
+ >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as f:
1049
+ ... print(f.readall())
1050
+ b'data+newly added'
1051
+
1052
+ Create a directory, copy a file into it and then delete the whole directory:
1053
+
1054
+ >>> local.create_dir('/tmp/new_folder')
1055
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
1056
+ >>> local.get_file_info('/tmp/new_folder')
1057
+ <FileInfo for '/tmp/new_folder': type=FileType.Directory>
1058
+ >>> local.delete_dir('/tmp/new_folder')
1059
+ >>> local.get_file_info('/tmp/new_folder')
1060
+ <FileInfo for '/tmp/new_folder': type=FileType.NotFound>
1061
+
1062
+ Create a directory, copy a file into it and then delete
1063
+ the content of the directory:
1064
+
1065
+ >>> local.create_dir('/tmp/new_folder')
1066
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
1067
+ >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
1068
+ <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.File, size=4>
1069
+ >>> local.delete_dir_contents('/tmp/new_folder')
1070
+ >>> local.get_file_info('/tmp/new_folder')
1071
+ <FileInfo for '/tmp/new_folder': type=FileType.Directory>
1072
+ >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
1073
+ <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
1074
+
1075
+ Create a directory, copy a file into it and then delete
1076
+ the file from the directory:
1077
+
1078
+ >>> local.create_dir('/tmp/new_folder')
1079
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
1080
+ >>> local.delete_file('/tmp/new_folder/local_fs.dat')
1081
+ >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
1082
+ <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
1083
+ >>> local.get_file_info('/tmp/new_folder')
1084
+ <FileInfo for '/tmp/new_folder': type=FileType.Directory>
1085
+
1086
+ Move the file:
1087
+
1088
+ >>> local.move('/tmp/local_fs-copy.dat', '/tmp/new_folder/local_fs-copy.dat')
1089
+ >>> local.get_file_info('/tmp/new_folder/local_fs-copy.dat')
1090
+ <FileInfo for '/tmp/new_folder/local_fs-copy.dat': type=FileType.File, size=16>
1091
+ >>> local.get_file_info('/tmp/local_fs-copy.dat')
1092
+ <FileInfo for '/tmp/local_fs-copy.dat': type=FileType.NotFound>
1093
+
1094
+ To finish, delete the remaining file:
1095
+ >>> local.delete_file('/tmp/local_fs.dat')
1096
+ """
1097
+
1098
+ def __init__(self, *, use_mmap=False):
1099
+ cdef:
1100
+ CLocalFileSystemOptions opts
1101
+ shared_ptr[CLocalFileSystem] fs
1102
+
1103
+ opts = CLocalFileSystemOptions.Defaults()
1104
+ opts.use_mmap = use_mmap
1105
+
1106
+ fs = make_shared[CLocalFileSystem](opts)
1107
+ self.init(<shared_ptr[CFileSystem]> fs)
1108
+
1109
+ cdef init(self, const shared_ptr[CFileSystem]& c_fs):
1110
+ FileSystem.init(self, c_fs)
1111
+ self.localfs = <CLocalFileSystem*> c_fs.get()
1112
+
1113
+ @staticmethod
1114
+ @binding(True) # Required for cython < 3
1115
+ def _reconstruct(kwargs):
1116
+ # __reduce__ doesn't allow passing named arguments directly to the
1117
+ # reconstructor, hence this wrapper.
1118
+ return LocalFileSystem(**kwargs)
1119
+
1120
+ def __reduce__(self):
1121
+ cdef CLocalFileSystemOptions opts = self.localfs.options()
1122
+ return LocalFileSystem._reconstruct, (dict(
1123
+ use_mmap=opts.use_mmap),)
1124
+
1125
+
1126
+ cdef class SubTreeFileSystem(FileSystem):
1127
+ """
1128
+ Delegates to another implementation after prepending a fixed base path.
1129
+
1130
+ This is useful to expose a logical view of a subtree of a filesystem,
1131
+ for example a directory in a LocalFileSystem.
1132
+
1133
+ Note that this makes no security guarantee. For example, symlinks may
1134
+ allow "escaping" the subtree and accessing other parts of the underlying
1135
+ filesystem.
1136
+
1137
+ Parameters
1138
+ ----------
1139
+ base_path : str
1140
+ The root of the subtree.
1141
+ base_fs : FileSystem
1142
+ The FileSystem object to which the operations are delegated.
1143
+
1144
+ Examples
1145
+ --------
1146
+ Create a LocalFileSystem instance:
1147
+
1148
+ >>> from pyarrow import fs
1149
+ >>> local = fs.LocalFileSystem()
1150
+ >>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
1151
+ ... stream.write(b'data')
1152
+ 4
1153
+
1154
+ Create a directory and a SubTreeFileSystem instance:
1155
+
1156
+ >>> local.create_dir('/tmp/sub_tree')
1157
+ >>> subtree = fs.SubTreeFileSystem('/tmp/sub_tree', local)
1158
+
1159
+ Write data into the existing file:
1160
+
1161
+ >>> with subtree.open_append_stream('sub_tree_fs.dat') as f:
1162
+ ... f.write(b'+newly added')
1163
+ 12
1164
+
1165
+ Print out the attributes:
1166
+
1167
+ >>> subtree.base_fs
1168
+ <pyarrow._fs.LocalFileSystem object at ...>
1169
+ >>> subtree.base_path
1170
+ '/tmp/sub_tree/'
1171
+
1172
+ Get info for the given directory or given file:
1173
+
1174
+ >>> subtree.get_file_info('')
1175
+ <FileInfo for '': type=FileType.Directory>
1176
+ >>> subtree.get_file_info('sub_tree_fs.dat')
1177
+ <FileInfo for 'sub_tree_fs.dat': type=FileType.File, size=12>
1178
+
1179
+ Delete the file and directory:
1180
+
1181
+ >>> subtree.delete_file('sub_tree_fs.dat')
1182
+ >>> local.delete_dir('/tmp/sub_tree')
1183
+ >>> local.delete_file('/tmp/local_fs.dat')
1184
+
1185
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
1186
+ """
1187
+
1188
+ def __init__(self, base_path, FileSystem base_fs):
1189
+ cdef:
1190
+ c_string pathstr
1191
+ shared_ptr[CSubTreeFileSystem] wrapped
1192
+
1193
+ pathstr = _path_as_bytes(base_path)
1194
+ wrapped = make_shared[CSubTreeFileSystem](pathstr, base_fs.wrapped)
1195
+
1196
+ self.init(<shared_ptr[CFileSystem]> wrapped)
1197
+
1198
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
1199
+ FileSystem.init(self, wrapped)
1200
+ self.subtreefs = <CSubTreeFileSystem*> wrapped.get()
1201
+
1202
+ def __repr__(self):
1203
+ return ("SubTreeFileSystem(base_path={}, base_fs={}"
1204
+ .format(self.base_path, self.base_fs))
1205
+
1206
+ def __reduce__(self):
1207
+ return SubTreeFileSystem, (
1208
+ frombytes(self.subtreefs.base_path()),
1209
+ FileSystem.wrap(self.subtreefs.base_fs())
1210
+ )
1211
+
1212
+ @property
1213
+ def base_path(self):
1214
+ return frombytes(self.subtreefs.base_path())
1215
+
1216
+ @property
1217
+ def base_fs(self):
1218
+ return FileSystem.wrap(self.subtreefs.base_fs())
1219
+
1220
+
1221
+ cdef class _MockFileSystem(FileSystem):
1222
+
1223
+ def __init__(self, datetime current_time=None):
1224
+ cdef shared_ptr[CMockFileSystem] wrapped
1225
+
1226
+ current_time = current_time or datetime.now()
1227
+ wrapped = make_shared[CMockFileSystem](
1228
+ PyDateTime_to_TimePoint(<PyDateTime_DateTime*> current_time)
1229
+ )
1230
+
1231
+ self.init(<shared_ptr[CFileSystem]> wrapped)
1232
+
1233
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
1234
+ FileSystem.init(self, wrapped)
1235
+ self.mockfs = <CMockFileSystem*> wrapped.get()
1236
+
1237
+
1238
+ cdef class PyFileSystem(FileSystem):
1239
+ """
1240
+ A FileSystem with behavior implemented in Python.
1241
+
1242
+ Parameters
1243
+ ----------
1244
+ handler : FileSystemHandler
1245
+ The handler object implementing custom filesystem behavior.
1246
+
1247
+ Examples
1248
+ --------
1249
+ Create an fsspec-based filesystem object for GitHub:
1250
+
1251
+ >>> from fsspec.implementations import github
1252
+ >>> gfs = github.GithubFileSystem('apache', 'arrow') # doctest: +SKIP
1253
+
1254
+ Get a PyArrow FileSystem object:
1255
+
1256
+ >>> from pyarrow.fs import PyFileSystem, FSSpecHandler
1257
+ >>> pa_fs = PyFileSystem(FSSpecHandler(gfs)) # doctest: +SKIP
1258
+
1259
+ Use :func:`~pyarrow.fs.FileSystem` functionality ``get_file_info()``:
1260
+
1261
+ >>> pa_fs.get_file_info('README.md') # doctest: +SKIP
1262
+ <FileInfo for 'README.md': type=FileType.File, size=...>
1263
+ """
1264
+
1265
+ def __init__(self, handler):
1266
+ cdef:
1267
+ CPyFileSystemVtable vtable
1268
+ shared_ptr[CPyFileSystem] wrapped
1269
+
1270
+ if not isinstance(handler, FileSystemHandler):
1271
+ raise TypeError("Expected a FileSystemHandler instance, got {0}"
1272
+ .format(type(handler)))
1273
+
1274
+ vtable.get_type_name = _cb_get_type_name
1275
+ vtable.equals = _cb_equals
1276
+ vtable.get_file_info = _cb_get_file_info
1277
+ vtable.get_file_info_vector = _cb_get_file_info_vector
1278
+ vtable.get_file_info_selector = _cb_get_file_info_selector
1279
+ vtable.create_dir = _cb_create_dir
1280
+ vtable.delete_dir = _cb_delete_dir
1281
+ vtable.delete_dir_contents = _cb_delete_dir_contents
1282
+ vtable.delete_root_dir_contents = _cb_delete_root_dir_contents
1283
+ vtable.delete_file = _cb_delete_file
1284
+ vtable.move = _cb_move
1285
+ vtable.copy_file = _cb_copy_file
1286
+ vtable.open_input_stream = _cb_open_input_stream
1287
+ vtable.open_input_file = _cb_open_input_file
1288
+ vtable.open_output_stream = _cb_open_output_stream
1289
+ vtable.open_append_stream = _cb_open_append_stream
1290
+ vtable.normalize_path = _cb_normalize_path
1291
+
1292
+ wrapped = CPyFileSystem.Make(handler, move(vtable))
1293
+ self.init(<shared_ptr[CFileSystem]> wrapped)
1294
+
1295
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
1296
+ FileSystem.init(self, wrapped)
1297
+ self.pyfs = <CPyFileSystem*> wrapped.get()
1298
+
1299
+ @property
1300
+ def handler(self):
1301
+ """
1302
+ The filesystem's underlying handler.
1303
+
1304
+ Returns
1305
+ -------
1306
+ handler : FileSystemHandler
1307
+ """
1308
+ return <object> self.pyfs.handler()
1309
+
1310
+ def __reduce__(self):
1311
+ return PyFileSystem, (self.handler,)
1312
+
1313
+
1314
+ class FileSystemHandler(ABC):
1315
+ """
1316
+ An abstract class exposing methods to implement PyFileSystem's behavior.
1317
+ """
1318
+
1319
+ @abstractmethod
1320
+ def get_type_name(self):
1321
+ """
1322
+ Implement PyFileSystem.type_name.
1323
+ """
1324
+
1325
+ @abstractmethod
1326
+ def get_file_info(self, paths):
1327
+ """
1328
+ Implement PyFileSystem.get_file_info(paths).
1329
+
1330
+ Parameters
1331
+ ----------
1332
+ paths : list of str
1333
+ paths for which we want to retrieve the info.
1334
+ """
1335
+
1336
+ @abstractmethod
1337
+ def get_file_info_selector(self, selector):
1338
+ """
1339
+ Implement PyFileSystem.get_file_info(selector).
1340
+
1341
+ Parameters
1342
+ ----------
1343
+ selector : FileSelector
1344
+ selector for which we want to retrieve the info.
1345
+ """
1346
+
1347
+ @abstractmethod
1348
+ def create_dir(self, path, recursive):
1349
+ """
1350
+ Implement PyFileSystem.create_dir(...).
1351
+
1352
+ Parameters
1353
+ ----------
1354
+ path : str
1355
+ path of the directory.
1356
+ recursive : bool
1357
+ if the parent directories should be created too.
1358
+ """
1359
+
1360
+ @abstractmethod
1361
+ def delete_dir(self, path):
1362
+ """
1363
+ Implement PyFileSystem.delete_dir(...).
1364
+
1365
+ Parameters
1366
+ ----------
1367
+ path : str
1368
+ path of the directory.
1369
+ """
1370
+
1371
+ @abstractmethod
1372
+ def delete_dir_contents(self, path, missing_dir_ok=False):
1373
+ """
1374
+ Implement PyFileSystem.delete_dir_contents(...).
1375
+
1376
+ Parameters
1377
+ ----------
1378
+ path : str
1379
+ path of the directory.
1380
+ missing_dir_ok : bool
1381
+ if False an error should be raised if path does not exist
1382
+ """
1383
+
1384
+ @abstractmethod
1385
+ def delete_root_dir_contents(self):
1386
+ """
1387
+ Implement PyFileSystem.delete_dir_contents("/", accept_root_dir=True).
1388
+ """
1389
+
1390
+ @abstractmethod
1391
+ def delete_file(self, path):
1392
+ """
1393
+ Implement PyFileSystem.delete_file(...).
1394
+
1395
+ Parameters
1396
+ ----------
1397
+ path : str
1398
+ path of the file.
1399
+ """
1400
+
1401
+ @abstractmethod
1402
+ def move(self, src, dest):
1403
+ """
1404
+ Implement PyFileSystem.move(...).
1405
+
1406
+ Parameters
1407
+ ----------
1408
+ src : str
1409
+ path of what should be moved.
1410
+ dest : str
1411
+ path of where it should be moved to.
1412
+ """
1413
+
1414
+ @abstractmethod
1415
+ def copy_file(self, src, dest):
1416
+ """
1417
+ Implement PyFileSystem.copy_file(...).
1418
+
1419
+ Parameters
1420
+ ----------
1421
+ src : str
1422
+ path of what should be copied.
1423
+ dest : str
1424
+ path of where it should be copied to.
1425
+ """
1426
+
1427
+ @abstractmethod
1428
+ def open_input_stream(self, path):
1429
+ """
1430
+ Implement PyFileSystem.open_input_stream(...).
1431
+
1432
+ Parameters
1433
+ ----------
1434
+ path : str
1435
+ path of what should be opened.
1436
+ """
1437
+
1438
+ @abstractmethod
1439
+ def open_input_file(self, path):
1440
+ """
1441
+ Implement PyFileSystem.open_input_file(...).
1442
+
1443
+ Parameters
1444
+ ----------
1445
+ path : str
1446
+ path of what should be opened.
1447
+ """
1448
+
1449
+ @abstractmethod
1450
+ def open_output_stream(self, path, metadata):
1451
+ """
1452
+ Implement PyFileSystem.open_output_stream(...).
1453
+
1454
+ Parameters
1455
+ ----------
1456
+ path : str
1457
+ path of what should be opened.
1458
+ metadata : mapping
1459
+ Mapping of string keys to string values.
1460
+ Some filesystems support storing metadata along the file
1461
+ (such as "Content-Type").
1462
+ """
1463
+
1464
+ @abstractmethod
1465
+ def open_append_stream(self, path, metadata):
1466
+ """
1467
+ Implement PyFileSystem.open_append_stream(...).
1468
+
1469
+ Parameters
1470
+ ----------
1471
+ path : str
1472
+ path of what should be opened.
1473
+ metadata : mapping
1474
+ Mapping of string keys to string values.
1475
+ Some filesystems support storing metadata along the file
1476
+ (such as "Content-Type").
1477
+ """
1478
+
1479
+ @abstractmethod
1480
+ def normalize_path(self, path):
1481
+ """
1482
+ Implement PyFileSystem.normalize_path(...).
1483
+
1484
+ Parameters
1485
+ ----------
1486
+ path : str
1487
+ path of what should be normalized.
1488
+ """
1489
+
1490
+ # Callback definitions for CPyFileSystemVtable
1491
+
1492
+
1493
+ cdef void _cb_get_type_name(handler, c_string* out) except *:
1494
+ out[0] = tobytes("py::" + handler.get_type_name())
1495
+
1496
+ cdef c_bool _cb_equals(handler, const CFileSystem& c_other) except False:
1497
+ if c_other.type_name().startswith(b"py::"):
1498
+ return <object> (<const CPyFileSystem&> c_other).handler() == handler
1499
+
1500
+ return False
1501
+
1502
+ cdef void _cb_get_file_info(handler, const c_string& path,
1503
+ CFileInfo* out) except *:
1504
+ infos = handler.get_file_info([frombytes(path)])
1505
+ if not isinstance(infos, list) or len(infos) != 1:
1506
+ raise TypeError("get_file_info should have returned a 1-element list")
1507
+ out[0] = FileInfo.unwrap_safe(infos[0])
1508
+
1509
+ cdef void _cb_get_file_info_vector(handler, const vector[c_string]& paths,
1510
+ vector[CFileInfo]* out) except *:
1511
+ py_paths = [frombytes(paths[i]) for i in range(len(paths))]
1512
+ infos = handler.get_file_info(py_paths)
1513
+ if not isinstance(infos, list):
1514
+ raise TypeError("get_file_info should have returned a list")
1515
+ out[0].clear()
1516
+ out[0].reserve(len(infos))
1517
+ for info in infos:
1518
+ out[0].push_back(FileInfo.unwrap_safe(info))
1519
+
1520
+ cdef void _cb_get_file_info_selector(handler, const CFileSelector& selector,
1521
+ vector[CFileInfo]* out) except *:
1522
+ infos = handler.get_file_info_selector(FileSelector.wrap(selector))
1523
+ if not isinstance(infos, list):
1524
+ raise TypeError("get_file_info_selector should have returned a list")
1525
+ out[0].clear()
1526
+ out[0].reserve(len(infos))
1527
+ for info in infos:
1528
+ out[0].push_back(FileInfo.unwrap_safe(info))
1529
+
1530
+ cdef void _cb_create_dir(handler, const c_string& path,
1531
+ c_bool recursive) except *:
1532
+ handler.create_dir(frombytes(path), recursive)
1533
+
1534
+ cdef void _cb_delete_dir(handler, const c_string& path) except *:
1535
+ handler.delete_dir(frombytes(path))
1536
+
1537
+ cdef void _cb_delete_dir_contents(handler, const c_string& path,
1538
+ c_bool missing_dir_ok) except *:
1539
+ handler.delete_dir_contents(frombytes(path), missing_dir_ok)
1540
+
1541
+ cdef void _cb_delete_root_dir_contents(handler) except *:
1542
+ handler.delete_root_dir_contents()
1543
+
1544
+ cdef void _cb_delete_file(handler, const c_string& path) except *:
1545
+ handler.delete_file(frombytes(path))
1546
+
1547
+ cdef void _cb_move(handler, const c_string& src,
1548
+ const c_string& dest) except *:
1549
+ handler.move(frombytes(src), frombytes(dest))
1550
+
1551
+ cdef void _cb_copy_file(handler, const c_string& src,
1552
+ const c_string& dest) except *:
1553
+ handler.copy_file(frombytes(src), frombytes(dest))
1554
+
1555
+ cdef void _cb_open_input_stream(handler, const c_string& path,
1556
+ shared_ptr[CInputStream]* out) except *:
1557
+ stream = handler.open_input_stream(frombytes(path))
1558
+ if not isinstance(stream, NativeFile):
1559
+ raise TypeError("open_input_stream should have returned "
1560
+ "a PyArrow file")
1561
+ out[0] = (<NativeFile> stream).get_input_stream()
1562
+
1563
+ cdef void _cb_open_input_file(handler, const c_string& path,
1564
+ shared_ptr[CRandomAccessFile]* out) except *:
1565
+ stream = handler.open_input_file(frombytes(path))
1566
+ if not isinstance(stream, NativeFile):
1567
+ raise TypeError("open_input_file should have returned "
1568
+ "a PyArrow file")
1569
+ out[0] = (<NativeFile> stream).get_random_access_file()
1570
+
1571
+ cdef void _cb_open_output_stream(
1572
+ handler, const c_string& path,
1573
+ const shared_ptr[const CKeyValueMetadata]& metadata,
1574
+ shared_ptr[COutputStream]* out) except *:
1575
+ stream = handler.open_output_stream(
1576
+ frombytes(path), pyarrow_wrap_metadata(metadata))
1577
+ if not isinstance(stream, NativeFile):
1578
+ raise TypeError("open_output_stream should have returned "
1579
+ "a PyArrow file")
1580
+ out[0] = (<NativeFile> stream).get_output_stream()
1581
+
1582
+ cdef void _cb_open_append_stream(
1583
+ handler, const c_string& path,
1584
+ const shared_ptr[const CKeyValueMetadata]& metadata,
1585
+ shared_ptr[COutputStream]* out) except *:
1586
+ stream = handler.open_append_stream(
1587
+ frombytes(path), pyarrow_wrap_metadata(metadata))
1588
+ if not isinstance(stream, NativeFile):
1589
+ raise TypeError("open_append_stream should have returned "
1590
+ "a PyArrow file")
1591
+ out[0] = (<NativeFile> stream).get_output_stream()
1592
+
1593
+ cdef void _cb_normalize_path(handler, const c_string& path,
1594
+ c_string* out) except *:
1595
+ out[0] = tobytes(handler.normalize_path(frombytes(path)))
1596
+
1597
+
1598
+ def _copy_files(FileSystem source_fs, str source_path,
1599
+ FileSystem destination_fs, str destination_path,
1600
+ int64_t chunk_size, c_bool use_threads):
1601
+ # low-level helper exposed through pyarrow/fs.py::copy_files
1602
+ cdef:
1603
+ CFileLocator c_source
1604
+ vector[CFileLocator] c_sources
1605
+ CFileLocator c_destination
1606
+ vector[CFileLocator] c_destinations
1607
+
1608
+ c_source.filesystem = source_fs.unwrap()
1609
+ c_source.path = tobytes(source_path)
1610
+ c_sources.push_back(c_source)
1611
+
1612
+ c_destination.filesystem = destination_fs.unwrap()
1613
+ c_destination.path = tobytes(destination_path)
1614
+ c_destinations.push_back(c_destination)
1615
+
1616
+ with nogil:
1617
+ check_status(CCopyFiles(
1618
+ c_sources, c_destinations,
1619
+ c_default_io_context(), chunk_size, use_threads,
1620
+ ))
1621
+
1622
+
1623
+ def _copy_files_selector(FileSystem source_fs, FileSelector source_sel,
1624
+ FileSystem destination_fs, str destination_base_dir,
1625
+ int64_t chunk_size, c_bool use_threads):
1626
+ # low-level helper exposed through pyarrow/fs.py::copy_files
1627
+ cdef c_string c_destination_base_dir = tobytes(destination_base_dir)
1628
+
1629
+ with nogil:
1630
+ check_status(CCopyFilesWithSelector(
1631
+ source_fs.unwrap(), source_sel.unwrap(),
1632
+ destination_fs.unwrap(), c_destination_base_dir,
1633
+ c_default_io_context(), chunk_size, use_threads,
1634
+ ))
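The handler methods and `_cb_*` callbacks above form the bridge between Arrow's C++ `FileSystem` vtable and a pure-Python implementation: each C++ call is forwarded to the matching `FileSystemHandler` method, and the type name is reported with a `py::` prefix so `_cb_equals` can compare handlers. A minimal sketch of using that bridge through the public API follows; it assumes the third-party `fsspec` package is installed and uses its in-memory filesystem purely for illustration.

    import fsspec
    from pyarrow.fs import PyFileSystem, FSSpecHandler

    # FSSpecHandler is a ready-made FileSystemHandler subclass shipped with pyarrow
    py_fs = PyFileSystem(FSSpecHandler(fsspec.filesystem("memory")))

    py_fs.create_dir("data")
    with py_fs.open_output_stream("data/hello.txt") as f:
        f.write(b"hi")

    info = py_fs.get_file_info("data/hello.txt")
    print(info.type, info.size)  # FileType.File 2

Every call above passes through one of the `_cb_*` trampolines shown in this file before reaching the Python handler.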
venv/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (132 kB). View file
 
venv/lib/python3.10/site-packages/pyarrow/_generated_version.py ADDED
@@ -0,0 +1,16 @@
1
+ # file generated by setuptools_scm
2
+ # don't change, don't track in version control
3
+ TYPE_CHECKING = False
4
+ if TYPE_CHECKING:
5
+ from typing import Tuple, Union
6
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
7
+ else:
8
+ VERSION_TUPLE = object
9
+
10
+ version: str
11
+ __version__: str
12
+ __version_tuple__: VERSION_TUPLE
13
+ version_tuple: VERSION_TUPLE
14
+
15
+ __version__ = version = '16.0.0'
16
+ __version_tuple__ = version_tuple = (16, 0, 0)
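This file is the standard setuptools_scm stub; pyarrow re-exports the value, so downstream code normally reads it from the top-level package rather than from this module. A trivial illustration (the version string is the one recorded above):

    import pyarrow

    print(pyarrow.__version__)   # '16.0.0' for this build
    major, minor, patch = map(int, pyarrow.__version__.split(".")[:3])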
venv/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (130 kB). View file
 
venv/lib/python3.10/site-packages/pyarrow/_hdfs.pyx ADDED
@@ -0,0 +1,160 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.includes.common cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.includes.libarrow_fs cimport *
25
+ from pyarrow._fs cimport FileSystem
26
+
27
+ from pyarrow.lib import frombytes, tobytes
28
+ from pyarrow.util import _stringify_path
29
+
30
+
31
+ cdef class HadoopFileSystem(FileSystem):
32
+ """
33
+ HDFS backed FileSystem implementation
34
+
35
+ Parameters
36
+ ----------
37
+ host : str
38
+ HDFS host to connect to. Set to "default" for fs.defaultFS from
39
+ core-site.xml.
40
+ port : int, default 8020
41
+ HDFS port to connect to. Set to 0 for default or logical (HA) nodes.
42
+ user : str, default None
43
+ Username when connecting to HDFS; None implies login user.
44
+ replication : int, default 3
45
+ Number of copies each block will have.
46
+ buffer_size : int, default 0
47
+ If 0, no buffering will happen; otherwise the size of the temporary read
48
+ and write buffer.
49
+ default_block_size : int, default None
50
+ None means the default configuration for HDFS, a typical block size is
51
+ 128 MB.
52
+ kerb_ticket : string or path, default None
53
+ If not None, the path to the Kerberos ticket cache.
54
+ extra_conf : dict, default None
55
+ Extra key/value pairs for configuration; will override any
56
+ hdfs-site.xml properties.
57
+
58
+ Examples
59
+ --------
60
+ >>> from pyarrow import fs
61
+ >>> hdfs = fs.HadoopFileSystem(host, port, user=user, kerb_ticket=ticket_cache_path) # doctest: +SKIP
62
+
63
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
64
+ """
65
+
66
+ cdef:
67
+ CHadoopFileSystem* hdfs
68
+
69
+ def __init__(self, str host, int port=8020, *, str user=None,
70
+ int replication=3, int buffer_size=0,
71
+ default_block_size=None, kerb_ticket=None,
72
+ extra_conf=None):
73
+ cdef:
74
+ CHdfsOptions options
75
+ shared_ptr[CHadoopFileSystem] wrapped
76
+
77
+ if not host.startswith(('hdfs://', 'viewfs://')) and host != "default":
78
+ # TODO(kszucs): do more sanitization
79
+ host = 'hdfs://{}'.format(host)
80
+
81
+ options.ConfigureEndPoint(tobytes(host), int(port))
82
+ options.ConfigureReplication(replication)
83
+ options.ConfigureBufferSize(buffer_size)
84
+
85
+ if user is not None:
86
+ options.ConfigureUser(tobytes(user))
87
+ if default_block_size is not None:
88
+ options.ConfigureBlockSize(default_block_size)
89
+ if kerb_ticket is not None:
90
+ options.ConfigureKerberosTicketCachePath(
91
+ tobytes(_stringify_path(kerb_ticket)))
92
+ if extra_conf is not None:
93
+ for k, v in extra_conf.items():
94
+ options.ConfigureExtraConf(tobytes(k), tobytes(v))
95
+
96
+ with nogil:
97
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
98
+ self.init(<shared_ptr[CFileSystem]> wrapped)
99
+
100
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
101
+ FileSystem.init(self, wrapped)
102
+ self.hdfs = <CHadoopFileSystem*> wrapped.get()
103
+
104
+ @staticmethod
105
+ def from_uri(uri):
106
+ """
107
+ Instantiate a HadoopFileSystem object from a URI string.
108
+
109
+ The following two calls are equivalent
110
+
111
+ * ``HadoopFileSystem.from_uri('hdfs://localhost:8020/?user=test\
112
+ &replication=1')``
113
+ * ``HadoopFileSystem('localhost', port=8020, user='test', \
114
+ replication=1)``
115
+
116
+ Parameters
117
+ ----------
118
+ uri : str
119
+ A string URI describing the connection to HDFS.
120
+ In order to change the user, replication, buffer_size or
121
+ default_block_size, pass the values as query parts.
122
+
123
+ Returns
124
+ -------
125
+ HadoopFileSystem
126
+ """
127
+ cdef:
128
+ HadoopFileSystem self = HadoopFileSystem.__new__(HadoopFileSystem)
129
+ shared_ptr[CHadoopFileSystem] wrapped
130
+ CHdfsOptions options
131
+
132
+ options = GetResultValue(CHdfsOptions.FromUriString(tobytes(uri)))
133
+ with nogil:
134
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
135
+
136
+ self.init(<shared_ptr[CFileSystem]> wrapped)
137
+ return self
138
+
139
+ @staticmethod
140
+ @binding(True) # Required for cython < 3
141
+ def _reconstruct(kwargs):
142
+ # __reduce__ doesn't allow passing named arguments directly to the
143
+ # reconstructor, hence this wrapper.
144
+ return HadoopFileSystem(**kwargs)
145
+
146
+ def __reduce__(self):
147
+ cdef CHdfsOptions opts = self.hdfs.options()
148
+ return (
149
+ HadoopFileSystem._reconstruct, (dict(
150
+ host=frombytes(opts.connection_config.host),
151
+ port=opts.connection_config.port,
152
+ user=frombytes(opts.connection_config.user),
153
+ replication=opts.replication,
154
+ buffer_size=opts.buffer_size,
155
+ default_block_size=opts.default_block_size,
156
+ kerb_ticket=frombytes(opts.connection_config.kerb_ticket),
157
+ extra_conf={frombytes(k): frombytes(v)
158
+ for k, v in opts.connection_config.extra_conf},
159
+ ),)
160
+ )
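A brief usage sketch of the class above; the URI, host, and user are placeholders, and actually connecting requires a reachable HDFS cluster plus a local libhdfs/Java environment, so this is illustrative only.

    from pyarrow import fs

    # the two forms are equivalent, as the from_uri docstring notes
    hdfs = fs.HadoopFileSystem.from_uri(
        "hdfs://namenode:8020/?user=alice&replication=2")
    # hdfs = fs.HadoopFileSystem("namenode", port=8020, user="alice", replication=2)

    # __reduce__/_reconstruct above make the filesystem picklable, which is what
    # lets it be shipped to worker processes (each worker reconnects on unpickle)
    import pickle
    hdfs_again = pickle.loads(pickle.dumps(hdfs))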
venv/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (108 kB). View file
 
venv/lib/python3.10/site-packages/pyarrow/_json.pxd ADDED
@@ -0,0 +1,36 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.libarrow cimport *
21
+ from pyarrow.lib cimport _Weakrefable
22
+
23
+
24
+ cdef class ParseOptions(_Weakrefable):
25
+ cdef:
26
+ CJSONParseOptions options
27
+
28
+ @staticmethod
29
+ cdef ParseOptions wrap(CJSONParseOptions options)
30
+
31
+ cdef class ReadOptions(_Weakrefable):
32
+ cdef:
33
+ CJSONReadOptions options
34
+
35
+ @staticmethod
36
+ cdef ReadOptions wrap(CJSONReadOptions options)
venv/lib/python3.10/site-packages/pyarrow/_json.pyx ADDED
@@ -0,0 +1,310 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+ # cython: language_level = 3
21
+
22
+ from pyarrow.includes.common cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.lib cimport (_Weakrefable, MemoryPool,
25
+ maybe_unbox_memory_pool,
26
+ get_input_stream, pyarrow_wrap_table,
27
+ pyarrow_wrap_schema, pyarrow_unwrap_schema)
28
+
29
+
30
+ cdef class ReadOptions(_Weakrefable):
31
+ """
32
+ Options for reading JSON files.
33
+
34
+ Parameters
35
+ ----------
36
+ use_threads : bool, optional (default True)
37
+ Whether to use multiple threads to accelerate reading
38
+ block_size : int, optional
39
+ How many bytes to process at a time from the input stream.
40
+ This will determine multi-threading granularity as well as
41
+ the size of individual chunks in the Table.
42
+ """
43
+
44
+ # Avoid mistakenly creating attributes
45
+ __slots__ = ()
46
+
47
+ def __init__(self, use_threads=None, block_size=None):
48
+ self.options = CJSONReadOptions.Defaults()
49
+ if use_threads is not None:
50
+ self.use_threads = use_threads
51
+ if block_size is not None:
52
+ self.block_size = block_size
53
+
54
+ @property
55
+ def use_threads(self):
56
+ """
57
+ Whether to use multiple threads to accelerate reading.
58
+ """
59
+ return self.options.use_threads
60
+
61
+ @use_threads.setter
62
+ def use_threads(self, value):
63
+ self.options.use_threads = value
64
+
65
+ @property
66
+ def block_size(self):
67
+ """
68
+ How many bytes to process at a time from the input stream.
69
+
70
+ This will determine multi-threading granularity as well as the size of
71
+ individual chunks in the Table.
72
+ """
73
+ return self.options.block_size
74
+
75
+ @block_size.setter
76
+ def block_size(self, value):
77
+ self.options.block_size = value
78
+
79
+ def __reduce__(self):
80
+ return ReadOptions, (
81
+ self.use_threads,
82
+ self.block_size
83
+ )
84
+
85
+ def equals(self, ReadOptions other):
86
+ """
87
+ Parameters
88
+ ----------
89
+ other : pyarrow.json.ReadOptions
90
+
91
+ Returns
92
+ -------
93
+ bool
94
+ """
95
+ return (
96
+ self.use_threads == other.use_threads and
97
+ self.block_size == other.block_size
98
+ )
99
+
100
+ def __eq__(self, other):
101
+ try:
102
+ return self.equals(other)
103
+ except TypeError:
104
+ return False
105
+
106
+ @staticmethod
107
+ cdef ReadOptions wrap(CJSONReadOptions options):
108
+ out = ReadOptions()
109
+ out.options = options # shallow copy
110
+ return out
111
+
112
+
113
+ cdef class ParseOptions(_Weakrefable):
114
+ """
115
+ Options for parsing JSON files.
116
+
117
+ Parameters
118
+ ----------
119
+ explicit_schema : Schema, optional (default None)
120
+ Optional explicit schema (no type inference, ignores other fields).
121
+ newlines_in_values : bool, optional (default False)
122
+ Whether objects may be printed across multiple lines (for example
123
+ pretty printed). If false, input must end with an empty line.
124
+ unexpected_field_behavior : str, default "infer"
125
+ How JSON fields outside of explicit_schema (if given) are treated.
126
+
127
+ Possible behaviors:
128
+
129
+ - "ignore": unexpected JSON fields are ignored
130
+ - "error": error out on unexpected JSON fields
131
+ - "infer": unexpected JSON fields are type-inferred and included in
132
+ the output
133
+ """
134
+
135
+ __slots__ = ()
136
+
137
+ def __init__(self, explicit_schema=None, newlines_in_values=None,
138
+ unexpected_field_behavior=None):
139
+ self.options = CJSONParseOptions.Defaults()
140
+ if explicit_schema is not None:
141
+ self.explicit_schema = explicit_schema
142
+ if newlines_in_values is not None:
143
+ self.newlines_in_values = newlines_in_values
144
+ if unexpected_field_behavior is not None:
145
+ self.unexpected_field_behavior = unexpected_field_behavior
146
+
147
+ def __reduce__(self):
148
+ return ParseOptions, (
149
+ self.explicit_schema,
150
+ self.newlines_in_values,
151
+ self.unexpected_field_behavior
152
+ )
153
+
154
+ @property
155
+ def explicit_schema(self):
156
+ """
157
+ Optional explicit schema (no type inference, ignores other fields)
158
+ """
159
+ if self.options.explicit_schema.get() == NULL:
160
+ return None
161
+ else:
162
+ return pyarrow_wrap_schema(self.options.explicit_schema)
163
+
164
+ @explicit_schema.setter
165
+ def explicit_schema(self, value):
166
+ self.options.explicit_schema = pyarrow_unwrap_schema(value)
167
+
168
+ @property
169
+ def newlines_in_values(self):
170
+ """
171
+ Whether newline characters are allowed in JSON values.
172
+ Setting this to True reduces the performance of multi-threaded
173
+ JSON reading.
174
+ """
175
+ return self.options.newlines_in_values
176
+
177
+ @newlines_in_values.setter
178
+ def newlines_in_values(self, value):
179
+ self.options.newlines_in_values = value
180
+
181
+ @property
182
+ def unexpected_field_behavior(self):
183
+ """
184
+ How JSON fields outside of explicit_schema (if given) are treated.
185
+
186
+ Possible behaviors:
187
+
188
+ - "ignore": unexpected JSON fields are ignored
189
+ - "error": error out on unexpected JSON fields
190
+ - "infer": unexpected JSON fields are type-inferred and included in
191
+ the output
192
+
193
+ Set to "infer" by default.
194
+ """
195
+ v = self.options.unexpected_field_behavior
196
+ if v == CUnexpectedFieldBehavior_Ignore:
197
+ return "ignore"
198
+ elif v == CUnexpectedFieldBehavior_Error:
199
+ return "error"
200
+ elif v == CUnexpectedFieldBehavior_InferType:
201
+ return "infer"
202
+ else:
203
+ raise ValueError('Unexpected value for unexpected_field_behavior')
204
+
205
+ @unexpected_field_behavior.setter
206
+ def unexpected_field_behavior(self, value):
207
+ cdef CUnexpectedFieldBehavior v
208
+
209
+ if value == "ignore":
210
+ v = CUnexpectedFieldBehavior_Ignore
211
+ elif value == "error":
212
+ v = CUnexpectedFieldBehavior_Error
213
+ elif value == "infer":
214
+ v = CUnexpectedFieldBehavior_InferType
215
+ else:
216
+ raise ValueError(
217
+ "Unexpected value `{}` for `unexpected_field_behavior`, pass "
218
+ "either `ignore`, `error` or `infer`.".format(value)
219
+ )
220
+
221
+ self.options.unexpected_field_behavior = v
222
+
223
+ def equals(self, ParseOptions other):
224
+ """
225
+ Parameters
226
+ ----------
227
+ other : pyarrow.json.ParseOptions
228
+
229
+ Returns
230
+ -------
231
+ bool
232
+ """
233
+ return (
234
+ self.explicit_schema == other.explicit_schema and
235
+ self.newlines_in_values == other.newlines_in_values and
236
+ self.unexpected_field_behavior == other.unexpected_field_behavior
237
+ )
238
+
239
+ def __eq__(self, other):
240
+ try:
241
+ return self.equals(other)
242
+ except TypeError:
243
+ return False
244
+
245
+ @staticmethod
246
+ cdef ParseOptions wrap(CJSONParseOptions options):
247
+ out = ParseOptions()
248
+ out.options = options # shallow copy
249
+ return out
250
+
251
+
252
+ cdef _get_reader(input_file, shared_ptr[CInputStream]* out):
253
+ use_memory_map = False
254
+ get_input_stream(input_file, use_memory_map, out)
255
+
256
+ cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out):
257
+ if read_options is None:
258
+ out[0] = CJSONReadOptions.Defaults()
259
+ else:
260
+ out[0] = read_options.options
261
+
262
+ cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out):
263
+ if parse_options is None:
264
+ out[0] = CJSONParseOptions.Defaults()
265
+ else:
266
+ out[0] = parse_options.options
267
+
268
+
269
+ def read_json(input_file, read_options=None, parse_options=None,
270
+ MemoryPool memory_pool=None):
271
+ """
272
+ Read a Table from a stream of JSON data.
273
+
274
+ Parameters
275
+ ----------
276
+ input_file : str, path or file-like object
277
+ The location of JSON data. Currently only the line-delimited JSON
278
+ format is supported.
279
+ read_options : pyarrow.json.ReadOptions, optional
280
+ Options for the JSON reader (see ReadOptions constructor for defaults).
281
+ parse_options : pyarrow.json.ParseOptions, optional
282
+ Options for the JSON parser
283
+ (see ParseOptions constructor for defaults).
284
+ memory_pool : MemoryPool, optional
285
+ Pool to allocate Table memory from.
286
+
287
+ Returns
288
+ -------
289
+ :class:`pyarrow.Table`
290
+ Contents of the JSON file as an in-memory table.
291
+ """
292
+ cdef:
293
+ shared_ptr[CInputStream] stream
294
+ CJSONReadOptions c_read_options
295
+ CJSONParseOptions c_parse_options
296
+ shared_ptr[CJSONReader] reader
297
+ shared_ptr[CTable] table
298
+
299
+ _get_reader(input_file, &stream)
300
+ _get_read_options(read_options, &c_read_options)
301
+ _get_parse_options(parse_options, &c_parse_options)
302
+
303
+ reader = GetResultValue(
304
+ CJSONReader.Make(maybe_unbox_memory_pool(memory_pool),
305
+ stream, c_read_options, c_parse_options))
306
+
307
+ with nogil:
308
+ table = GetResultValue(reader.get().Read())
309
+
310
+ return pyarrow_wrap_table(table)
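A short usage sketch of the public `pyarrow.json` wrapper over the reader and option classes defined above; the sample data and schema are made up for illustration.

    import io
    import pyarrow as pa
    import pyarrow.json as pj

    buf = io.BytesIO(b'{"x": 1, "y": "a"}\n{"x": 2, "y": "b"}\n')

    parse_opts = pj.ParseOptions(
        explicit_schema=pa.schema([("x", pa.int64()), ("y", pa.string())]),
        unexpected_field_behavior="ignore")
    read_opts = pj.ReadOptions(use_threads=True, block_size=1 << 20)

    table = pj.read_json(buf, read_options=read_opts, parse_options=parse_opts)
    print(table.num_rows, table.schema)  # 2 rows, the explicit schema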
venv/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (204 kB). View file
 
venv/lib/python3.10/site-packages/pyarrow/_orc.pxd ADDED
@@ -0,0 +1,134 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from libcpp cimport bool as c_bool
22
+ from libc.string cimport const_char
23
+ from libcpp.vector cimport vector as std_vector
24
+ from pyarrow.includes.common cimport *
25
+ from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus,
26
+ CResult, CTable, CMemoryPool,
27
+ CKeyValueMetadata,
28
+ CRecordBatch,
29
+ CTable, CCompressionType,
30
+ CRandomAccessFile, COutputStream,
31
+ TimeUnit)
32
+
33
+ cdef extern from "arrow/adapters/orc/options.h" \
34
+ namespace "arrow::adapters::orc" nogil:
35
+ cdef enum CompressionStrategy \
36
+ " arrow::adapters::orc::CompressionStrategy":
37
+ _CompressionStrategy_SPEED \
38
+ " arrow::adapters::orc::CompressionStrategy::kSpeed"
39
+ _CompressionStrategy_COMPRESSION \
40
+ " arrow::adapters::orc::CompressionStrategy::kCompression"
41
+
42
+ cdef enum WriterId" arrow::adapters::orc::WriterId":
43
+ _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava"
44
+ _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp"
45
+ _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto"
46
+ _WriterId_SCRITCHLEY_GO \
47
+ " arrow::adapters::orc::WriterId::kScritchleyGo"
48
+ _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino"
49
+ _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown"
50
+
51
+ cdef enum WriterVersion" arrow::adapters::orc::WriterVersion":
52
+ _WriterVersion_ORIGINAL \
53
+ " arrow::adapters::orc::WriterVersion::kOriginal"
54
+ _WriterVersion_HIVE_8732 \
55
+ " arrow::adapters::orc::WriterVersion::kHive8732"
56
+ _WriterVersion_HIVE_4243 \
57
+ " arrow::adapters::orc::WriterVersion::kHive4243"
58
+ _WriterVersion_HIVE_12055 \
59
+ " arrow::adapters::orc::WriterVersion::kHive12055"
60
+ _WriterVersion_HIVE_13083 \
61
+ " arrow::adapters::orc::WriterVersion::kHive13083"
62
+ _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101"
63
+ _WriterVersion_ORC_135" arrow::adapters::orc::WriterVersion::kOrc135"
64
+ _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517"
65
+ _WriterVersion_ORC_203" arrow::adapters::orc::WriterVersion::kOrc203"
66
+ _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14"
67
+ _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax"
68
+
69
+ cdef cppclass FileVersion" arrow::adapters::orc::FileVersion":
70
+ FileVersion(uint32_t major_version, uint32_t minor_version)
71
+ uint32_t major_version()
72
+ uint32_t minor_version()
73
+ c_string ToString()
74
+
75
+ cdef struct WriteOptions" arrow::adapters::orc::WriteOptions":
76
+ int64_t batch_size
77
+ FileVersion file_version
78
+ int64_t stripe_size
79
+ CCompressionType compression
80
+ int64_t compression_block_size
81
+ CompressionStrategy compression_strategy
82
+ int64_t row_index_stride
83
+ double padding_tolerance
84
+ double dictionary_key_size_threshold
85
+ std_vector[int64_t] bloom_filter_columns
86
+ double bloom_filter_fpp
87
+
88
+
89
+ cdef extern from "arrow/adapters/orc/adapter.h" \
90
+ namespace "arrow::adapters::orc" nogil:
91
+
92
+ cdef cppclass ORCFileReader:
93
+ @staticmethod
94
+ CResult[unique_ptr[ORCFileReader]] Open(
95
+ const shared_ptr[CRandomAccessFile]& file,
96
+ CMemoryPool* pool)
97
+
98
+ CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata()
99
+
100
+ CResult[shared_ptr[CSchema]] ReadSchema()
101
+
102
+ CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe)
103
+ CResult[shared_ptr[CRecordBatch]] ReadStripe(
104
+ int64_t stripe, std_vector[c_string])
105
+
106
+ CResult[shared_ptr[CTable]] Read()
107
+ CResult[shared_ptr[CTable]] Read(std_vector[c_string])
108
+
109
+ int64_t NumberOfStripes()
110
+ int64_t NumberOfRows()
111
+ FileVersion GetFileVersion()
112
+ c_string GetSoftwareVersion()
113
+ CResult[CCompressionType] GetCompression()
114
+ int64_t GetCompressionSize()
115
+ int64_t GetRowIndexStride()
116
+ WriterId GetWriterId()
117
+ int32_t GetWriterIdValue()
118
+ WriterVersion GetWriterVersion()
119
+ int64_t GetNumberOfStripeStatistics()
120
+ int64_t GetContentLength()
121
+ int64_t GetStripeStatisticsLength()
122
+ int64_t GetFileFooterLength()
123
+ int64_t GetFilePostscriptLength()
124
+ int64_t GetFileLength()
125
+ c_string GetSerializedFileTail()
126
+
127
+ cdef cppclass ORCFileWriter:
128
+ @staticmethod
129
+ CResult[unique_ptr[ORCFileWriter]] Open(
130
+ COutputStream* output_stream, const WriteOptions& writer_options)
131
+
132
+ CStatus Write(const CTable& table)
133
+
134
+ CStatus Close()
venv/lib/python3.10/site-packages/pyarrow/_orc.pyx ADDED
@@ -0,0 +1,445 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+
21
+ from cython.operator cimport dereference as deref
22
+ from libcpp.vector cimport vector as std_vector
23
+ from libcpp.utility cimport move
24
+ from pyarrow.includes.common cimport *
25
+ from pyarrow.includes.libarrow cimport *
26
+ from pyarrow.lib cimport (check_status, _Weakrefable,
27
+ MemoryPool, maybe_unbox_memory_pool,
28
+ pyarrow_wrap_schema,
29
+ pyarrow_wrap_batch,
30
+ Table,
31
+ pyarrow_wrap_table,
32
+ pyarrow_wrap_metadata,
33
+ pyarrow_unwrap_table,
34
+ get_reader,
35
+ get_writer)
36
+ from pyarrow.lib import frombytes, tobytes
37
+ from pyarrow.util import _stringify_path
38
+
39
+
40
+ cdef compression_type_from_enum(CCompressionType compression_type):
41
+ compression_map = {
42
+ CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
43
+ CCompressionType_GZIP: 'ZLIB',
44
+ CCompressionType_SNAPPY: 'SNAPPY',
45
+ CCompressionType_LZ4: 'LZ4',
46
+ CCompressionType_ZSTD: 'ZSTD',
47
+ }
48
+ if compression_type in compression_map:
49
+ return compression_map[compression_type]
50
+ raise ValueError('Unsupported compression')
51
+
52
+
53
+ cdef CCompressionType compression_type_from_name(name) except *:
54
+ if not isinstance(name, str):
55
+ raise TypeError('compression must be a string')
56
+ name = name.upper()
57
+ if name == 'ZLIB':
58
+ return CCompressionType_GZIP
59
+ elif name == 'SNAPPY':
60
+ return CCompressionType_SNAPPY
61
+ elif name == 'LZ4':
62
+ return CCompressionType_LZ4
63
+ elif name == 'ZSTD':
64
+ return CCompressionType_ZSTD
65
+ elif name == 'UNCOMPRESSED':
66
+ return CCompressionType_UNCOMPRESSED
67
+ raise ValueError(f'Unknown CompressionKind: {name}')
68
+
69
+
70
+ cdef compression_strategy_from_enum(
71
+ CompressionStrategy compression_strategy
72
+ ):
73
+ compression_strategy_map = {
74
+ _CompressionStrategy_SPEED: 'SPEED',
75
+ _CompressionStrategy_COMPRESSION: 'COMPRESSION',
76
+ }
77
+ if compression_strategy in compression_strategy_map:
78
+ return compression_strategy_map[compression_strategy]
79
+ raise ValueError('Unsupported compression strategy')
80
+
81
+
82
+ cdef CompressionStrategy compression_strategy_from_name(name) except *:
83
+ if not isinstance(name, str):
84
+ raise TypeError('compression strategy must be a string')
85
+ name = name.upper()
86
+ if name == 'COMPRESSION':
87
+ return _CompressionStrategy_COMPRESSION
88
+ elif name == 'SPEED':
89
+ return _CompressionStrategy_SPEED
90
+ raise ValueError(f'Unknown CompressionStrategy: {name}')
91
+
92
+
93
+ cdef file_version_from_class(FileVersion file_version):
94
+ return frombytes(file_version.ToString())
95
+
96
+
97
+ cdef writer_id_from_enum(WriterId writer_id):
98
+ writer_id_map = {
99
+ _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
100
+ _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
101
+ _WriterId_PRESTO_WRITER: 'PRESTO',
102
+ _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
103
+ _WriterId_TRINO_WRITER: 'TRINO',
104
+ }
105
+ if writer_id in writer_id_map:
106
+ return writer_id_map[writer_id]
107
+ raise ValueError('Unsupported writer ID')
108
+
109
+
110
+ cdef writer_version_from_enum(WriterVersion writer_version):
111
+ writer_version_map = {
112
+ _WriterVersion_ORIGINAL: 'ORIGINAL',
113
+ _WriterVersion_HIVE_8732: 'HIVE_8732',
114
+ _WriterVersion_HIVE_4243: 'HIVE_4243',
115
+ _WriterVersion_HIVE_12055: 'HIVE_12055',
116
+ _WriterVersion_HIVE_13083: 'HIVE_13083',
117
+ _WriterVersion_ORC_101: 'ORC_101',
118
+ _WriterVersion_ORC_135: 'ORC_135',
119
+ _WriterVersion_ORC_517: 'ORC_517',
120
+ _WriterVersion_ORC_203: 'ORC_203',
121
+ _WriterVersion_ORC_14: 'ORC_14',
122
+ }
123
+ if writer_version in writer_version_map:
124
+ return writer_version_map[writer_version]
125
+ raise ValueError('Unsupported writer version')
126
+
127
+
128
+ cdef shared_ptr[WriteOptions] _create_write_options(
129
+ file_version=None,
130
+ batch_size=None,
131
+ stripe_size=None,
132
+ compression=None,
133
+ compression_block_size=None,
134
+ compression_strategy=None,
135
+ row_index_stride=None,
136
+ padding_tolerance=None,
137
+ dictionary_key_size_threshold=None,
138
+ bloom_filter_columns=None,
139
+ bloom_filter_fpp=None
140
+ ) except *:
141
+ """General writer options"""
142
+ cdef:
143
+ shared_ptr[WriteOptions] options
144
+ options = make_shared[WriteOptions]()
145
+ # batch_size
146
+ if batch_size is not None:
147
+ if isinstance(batch_size, int) and batch_size > 0:
148
+ deref(options).batch_size = batch_size
149
+ else:
150
+ raise ValueError(f"Invalid ORC writer batch size: {batch_size}")
151
+ # file_version
152
+ if file_version is not None:
153
+ if file_version == "0.12":
154
+ deref(options).file_version = FileVersion(0, 12)
155
+ elif file_version == "0.11":
156
+ deref(options).file_version = FileVersion(0, 11)
157
+ else:
158
+ raise ValueError(f"Unsupported ORC file version: {file_version}")
159
+ # stripe_size
160
+ if stripe_size is not None:
161
+ if isinstance(stripe_size, int) and stripe_size > 0:
162
+ deref(options).stripe_size = stripe_size
163
+ else:
164
+ raise ValueError(f"Invalid ORC stripe size: {stripe_size}")
165
+ # compression
166
+ if compression is not None:
167
+ if isinstance(compression, str):
168
+ deref(options).compression = compression_type_from_name(
169
+ compression)
170
+ else:
171
+ raise TypeError("Unsupported ORC compression type: "
172
+ f"{compression}")
173
+ # compression_block_size
174
+ if compression_block_size is not None:
175
+ if (isinstance(compression_block_size, int) and
176
+ compression_block_size > 0):
177
+ deref(options).compression_block_size = compression_block_size
178
+ else:
179
+ raise ValueError("Invalid ORC compression block size: "
180
+ f"{compression_block_size}")
181
+ # compression_strategy
182
+ if compression_strategy is not None:
183
+ if isinstance(compression_strategy, str):
184
+ deref(options).compression_strategy = \
185
+ compression_strategy_from_name(compression_strategy)
186
+ else:
187
+ raise TypeError("Unsupported ORC compression strategy: "
188
+ f"{compression_strategy}")
189
+ # row_index_stride
190
+ if row_index_stride is not None:
191
+ if isinstance(row_index_stride, int) and row_index_stride > 0:
192
+ deref(options).row_index_stride = row_index_stride
193
+ else:
194
+ raise ValueError("Invalid ORC row index stride: "
195
+ f"{row_index_stride}")
196
+ # padding_tolerance
197
+ if padding_tolerance is not None:
198
+ try:
199
+ padding_tolerance = float(padding_tolerance)
200
+ deref(options).padding_tolerance = padding_tolerance
201
+ except Exception:
202
+ raise ValueError("Invalid ORC padding tolerance: "
203
+ f"{padding_tolerance}")
204
+ # dictionary_key_size_threshold
205
+ if dictionary_key_size_threshold is not None:
206
+ try:
207
+ dictionary_key_size_threshold = float(
208
+ dictionary_key_size_threshold)
209
+ assert 0 <= dictionary_key_size_threshold <= 1
210
+ deref(options).dictionary_key_size_threshold = \
211
+ dictionary_key_size_threshold
212
+ except Exception:
213
+ raise ValueError("Invalid ORC dictionary key size threshold: "
214
+ f"{dictionary_key_size_threshold}")
215
+ # bloom_filter_columns
216
+ if bloom_filter_columns is not None:
217
+ try:
218
+ bloom_filter_columns = list(bloom_filter_columns)
219
+ for col in bloom_filter_columns:
220
+ assert isinstance(col, int) and col >= 0
221
+ deref(options).bloom_filter_columns = bloom_filter_columns
222
+ except Exception:
223
+ raise ValueError("Invalid ORC BloomFilter columns: "
224
+ f"{bloom_filter_columns}")
225
+ # Max false positive rate of the Bloom Filter
226
+ if bloom_filter_fpp is not None:
227
+ try:
228
+ bloom_filter_fpp = float(bloom_filter_fpp)
229
+ assert 0 <= bloom_filter_fpp <= 1
230
+ deref(options).bloom_filter_fpp = bloom_filter_fpp
231
+ except Exception:
232
+ raise ValueError("Invalid ORC BloomFilter false positive rate: "
233
+ f"{bloom_filter_fpp}")
234
+ return options
235
+
236
+
237
+ cdef class ORCReader(_Weakrefable):
238
+ cdef:
239
+ object source
240
+ CMemoryPool* allocator
241
+ unique_ptr[ORCFileReader] reader
242
+
243
+ def __cinit__(self, MemoryPool memory_pool=None):
244
+ self.allocator = maybe_unbox_memory_pool(memory_pool)
245
+
246
+ def open(self, object source, c_bool use_memory_map=True):
247
+ cdef:
248
+ shared_ptr[CRandomAccessFile] rd_handle
249
+
250
+ self.source = source
251
+
252
+ get_reader(source, use_memory_map, &rd_handle)
253
+ with nogil:
254
+ self.reader = move(GetResultValue(
255
+ ORCFileReader.Open(rd_handle, self.allocator)
256
+ ))
257
+
258
+ def metadata(self):
259
+ """
260
+ The arrow metadata for this file.
261
+
262
+ Returns
263
+ -------
264
+ metadata : pyarrow.KeyValueMetadata
265
+ """
266
+ cdef:
267
+ shared_ptr[const CKeyValueMetadata] sp_arrow_metadata
268
+
269
+ with nogil:
270
+ sp_arrow_metadata = GetResultValue(
271
+ deref(self.reader).ReadMetadata()
272
+ )
273
+
274
+ return pyarrow_wrap_metadata(sp_arrow_metadata)
275
+
276
+ def schema(self):
277
+ """
278
+ The arrow schema for this file.
279
+
280
+ Returns
281
+ -------
282
+ schema : pyarrow.Schema
283
+ """
284
+ cdef:
285
+ shared_ptr[CSchema] sp_arrow_schema
286
+
287
+ with nogil:
288
+ sp_arrow_schema = GetResultValue(deref(self.reader).ReadSchema())
289
+
290
+ return pyarrow_wrap_schema(sp_arrow_schema)
291
+
292
+ def nrows(self):
293
+ return deref(self.reader).NumberOfRows()
294
+
295
+ def nstripes(self):
296
+ return deref(self.reader).NumberOfStripes()
297
+
298
+ def file_version(self):
299
+ return file_version_from_class(deref(self.reader).GetFileVersion())
300
+
301
+ def software_version(self):
302
+ return frombytes(deref(self.reader).GetSoftwareVersion())
303
+
304
+ def compression(self):
305
+ return compression_type_from_enum(
306
+ GetResultValue(deref(self.reader).GetCompression()))
307
+
308
+ def compression_size(self):
309
+ return deref(self.reader).GetCompressionSize()
310
+
311
+ def row_index_stride(self):
312
+ return deref(self.reader).GetRowIndexStride()
313
+
314
+ def writer(self):
315
+ writer_name = writer_id_from_enum(deref(self.reader).GetWriterId())
316
+ if writer_name == 'UNKNOWN':
317
+ return deref(self.reader).GetWriterIdValue()
318
+ else:
319
+ return writer_name
320
+
321
+ def writer_version(self):
322
+ return writer_version_from_enum(deref(self.reader).GetWriterVersion())
323
+
324
+ def nstripe_statistics(self):
325
+ return deref(self.reader).GetNumberOfStripeStatistics()
326
+
327
+ def content_length(self):
328
+ return deref(self.reader).GetContentLength()
329
+
330
+ def stripe_statistics_length(self):
331
+ return deref(self.reader).GetStripeStatisticsLength()
332
+
333
+ def file_footer_length(self):
334
+ return deref(self.reader).GetFileFooterLength()
335
+
336
+ def file_postscript_length(self):
337
+ return deref(self.reader).GetFilePostscriptLength()
338
+
339
+ def file_length(self):
340
+ return deref(self.reader).GetFileLength()
341
+
342
+ def serialized_file_tail(self):
343
+ return deref(self.reader).GetSerializedFileTail()
344
+
345
+ def read_stripe(self, n, columns=None):
346
+ cdef:
347
+ shared_ptr[CRecordBatch] sp_record_batch
348
+ int64_t stripe
349
+ std_vector[c_string] c_names
350
+
351
+ stripe = n
352
+
353
+ if columns is None:
354
+ with nogil:
355
+ sp_record_batch = GetResultValue(
356
+ deref(self.reader).ReadStripe(stripe)
357
+ )
358
+ else:
359
+ c_names = [tobytes(name) for name in columns]
360
+ with nogil:
361
+ sp_record_batch = GetResultValue(
362
+ deref(self.reader).ReadStripe(stripe, c_names)
363
+ )
364
+
365
+ return pyarrow_wrap_batch(sp_record_batch)
366
+
367
+ def read(self, columns=None):
368
+ cdef:
369
+ shared_ptr[CTable] sp_table
370
+ std_vector[c_string] c_names
371
+
372
+ if columns is None:
373
+ with nogil:
374
+ sp_table = GetResultValue(deref(self.reader).Read())
375
+ else:
376
+ c_names = [tobytes(name) for name in columns]
377
+ with nogil:
378
+ sp_table = GetResultValue(deref(self.reader).Read(c_names))
379
+
380
+ return pyarrow_wrap_table(sp_table)
381
+
382
+
383
+ cdef class ORCWriter(_Weakrefable):
384
+ cdef:
385
+ unique_ptr[ORCFileWriter] writer
386
+ shared_ptr[COutputStream] sink
387
+ c_bool own_sink
388
+
389
+ def open(self, object where, *,
390
+ file_version=None,
391
+ batch_size=None,
392
+ stripe_size=None,
393
+ compression=None,
394
+ compression_block_size=None,
395
+ compression_strategy=None,
396
+ row_index_stride=None,
397
+ padding_tolerance=None,
398
+ dictionary_key_size_threshold=None,
399
+ bloom_filter_columns=None,
400
+ bloom_filter_fpp=None):
401
+ cdef:
402
+ shared_ptr[WriteOptions] write_options
403
+ c_string c_where
404
+ try:
405
+ where = _stringify_path(where)
406
+ except TypeError:
407
+ get_writer(where, &self.sink)
408
+ self.own_sink = False
409
+ else:
410
+ c_where = tobytes(where)
411
+ with nogil:
412
+ self.sink = GetResultValue(FileOutputStream.Open(c_where))
413
+ self.own_sink = True
414
+
415
+ write_options = _create_write_options(
416
+ file_version=file_version,
417
+ batch_size=batch_size,
418
+ stripe_size=stripe_size,
419
+ compression=compression,
420
+ compression_block_size=compression_block_size,
421
+ compression_strategy=compression_strategy,
422
+ row_index_stride=row_index_stride,
423
+ padding_tolerance=padding_tolerance,
424
+ dictionary_key_size_threshold=dictionary_key_size_threshold,
425
+ bloom_filter_columns=bloom_filter_columns,
426
+ bloom_filter_fpp=bloom_filter_fpp
427
+ )
428
+
429
+ with nogil:
430
+ self.writer = move(GetResultValue(
431
+ ORCFileWriter.Open(self.sink.get(),
432
+ deref(write_options))))
433
+
434
+ def write(self, Table table):
435
+ cdef:
436
+ shared_ptr[CTable] sp_table
437
+ sp_table = pyarrow_unwrap_table(table)
438
+ with nogil:
439
+ check_status(deref(self.writer).Write(deref(sp_table)))
440
+
441
+ def close(self):
442
+ with nogil:
443
+ check_status(deref(self.writer).Close())
444
+ if self.own_sink:
445
+ check_status(deref(self.sink).Close())
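A short sketch using the public `pyarrow.orc` module, which wraps the ORCReader/ORCWriter classes above; the file name and option values are illustrative.

    import pyarrow as pa
    import pyarrow.orc as orc

    table = pa.table({"id": [1, 2, 3], "val": ["a", "b", "c"]})

    # compression and stripe_size map onto the WriteOptions fields shown above
    orc.write_table(table, "example.orc", compression="zstd",
                    stripe_size=64 * 1024 * 1024)

    f = orc.ORCFile("example.orc")
    print(f.nstripes, f.compression)            # 1 'ZSTD'
    print(f.read(columns=["val"]).to_pydict())  # {'val': ['a', 'b', 'c']}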
venv/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (593 kB). View file
 
venv/lib/python3.10/site-packages/pyarrow/_parquet.pxd ADDED
@@ -0,0 +1,674 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport (CChunkedArray, CScalar, CSchema, CStatus,
23
+ CTable, CMemoryPool, CBuffer,
24
+ CKeyValueMetadata, CRandomAccessFile,
25
+ COutputStream, CCacheOptions,
26
+ TimeUnit, CRecordBatchReader)
27
+ from pyarrow.lib cimport _Weakrefable
28
+
29
+
30
+ cdef extern from "parquet/api/schema.h" namespace "parquet::schema" nogil:
31
+ cdef cppclass Node:
32
+ pass
33
+
34
+ cdef cppclass GroupNode(Node):
35
+ pass
36
+
37
+ cdef cppclass PrimitiveNode(Node):
38
+ pass
39
+
40
+ cdef cppclass ColumnPath:
41
+ c_string ToDotString()
42
+ vector[c_string] ToDotVector()
43
+
44
+
45
+ cdef extern from "parquet/api/schema.h" namespace "parquet" nogil:
46
+ enum ParquetType" parquet::Type::type":
47
+ ParquetType_BOOLEAN" parquet::Type::BOOLEAN"
48
+ ParquetType_INT32" parquet::Type::INT32"
49
+ ParquetType_INT64" parquet::Type::INT64"
50
+ ParquetType_INT96" parquet::Type::INT96"
51
+ ParquetType_FLOAT" parquet::Type::FLOAT"
52
+ ParquetType_DOUBLE" parquet::Type::DOUBLE"
53
+ ParquetType_BYTE_ARRAY" parquet::Type::BYTE_ARRAY"
54
+ ParquetType_FIXED_LEN_BYTE_ARRAY" parquet::Type::FIXED_LEN_BYTE_ARRAY"
55
+
56
+ enum ParquetLogicalTypeId" parquet::LogicalType::Type::type":
57
+ ParquetLogicalType_UNDEFINED" parquet::LogicalType::Type::UNDEFINED"
58
+ ParquetLogicalType_STRING" parquet::LogicalType::Type::STRING"
59
+ ParquetLogicalType_MAP" parquet::LogicalType::Type::MAP"
60
+ ParquetLogicalType_LIST" parquet::LogicalType::Type::LIST"
61
+ ParquetLogicalType_ENUM" parquet::LogicalType::Type::ENUM"
62
+ ParquetLogicalType_DECIMAL" parquet::LogicalType::Type::DECIMAL"
63
+ ParquetLogicalType_DATE" parquet::LogicalType::Type::DATE"
64
+ ParquetLogicalType_TIME" parquet::LogicalType::Type::TIME"
65
+ ParquetLogicalType_TIMESTAMP" parquet::LogicalType::Type::TIMESTAMP"
66
+ ParquetLogicalType_INT" parquet::LogicalType::Type::INT"
67
+ ParquetLogicalType_JSON" parquet::LogicalType::Type::JSON"
68
+ ParquetLogicalType_BSON" parquet::LogicalType::Type::BSON"
69
+ ParquetLogicalType_UUID" parquet::LogicalType::Type::UUID"
70
+ ParquetLogicalType_NONE" parquet::LogicalType::Type::NONE"
71
+
72
+ enum ParquetTimeUnit" parquet::LogicalType::TimeUnit::unit":
73
+ ParquetTimeUnit_UNKNOWN" parquet::LogicalType::TimeUnit::UNKNOWN"
74
+ ParquetTimeUnit_MILLIS" parquet::LogicalType::TimeUnit::MILLIS"
75
+ ParquetTimeUnit_MICROS" parquet::LogicalType::TimeUnit::MICROS"
76
+ ParquetTimeUnit_NANOS" parquet::LogicalType::TimeUnit::NANOS"
77
+
78
+ enum ParquetConvertedType" parquet::ConvertedType::type":
79
+ ParquetConvertedType_NONE" parquet::ConvertedType::NONE"
80
+ ParquetConvertedType_UTF8" parquet::ConvertedType::UTF8"
81
+ ParquetConvertedType_MAP" parquet::ConvertedType::MAP"
82
+ ParquetConvertedType_MAP_KEY_VALUE \
83
+ " parquet::ConvertedType::MAP_KEY_VALUE"
84
+ ParquetConvertedType_LIST" parquet::ConvertedType::LIST"
85
+ ParquetConvertedType_ENUM" parquet::ConvertedType::ENUM"
86
+ ParquetConvertedType_DECIMAL" parquet::ConvertedType::DECIMAL"
87
+ ParquetConvertedType_DATE" parquet::ConvertedType::DATE"
88
+ ParquetConvertedType_TIME_MILLIS" parquet::ConvertedType::TIME_MILLIS"
89
+ ParquetConvertedType_TIME_MICROS" parquet::ConvertedType::TIME_MICROS"
90
+ ParquetConvertedType_TIMESTAMP_MILLIS \
91
+ " parquet::ConvertedType::TIMESTAMP_MILLIS"
92
+ ParquetConvertedType_TIMESTAMP_MICROS \
93
+ " parquet::ConvertedType::TIMESTAMP_MICROS"
94
+ ParquetConvertedType_UINT_8" parquet::ConvertedType::UINT_8"
95
+ ParquetConvertedType_UINT_16" parquet::ConvertedType::UINT_16"
96
+ ParquetConvertedType_UINT_32" parquet::ConvertedType::UINT_32"
97
+ ParquetConvertedType_UINT_64" parquet::ConvertedType::UINT_64"
98
+ ParquetConvertedType_INT_8" parquet::ConvertedType::INT_8"
99
+ ParquetConvertedType_INT_16" parquet::ConvertedType::INT_16"
100
+ ParquetConvertedType_INT_32" parquet::ConvertedType::INT_32"
101
+ ParquetConvertedType_INT_64" parquet::ConvertedType::INT_64"
102
+ ParquetConvertedType_JSON" parquet::ConvertedType::JSON"
103
+ ParquetConvertedType_BSON" parquet::ConvertedType::BSON"
104
+ ParquetConvertedType_INTERVAL" parquet::ConvertedType::INTERVAL"
105
+
106
+ enum ParquetRepetition" parquet::Repetition::type":
107
+ ParquetRepetition_REQUIRED" parquet::REPETITION::REQUIRED"
108
+ ParquetRepetition_OPTIONAL" parquet::REPETITION::OPTIONAL"
109
+ ParquetRepetition_REPEATED" parquet::REPETITION::REPEATED"
110
+
111
+ enum ParquetEncoding" parquet::Encoding::type":
112
+ ParquetEncoding_PLAIN" parquet::Encoding::PLAIN"
113
+ ParquetEncoding_PLAIN_DICTIONARY" parquet::Encoding::PLAIN_DICTIONARY"
114
+ ParquetEncoding_RLE" parquet::Encoding::RLE"
115
+ ParquetEncoding_BIT_PACKED" parquet::Encoding::BIT_PACKED"
116
+ ParquetEncoding_DELTA_BINARY_PACKED \
117
+ " parquet::Encoding::DELTA_BINARY_PACKED"
118
+ ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY \
119
+ " parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY"
120
+ ParquetEncoding_DELTA_BYTE_ARRAY" parquet::Encoding::DELTA_BYTE_ARRAY"
121
+ ParquetEncoding_RLE_DICTIONARY" parquet::Encoding::RLE_DICTIONARY"
122
+ ParquetEncoding_BYTE_STREAM_SPLIT \
123
+ " parquet::Encoding::BYTE_STREAM_SPLIT"
124
+
125
+ enum ParquetCompression" parquet::Compression::type":
126
+ ParquetCompression_UNCOMPRESSED" parquet::Compression::UNCOMPRESSED"
127
+ ParquetCompression_SNAPPY" parquet::Compression::SNAPPY"
128
+ ParquetCompression_GZIP" parquet::Compression::GZIP"
129
+ ParquetCompression_LZO" parquet::Compression::LZO"
130
+ ParquetCompression_BROTLI" parquet::Compression::BROTLI"
131
+ ParquetCompression_LZ4" parquet::Compression::LZ4"
132
+ ParquetCompression_ZSTD" parquet::Compression::ZSTD"
133
+
134
+ enum ParquetVersion" parquet::ParquetVersion::type":
135
+ ParquetVersion_V1" parquet::ParquetVersion::PARQUET_1_0"
136
+ ParquetVersion_V2_0" parquet::ParquetVersion::PARQUET_2_0"
137
+ ParquetVersion_V2_4" parquet::ParquetVersion::PARQUET_2_4"
138
+ ParquetVersion_V2_6" parquet::ParquetVersion::PARQUET_2_6"
139
+
140
+ enum ParquetSortOrder" parquet::SortOrder::type":
141
+ ParquetSortOrder_SIGNED" parquet::SortOrder::SIGNED"
142
+ ParquetSortOrder_UNSIGNED" parquet::SortOrder::UNSIGNED"
143
+ ParquetSortOrder_UNKNOWN" parquet::SortOrder::UNKNOWN"
144
+
145
+ cdef cppclass CParquetLogicalType" parquet::LogicalType":
146
+ c_string ToString() const
147
+ c_string ToJSON() const
148
+ ParquetLogicalTypeId type() const
149
+
150
+ cdef cppclass CParquetDecimalType \
151
+ " parquet::DecimalLogicalType"(CParquetLogicalType):
152
+ int32_t precision() const
153
+ int32_t scale() const
154
+
155
+ cdef cppclass CParquetIntType \
156
+ " parquet::IntLogicalType"(CParquetLogicalType):
157
+ int bit_width() const
158
+ c_bool is_signed() const
159
+
160
+ cdef cppclass CParquetTimeType \
161
+ " parquet::TimeLogicalType"(CParquetLogicalType):
162
+ c_bool is_adjusted_to_utc() const
163
+ ParquetTimeUnit time_unit() const
164
+
165
+ cdef cppclass CParquetTimestampType \
166
+ " parquet::TimestampLogicalType"(CParquetLogicalType):
167
+ c_bool is_adjusted_to_utc() const
168
+ ParquetTimeUnit time_unit() const
169
+
170
+ cdef cppclass ColumnDescriptor" parquet::ColumnDescriptor":
171
+ c_bool Equals(const ColumnDescriptor& other)
172
+
173
+ shared_ptr[ColumnPath] path()
174
+ int16_t max_definition_level()
175
+ int16_t max_repetition_level()
176
+
177
+ ParquetType physical_type()
178
+ const shared_ptr[const CParquetLogicalType]& logical_type()
179
+ ParquetConvertedType converted_type()
180
+ const c_string& name()
181
+ int type_length()
182
+ int type_precision()
183
+ int type_scale()
184
+
185
+ cdef cppclass SchemaDescriptor:
186
+ const ColumnDescriptor* Column(int i)
187
+ shared_ptr[Node] schema()
188
+ GroupNode* group()
189
+ c_bool Equals(const SchemaDescriptor& other)
190
+ c_string ToString()
191
+ int num_columns()
192
+
193
+ cdef c_string FormatStatValue(ParquetType parquet_type, c_string val)
194
+
195
+ enum ParquetCipher" parquet::ParquetCipher::type":
196
+ ParquetCipher_AES_GCM_V1" parquet::ParquetCipher::AES_GCM_V1"
197
+ ParquetCipher_AES_GCM_CTR_V1" parquet::ParquetCipher::AES_GCM_CTR_V1"
198
+
199
+ struct AadMetadata:
200
+ c_string aad_prefix
201
+ c_string aad_file_unique
202
+ c_bool supply_aad_prefix
203
+
204
+ struct EncryptionAlgorithm:
205
+ ParquetCipher algorithm
206
+ AadMetadata aad
207
+
208
+ cdef extern from "parquet/api/reader.h" namespace "parquet" nogil:
209
+ cdef cppclass ColumnReader:
210
+ pass
211
+
212
+ cdef cppclass BoolReader(ColumnReader):
213
+ pass
214
+
215
+ cdef cppclass Int32Reader(ColumnReader):
216
+ pass
217
+
218
+ cdef cppclass Int64Reader(ColumnReader):
219
+ pass
220
+
221
+ cdef cppclass Int96Reader(ColumnReader):
222
+ pass
223
+
224
+ cdef cppclass FloatReader(ColumnReader):
225
+ pass
226
+
227
+ cdef cppclass DoubleReader(ColumnReader):
228
+ pass
229
+
230
+ cdef cppclass ByteArrayReader(ColumnReader):
231
+ pass
232
+
233
+ cdef cppclass RowGroupReader:
234
+ pass
235
+
236
+ cdef cppclass CEncodedStatistics" parquet::EncodedStatistics":
237
+ const c_string& max() const
238
+ const c_string& min() const
239
+ int64_t null_count
240
+ int64_t distinct_count
241
+ bint has_min
242
+ bint has_max
243
+ bint has_null_count
244
+ bint has_distinct_count
245
+
246
+ cdef cppclass ParquetByteArray" parquet::ByteArray":
247
+ uint32_t len
248
+ const uint8_t* ptr
249
+
250
+ cdef cppclass ParquetFLBA" parquet::FLBA":
251
+ const uint8_t* ptr
252
+
253
+ cdef cppclass CStatistics" parquet::Statistics":
254
+ int64_t null_count() const
255
+ int64_t distinct_count() const
256
+ int64_t num_values() const
257
+ bint HasMinMax()
258
+ bint HasNullCount()
259
+ bint HasDistinctCount()
260
+ c_bool Equals(const CStatistics&) const
261
+ void Reset()
262
+ c_string EncodeMin()
263
+ c_string EncodeMax()
264
+ CEncodedStatistics Encode()
265
+ void SetComparator()
266
+ ParquetType physical_type() const
267
+ const ColumnDescriptor* descr() const
268
+
269
+ cdef cppclass CBoolStatistics" parquet::BoolStatistics"(CStatistics):
270
+ c_bool min()
271
+ c_bool max()
272
+
273
+ cdef cppclass CInt32Statistics" parquet::Int32Statistics"(CStatistics):
274
+ int32_t min()
275
+ int32_t max()
276
+
277
+ cdef cppclass CInt64Statistics" parquet::Int64Statistics"(CStatistics):
278
+ int64_t min()
279
+ int64_t max()
280
+
281
+ cdef cppclass CFloatStatistics" parquet::FloatStatistics"(CStatistics):
282
+ float min()
283
+ float max()
284
+
285
+ cdef cppclass CDoubleStatistics" parquet::DoubleStatistics"(CStatistics):
286
+ double min()
287
+ double max()
288
+
289
+ cdef cppclass CByteArrayStatistics \
290
+ " parquet::ByteArrayStatistics"(CStatistics):
291
+ ParquetByteArray min()
292
+ ParquetByteArray max()
293
+
294
+ cdef cppclass CFLBAStatistics" parquet::FLBAStatistics"(CStatistics):
295
+ ParquetFLBA min()
296
+ ParquetFLBA max()
297
+
298
+ cdef cppclass CColumnCryptoMetaData" parquet::ColumnCryptoMetaData":
299
+ shared_ptr[ColumnPath] path_in_schema() const
300
+ c_bool encrypted_with_footer_key() const
301
+ const c_string& key_metadata() const
302
+
303
+ cdef cppclass ParquetIndexLocation" parquet::IndexLocation":
304
+ int64_t offset
305
+ int32_t length
306
+
307
+ cdef cppclass CColumnChunkMetaData" parquet::ColumnChunkMetaData":
308
+ int64_t file_offset() const
309
+ const c_string& file_path() const
310
+
311
+ c_bool is_metadata_set() const
312
+ ParquetType type() const
313
+ int64_t num_values() const
314
+ shared_ptr[ColumnPath] path_in_schema() const
315
+ bint is_stats_set() const
316
+ shared_ptr[CStatistics] statistics() const
317
+ ParquetCompression compression() const
318
+ const vector[ParquetEncoding]& encodings() const
319
+ c_bool Equals(const CColumnChunkMetaData&) const
320
+
321
+ int64_t has_dictionary_page() const
322
+ int64_t dictionary_page_offset() const
323
+ int64_t data_page_offset() const
324
+ int64_t index_page_offset() const
325
+ int64_t total_compressed_size() const
326
+ int64_t total_uncompressed_size() const
327
+ unique_ptr[CColumnCryptoMetaData] crypto_metadata() const
328
+ optional[ParquetIndexLocation] GetColumnIndexLocation() const
329
+ optional[ParquetIndexLocation] GetOffsetIndexLocation() const
330
+
331
+ struct CSortingColumn" parquet::SortingColumn":
332
+ int column_idx
333
+ c_bool descending
334
+ c_bool nulls_first
335
+
336
+ cdef cppclass CRowGroupMetaData" parquet::RowGroupMetaData":
337
+ c_bool Equals(const CRowGroupMetaData&) const
338
+ int num_columns() const
339
+ int64_t num_rows() const
340
+ int64_t total_byte_size() const
341
+ vector[CSortingColumn] sorting_columns() const
342
+ unique_ptr[CColumnChunkMetaData] ColumnChunk(int i) const
343
+
344
+ cdef cppclass CFileMetaData" parquet::FileMetaData":
345
+ c_bool Equals(const CFileMetaData&) const
346
+ uint32_t size()
347
+ int num_columns()
348
+ int64_t num_rows()
349
+ int num_row_groups()
350
+ ParquetVersion version()
351
+ const c_string created_by()
352
+ int num_schema_elements()
353
+
354
+ void set_file_path(const c_string& path)
355
+ void AppendRowGroups(const CFileMetaData& other) except +
356
+
357
+ unique_ptr[CRowGroupMetaData] RowGroup(int i)
358
+ const SchemaDescriptor* schema()
359
+ shared_ptr[const CKeyValueMetadata] key_value_metadata() const
360
+ void WriteTo(COutputStream* dst) const
361
+
362
+ inline c_bool is_encryption_algorithm_set() const
363
+ inline EncryptionAlgorithm encryption_algorithm() const
364
+ inline const c_string& footer_signing_key_metadata() const
365
+
366
+ cdef shared_ptr[CFileMetaData] CFileMetaData_Make \
367
+ " parquet::FileMetaData::Make"(const void* serialized_metadata,
368
+ uint32_t* metadata_len)
369
+
370
+ cdef cppclass CReaderProperties" parquet::ReaderProperties":
371
+ c_bool is_buffered_stream_enabled() const
372
+ void enable_buffered_stream()
373
+ void disable_buffered_stream()
374
+
375
+ void set_buffer_size(int64_t buf_size)
376
+ int64_t buffer_size() const
377
+
378
+ void set_thrift_string_size_limit(int32_t size)
379
+ int32_t thrift_string_size_limit() const
380
+
381
+ void set_thrift_container_size_limit(int32_t size)
382
+ int32_t thrift_container_size_limit() const
383
+
384
+ void file_decryption_properties(shared_ptr[CFileDecryptionProperties]
385
+ decryption)
386
+ shared_ptr[CFileDecryptionProperties] file_decryption_properties() \
387
+ const
388
+
389
+ c_bool page_checksum_verification() const
390
+ void set_page_checksum_verification(c_bool check_crc)
391
+
392
+ CReaderProperties default_reader_properties()
393
+
394
+ cdef cppclass ArrowReaderProperties:
395
+ ArrowReaderProperties()
396
+ void set_read_dictionary(int column_index, c_bool read_dict)
397
+ c_bool read_dictionary()
398
+ void set_batch_size(int64_t batch_size)
399
+ int64_t batch_size()
400
+ void set_pre_buffer(c_bool pre_buffer)
401
+ c_bool pre_buffer() const
402
+ void set_cache_options(CCacheOptions options)
403
+ CCacheOptions cache_options() const
404
+ void set_coerce_int96_timestamp_unit(TimeUnit unit)
405
+ TimeUnit coerce_int96_timestamp_unit() const
406
+
407
+ ArrowReaderProperties default_arrow_reader_properties()
408
+
409
+ cdef cppclass ParquetFileReader:
410
+ shared_ptr[CFileMetaData] metadata()
411
+
412
+
413
+ cdef extern from "parquet/api/writer.h" namespace "parquet" nogil:
414
+ cdef cppclass WriterProperties:
415
+ cppclass Builder:
416
+ Builder* data_page_version(ParquetDataPageVersion version)
417
+ Builder* version(ParquetVersion version)
418
+ Builder* compression(ParquetCompression codec)
419
+ Builder* compression(const c_string& path,
420
+ ParquetCompression codec)
421
+ Builder* compression_level(int compression_level)
422
+ Builder* compression_level(const c_string& path,
423
+ int compression_level)
424
+ Builder* encryption(
425
+ shared_ptr[CFileEncryptionProperties]
426
+ file_encryption_properties)
427
+ Builder* disable_dictionary()
428
+ Builder* enable_dictionary()
429
+ Builder* enable_dictionary(const c_string& path)
430
+ Builder* set_sorting_columns(vector[CSortingColumn] sorting_columns)
431
+ Builder* disable_statistics()
432
+ Builder* enable_statistics()
433
+ Builder* enable_statistics(const c_string& path)
434
+ Builder* data_pagesize(int64_t size)
435
+ Builder* encoding(ParquetEncoding encoding)
436
+ Builder* encoding(const c_string& path,
437
+ ParquetEncoding encoding)
438
+ Builder* max_row_group_length(int64_t size)
439
+ Builder* write_batch_size(int64_t batch_size)
440
+ Builder* dictionary_pagesize_limit(int64_t dictionary_pagesize_limit)
441
+ Builder* enable_write_page_index()
442
+ Builder* disable_write_page_index()
443
+ Builder* enable_page_checksum()
444
+ Builder* disable_page_checksum()
445
+ shared_ptr[WriterProperties] build()
446
+
447
+ cdef cppclass ArrowWriterProperties:
448
+ cppclass Builder:
449
+ Builder()
450
+ Builder* disable_deprecated_int96_timestamps()
451
+ Builder* enable_deprecated_int96_timestamps()
452
+ Builder* coerce_timestamps(TimeUnit unit)
453
+ Builder* allow_truncated_timestamps()
454
+ Builder* disallow_truncated_timestamps()
455
+ Builder* store_schema()
456
+ Builder* enable_compliant_nested_types()
457
+ Builder* disable_compliant_nested_types()
458
+ Builder* set_engine_version(ArrowWriterEngineVersion version)
459
+ shared_ptr[ArrowWriterProperties] build()
460
+ c_bool support_deprecated_int96_timestamps()
461
+
462
+
463
+ cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil:
464
+ cdef cppclass FileReader:
465
+ FileReader(CMemoryPool* pool, unique_ptr[ParquetFileReader] reader)
466
+
467
+ CStatus GetSchema(shared_ptr[CSchema]* out)
468
+
469
+ CStatus ReadColumn(int i, shared_ptr[CChunkedArray]* out)
470
+ CStatus ReadSchemaField(int i, shared_ptr[CChunkedArray]* out)
471
+
472
+ int num_row_groups()
473
+ CStatus ReadRowGroup(int i, shared_ptr[CTable]* out)
474
+ CStatus ReadRowGroup(int i, const vector[int]& column_indices,
475
+ shared_ptr[CTable]* out)
476
+
477
+ CStatus ReadRowGroups(const vector[int]& row_groups,
478
+ shared_ptr[CTable]* out)
479
+ CStatus ReadRowGroups(const vector[int]& row_groups,
480
+ const vector[int]& column_indices,
481
+ shared_ptr[CTable]* out)
482
+
483
+ CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
484
+ const vector[int]& column_indices,
485
+ unique_ptr[CRecordBatchReader]* out)
486
+ CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
487
+ unique_ptr[CRecordBatchReader]* out)
488
+
489
+ CStatus ReadTable(shared_ptr[CTable]* out)
490
+ CStatus ReadTable(const vector[int]& column_indices,
491
+ shared_ptr[CTable]* out)
492
+
493
+ CStatus ScanContents(vector[int] columns, int32_t column_batch_size,
494
+ int64_t* num_rows)
495
+
496
+ const ParquetFileReader* parquet_reader()
497
+
498
+ void set_use_threads(c_bool use_threads)
499
+
500
+ void set_batch_size(int64_t batch_size)
501
+
502
+ cdef cppclass FileReaderBuilder:
503
+ FileReaderBuilder()
504
+ CStatus Open(const shared_ptr[CRandomAccessFile]& file,
505
+ const CReaderProperties& properties,
506
+ const shared_ptr[CFileMetaData]& metadata)
507
+
508
+ ParquetFileReader* raw_reader()
509
+ FileReaderBuilder* memory_pool(CMemoryPool*)
510
+ FileReaderBuilder* properties(const ArrowReaderProperties&)
511
+ CStatus Build(unique_ptr[FileReader]* out)
512
+
513
+ CStatus FromParquetSchema(
514
+ const SchemaDescriptor* parquet_schema,
515
+ const ArrowReaderProperties& properties,
516
+ const shared_ptr[const CKeyValueMetadata]& key_value_metadata,
517
+ shared_ptr[CSchema]* out)
518
+
519
+ CStatus StatisticsAsScalars(const CStatistics& Statistics,
520
+ shared_ptr[CScalar]* min,
521
+ shared_ptr[CScalar]* max)
522
+
523
+ cdef extern from "parquet/arrow/schema.h" namespace "parquet::arrow" nogil:
524
+
525
+ CStatus ToParquetSchema(
526
+ const CSchema* arrow_schema,
527
+ const WriterProperties& properties,
528
+ const ArrowWriterProperties& arrow_properties,
529
+ shared_ptr[SchemaDescriptor]* out)
530
+
531
+
532
+ cdef extern from "parquet/properties.h" namespace "parquet" nogil:
533
+ cdef enum ArrowWriterEngineVersion:
534
+ V1 "parquet::ArrowWriterProperties::V1",
535
+ V2 "parquet::ArrowWriterProperties::V2"
536
+
537
+ cdef cppclass ParquetDataPageVersion:
538
+ pass
539
+
540
+ cdef ParquetDataPageVersion ParquetDataPageVersion_V1 \
541
+ " parquet::ParquetDataPageVersion::V1"
542
+ cdef ParquetDataPageVersion ParquetDataPageVersion_V2 \
543
+ " parquet::ParquetDataPageVersion::V2"
544
+
545
+ cdef extern from "parquet/arrow/writer.h" namespace "parquet::arrow" nogil:
546
+ cdef cppclass FileWriter:
547
+
548
+ @staticmethod
549
+ CResult[unique_ptr[FileWriter]] Open(const CSchema& schema, CMemoryPool* pool,
550
+ const shared_ptr[COutputStream]& sink,
551
+ const shared_ptr[WriterProperties]& properties,
552
+ const shared_ptr[ArrowWriterProperties]& arrow_properties)
553
+
554
+ CStatus WriteTable(const CTable& table, int64_t chunk_size)
555
+ CStatus NewRowGroup(int64_t chunk_size)
556
+ CStatus Close()
557
+
558
+ const shared_ptr[CFileMetaData] metadata() const
559
+
560
+ CStatus WriteMetaDataFile(
561
+ const CFileMetaData& file_metadata,
562
+ const COutputStream* sink)
563
+
564
+ cdef class FileEncryptionProperties:
565
+ """File-level encryption properties for the low-level API"""
566
+ cdef:
567
+ shared_ptr[CFileEncryptionProperties] properties
568
+
569
+ @staticmethod
570
+ cdef inline FileEncryptionProperties wrap(
571
+ shared_ptr[CFileEncryptionProperties] properties):
572
+
573
+ result = FileEncryptionProperties()
574
+ result.properties = properties
575
+ return result
576
+
577
+ cdef inline shared_ptr[CFileEncryptionProperties] unwrap(self):
578
+ return self.properties
579
+
580
+ cdef shared_ptr[WriterProperties] _create_writer_properties(
581
+ use_dictionary=*,
582
+ compression=*,
583
+ version=*,
584
+ write_statistics=*,
585
+ data_page_size=*,
586
+ compression_level=*,
587
+ use_byte_stream_split=*,
588
+ column_encoding=*,
589
+ data_page_version=*,
590
+ FileEncryptionProperties encryption_properties=*,
591
+ write_batch_size=*,
592
+ dictionary_pagesize_limit=*,
593
+ write_page_index=*,
594
+ write_page_checksum=*,
595
+ sorting_columns=*,
596
+ ) except *
597
+
598
+
599
+ cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
600
+ use_deprecated_int96_timestamps=*,
601
+ coerce_timestamps=*,
602
+ allow_truncated_timestamps=*,
603
+ writer_engine_version=*,
604
+ use_compliant_nested_type=*,
605
+ store_schema=*,
606
+ ) except *
607
+
608
+ cdef class ParquetSchema(_Weakrefable):
609
+ cdef:
610
+ FileMetaData parent # the FileMetaData owning the SchemaDescriptor
611
+ const SchemaDescriptor* schema
612
+
613
+ cdef class FileMetaData(_Weakrefable):
614
+ cdef:
615
+ shared_ptr[CFileMetaData] sp_metadata
616
+ CFileMetaData* _metadata
617
+ ParquetSchema _schema
618
+
619
+ cdef inline init(self, const shared_ptr[CFileMetaData]& metadata):
620
+ self.sp_metadata = metadata
621
+ self._metadata = metadata.get()
622
+
623
+ cdef class RowGroupMetaData(_Weakrefable):
624
+ cdef:
625
+ int index # for pickling support
626
+ unique_ptr[CRowGroupMetaData] up_metadata
627
+ CRowGroupMetaData* metadata
628
+ FileMetaData parent
629
+
630
+ cdef class ColumnChunkMetaData(_Weakrefable):
631
+ cdef:
632
+ unique_ptr[CColumnChunkMetaData] up_metadata
633
+ CColumnChunkMetaData* metadata
634
+ RowGroupMetaData parent
635
+
636
+ cdef inline init(self, RowGroupMetaData parent, int i):
637
+ self.up_metadata = parent.metadata.ColumnChunk(i)
638
+ self.metadata = self.up_metadata.get()
639
+ self.parent = parent
640
+
641
+ cdef class Statistics(_Weakrefable):
642
+ cdef:
643
+ shared_ptr[CStatistics] statistics
644
+ ColumnChunkMetaData parent
645
+
646
+ cdef inline init(self, const shared_ptr[CStatistics]& statistics,
647
+ ColumnChunkMetaData parent):
648
+ self.statistics = statistics
649
+ self.parent = parent
650
+
651
+ cdef extern from "parquet/encryption/encryption.h" namespace "parquet" nogil:
652
+ cdef cppclass CFileDecryptionProperties\
653
+ " parquet::FileDecryptionProperties":
654
+ pass
655
+
656
+ cdef cppclass CFileEncryptionProperties\
657
+ " parquet::FileEncryptionProperties":
658
+ pass
659
+
660
+ cdef class FileDecryptionProperties:
661
+ """File-level decryption properties for the low-level API"""
662
+ cdef:
663
+ shared_ptr[CFileDecryptionProperties] properties
664
+
665
+ @staticmethod
666
+ cdef inline FileDecryptionProperties wrap(
667
+ shared_ptr[CFileDecryptionProperties] properties):
668
+
669
+ result = FileDecryptionProperties()
670
+ result.properties = properties
671
+ return result
672
+
673
+ cdef inline shared_ptr[CFileDecryptionProperties] unwrap(self):
674
+ return self.properties
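(Annotation, not part of the diff.) The `cdef extern` declarations above close out `_parquet.pxd`: they expose parquet-cpp's metadata, statistics, reader/writer properties and encryption types to Cython, and the `_parquet.pyx` module added next wraps them into the Python-level classes (`FileMetaData`, `ParquetSchema`, `Statistics`, ...). A minimal sketch of how that surface is reached from Python; the file name is only illustrative:

    import pyarrow as pa
    import pyarrow.parquet as pq

    # Write a small file so there is something to inspect.
    pq.write_table(pa.table({"id": [1, 2, 3]}), "example.parquet")

    meta = pq.read_metadata("example.parquet")   # FileMetaData, backed by CFileMetaData
    print(meta.num_rows, meta.num_row_groups)
    print(meta.schema)                           # ParquetSchema, backed by SchemaDescriptor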
venv/lib/python3.10/site-packages/pyarrow/_parquet.pyx ADDED
@@ -0,0 +1,2205 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+
21
+ from collections.abc import Sequence
22
+ from textwrap import indent
23
+ import warnings
24
+
25
+ from cython.operator cimport dereference as deref
26
+ from pyarrow.includes.common cimport *
27
+ from pyarrow.includes.libarrow cimport *
28
+ from pyarrow.includes.libarrow_python cimport *
29
+ from pyarrow.lib cimport (_Weakrefable, Buffer, Schema,
30
+ check_status,
31
+ MemoryPool, maybe_unbox_memory_pool,
32
+ Table, NativeFile,
33
+ pyarrow_wrap_chunked_array,
34
+ pyarrow_wrap_schema,
35
+ pyarrow_unwrap_schema,
36
+ pyarrow_wrap_table,
37
+ pyarrow_wrap_batch,
38
+ pyarrow_wrap_scalar,
39
+ NativeFile, get_reader, get_writer,
40
+ string_to_timeunit)
41
+
42
+ from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream,
43
+ _stringify_path,
44
+ tobytes, frombytes)
45
+
46
+ cimport cpython as cp
47
+
48
+ _DEFAULT_ROW_GROUP_SIZE = 1024*1024
49
+ _MAX_ROW_GROUP_SIZE = 64*1024*1024
50
+
51
+ cdef class Statistics(_Weakrefable):
52
+ """Statistics for a single column in a single row group."""
53
+
54
+ def __cinit__(self):
55
+ pass
56
+
57
+ def __repr__(self):
58
+ return """{}
59
+ has_min_max: {}
60
+ min: {}
61
+ max: {}
62
+ null_count: {}
63
+ distinct_count: {}
64
+ num_values: {}
65
+ physical_type: {}
66
+ logical_type: {}
67
+ converted_type (legacy): {}""".format(object.__repr__(self),
68
+ self.has_min_max,
69
+ self.min,
70
+ self.max,
71
+ self.null_count,
72
+ self.distinct_count,
73
+ self.num_values,
74
+ self.physical_type,
75
+ str(self.logical_type),
76
+ self.converted_type)
77
+
78
+ def to_dict(self):
79
+ """
80
+ Get dictionary representation of statistics.
81
+
82
+ Returns
83
+ -------
84
+ dict
85
+ Dictionary with a key for each attribute of this class.
86
+ """
87
+ d = dict(
88
+ has_min_max=self.has_min_max,
89
+ min=self.min,
90
+ max=self.max,
91
+ null_count=self.null_count,
92
+ distinct_count=self.distinct_count,
93
+ num_values=self.num_values,
94
+ physical_type=self.physical_type
95
+ )
96
+ return d
97
+
98
+ def __eq__(self, other):
99
+ try:
100
+ return self.equals(other)
101
+ except TypeError:
102
+ return NotImplemented
103
+
104
+ def equals(self, Statistics other):
105
+ """
106
+ Return whether the two column statistics objects are equal.
107
+
108
+ Parameters
109
+ ----------
110
+ other : Statistics
111
+ Statistics to compare against.
112
+
113
+ Returns
114
+ -------
115
+ are_equal : bool
116
+ """
117
+ return self.statistics.get().Equals(deref(other.statistics.get()))
118
+
119
+ @property
120
+ def has_min_max(self):
121
+ """Whether min and max are present (bool)."""
122
+ return self.statistics.get().HasMinMax()
123
+
124
+ @property
125
+ def has_null_count(self):
126
+ """Whether null count is present (bool)."""
127
+ return self.statistics.get().HasNullCount()
128
+
129
+ @property
130
+ def has_distinct_count(self):
131
+ """Whether distinct count is present (bool)."""
132
+ return self.statistics.get().HasDistinctCount()
133
+
134
+ @property
135
+ def min_raw(self):
136
+ """Min value as physical type (bool, int, float, or bytes)."""
137
+ if self.has_min_max:
138
+ return _cast_statistic_raw_min(self.statistics.get())
139
+ else:
140
+ return None
141
+
142
+ @property
143
+ def max_raw(self):
144
+ """Max value as physical type (bool, int, float, or bytes)."""
145
+ if self.has_min_max:
146
+ return _cast_statistic_raw_max(self.statistics.get())
147
+ else:
148
+ return None
149
+
150
+ @property
151
+ def min(self):
152
+ """
153
+ Min value as logical type.
154
+
155
+ Returned as the Python equivalent of logical type, such as datetime.date
156
+ for dates and decimal.Decimal for decimals.
157
+ """
158
+ if self.has_min_max:
159
+ min_scalar, _ = _cast_statistics(self.statistics.get())
160
+ return min_scalar.as_py()
161
+ else:
162
+ return None
163
+
164
+ @property
165
+ def max(self):
166
+ """
167
+ Max value as logical type.
168
+
169
+ Returned as the Python equivalent of logical type, such as datetime.date
170
+ for dates and decimal.Decimal for decimals.
171
+ """
172
+ if self.has_min_max:
173
+ _, max_scalar = _cast_statistics(self.statistics.get())
174
+ return max_scalar.as_py()
175
+ else:
176
+ return None
177
+
178
+ @property
179
+ def null_count(self):
180
+ """Number of null values in chunk (int)."""
181
+ if self.has_null_count:
182
+ return self.statistics.get().null_count()
183
+ else:
184
+ return None
185
+
186
+ @property
187
+ def distinct_count(self):
188
+ """Distinct number of values in chunk (int)."""
189
+ if self.has_distinct_count:
190
+ return self.statistics.get().distinct_count()
191
+ else:
192
+ return None
193
+
194
+ @property
195
+ def num_values(self):
196
+ """Number of non-null values (int)."""
197
+ return self.statistics.get().num_values()
198
+
199
+ @property
200
+ def physical_type(self):
201
+ """Physical type of column (str)."""
202
+ raw_physical_type = self.statistics.get().physical_type()
203
+ return physical_type_name_from_enum(raw_physical_type)
204
+
205
+ @property
206
+ def logical_type(self):
207
+ """Logical type of column (:class:`ParquetLogicalType`)."""
208
+ return wrap_logical_type(self.statistics.get().descr().logical_type())
209
+
210
+ @property
211
+ def converted_type(self):
212
+ """Legacy converted type (str or None)."""
213
+ raw_converted_type = self.statistics.get().descr().converted_type()
214
+ return converted_type_name_from_enum(raw_converted_type)
215
+
216
+
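(Annotation, not part of the diff.) `Statistics` is only reachable through a column chunk, so the usual path is file metadata, then row group, then column, then statistics. A short sketch; the path is a placeholder, and `statistics` can be `None` when the writer did not record any:

    import pyarrow.parquet as pq

    meta = pq.read_metadata("example.parquet")       # placeholder path
    col = meta.row_group(0).column(0)                # ColumnChunkMetaData
    stats = col.statistics                           # Statistics or None
    if stats is not None and stats.has_min_max:
        print(stats.min, stats.max)                  # logical-typed values
        print(stats.min_raw, stats.max_raw)          # raw physical-typed values
        print(stats.null_count, stats.num_values)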
217
+ cdef class ParquetLogicalType(_Weakrefable):
218
+ """Logical type of parquet type."""
219
+ cdef:
220
+ shared_ptr[const CParquetLogicalType] type
221
+
222
+ def __cinit__(self):
223
+ pass
224
+
225
+ cdef init(self, const shared_ptr[const CParquetLogicalType]& type):
226
+ self.type = type
227
+
228
+ def __repr__(self):
229
+ return "{}\n {}".format(object.__repr__(self), str(self))
230
+
231
+ def __str__(self):
232
+ return frombytes(self.type.get().ToString(), safe=True)
233
+
234
+ def to_json(self):
235
+ """
236
+ Get a JSON string containing type and type parameters.
237
+
238
+ Returns
239
+ -------
240
+ json : str
241
+ JSON representation of type, with at least a field called 'Type'
242
+ which contains the type name. If the type is parameterized, such
243
+ as a decimal with scale and precision, it will contain those as fields
244
+ as well.
245
+ """
246
+ return frombytes(self.type.get().ToJSON())
247
+
248
+ @property
249
+ def type(self):
250
+ """Name of the logical type (str)."""
251
+ return logical_type_name_from_enum(self.type.get().type())
252
+
253
+
254
+ cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type):
255
+ cdef ParquetLogicalType out = ParquetLogicalType()
256
+ out.init(type)
257
+ return out
258
+
259
+
260
+ cdef _cast_statistic_raw_min(CStatistics* statistics):
261
+ cdef ParquetType physical_type = statistics.physical_type()
262
+ cdef uint32_t type_length = statistics.descr().type_length()
263
+ if physical_type == ParquetType_BOOLEAN:
264
+ return (<CBoolStatistics*> statistics).min()
265
+ elif physical_type == ParquetType_INT32:
266
+ return (<CInt32Statistics*> statistics).min()
267
+ elif physical_type == ParquetType_INT64:
268
+ return (<CInt64Statistics*> statistics).min()
269
+ elif physical_type == ParquetType_FLOAT:
270
+ return (<CFloatStatistics*> statistics).min()
271
+ elif physical_type == ParquetType_DOUBLE:
272
+ return (<CDoubleStatistics*> statistics).min()
273
+ elif physical_type == ParquetType_BYTE_ARRAY:
274
+ return _box_byte_array((<CByteArrayStatistics*> statistics).min())
275
+ elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
276
+ return _box_flba((<CFLBAStatistics*> statistics).min(), type_length)
277
+
278
+
279
+ cdef _cast_statistic_raw_max(CStatistics* statistics):
280
+ cdef ParquetType physical_type = statistics.physical_type()
281
+ cdef uint32_t type_length = statistics.descr().type_length()
282
+ if physical_type == ParquetType_BOOLEAN:
283
+ return (<CBoolStatistics*> statistics).max()
284
+ elif physical_type == ParquetType_INT32:
285
+ return (<CInt32Statistics*> statistics).max()
286
+ elif physical_type == ParquetType_INT64:
287
+ return (<CInt64Statistics*> statistics).max()
288
+ elif physical_type == ParquetType_FLOAT:
289
+ return (<CFloatStatistics*> statistics).max()
290
+ elif physical_type == ParquetType_DOUBLE:
291
+ return (<CDoubleStatistics*> statistics).max()
292
+ elif physical_type == ParquetType_BYTE_ARRAY:
293
+ return _box_byte_array((<CByteArrayStatistics*> statistics).max())
294
+ elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
295
+ return _box_flba((<CFLBAStatistics*> statistics).max(), type_length)
296
+
297
+
298
+ cdef _cast_statistics(CStatistics* statistics):
299
+ cdef:
300
+ shared_ptr[CScalar] c_min
301
+ shared_ptr[CScalar] c_max
302
+ check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max))
303
+ return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max))
304
+
305
+
306
+ cdef _box_byte_array(ParquetByteArray val):
307
+ return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> val.len)
308
+
309
+
310
+ cdef _box_flba(ParquetFLBA val, uint32_t len):
311
+ return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> len)
312
+
313
+
314
+ cdef class ColumnChunkMetaData(_Weakrefable):
315
+ """Column metadata for a single row group."""
316
+
317
+ def __cinit__(self):
318
+ pass
319
+
320
+ def __repr__(self):
321
+ statistics = indent(repr(self.statistics), 4 * ' ')
322
+ return """{0}
323
+ file_offset: {1}
324
+ file_path: {2}
325
+ physical_type: {3}
326
+ num_values: {4}
327
+ path_in_schema: {5}
328
+ is_stats_set: {6}
329
+ statistics:
330
+ {7}
331
+ compression: {8}
332
+ encodings: {9}
333
+ has_dictionary_page: {10}
334
+ dictionary_page_offset: {11}
335
+ data_page_offset: {12}
336
+ total_compressed_size: {13}
337
+ total_uncompressed_size: {14}""".format(object.__repr__(self),
338
+ self.file_offset,
339
+ self.file_path,
340
+ self.physical_type,
341
+ self.num_values,
342
+ self.path_in_schema,
343
+ self.is_stats_set,
344
+ statistics,
345
+ self.compression,
346
+ self.encodings,
347
+ self.has_dictionary_page,
348
+ self.dictionary_page_offset,
349
+ self.data_page_offset,
350
+ self.total_compressed_size,
351
+ self.total_uncompressed_size)
352
+
353
+ def to_dict(self):
354
+ """
355
+ Get dictionary representation of the column chunk metadata.
356
+
357
+ Returns
358
+ -------
359
+ dict
360
+ Dictionary with a key for each attribute of this class.
361
+ """
362
+ statistics = self.statistics.to_dict() if self.is_stats_set else None
363
+ d = dict(
364
+ file_offset=self.file_offset,
365
+ file_path=self.file_path,
366
+ physical_type=self.physical_type,
367
+ num_values=self.num_values,
368
+ path_in_schema=self.path_in_schema,
369
+ is_stats_set=self.is_stats_set,
370
+ statistics=statistics,
371
+ compression=self.compression,
372
+ encodings=self.encodings,
373
+ has_dictionary_page=self.has_dictionary_page,
374
+ dictionary_page_offset=self.dictionary_page_offset,
375
+ data_page_offset=self.data_page_offset,
376
+ total_compressed_size=self.total_compressed_size,
377
+ total_uncompressed_size=self.total_uncompressed_size
378
+ )
379
+ return d
380
+
381
+ def __eq__(self, other):
382
+ try:
383
+ return self.equals(other)
384
+ except TypeError:
385
+ return NotImplemented
386
+
387
+ def equals(self, ColumnChunkMetaData other):
388
+ """
389
+ Return whether the two column chunk metadata objects are equal.
390
+
391
+ Parameters
392
+ ----------
393
+ other : ColumnChunkMetaData
394
+ Metadata to compare against.
395
+
396
+ Returns
397
+ -------
398
+ are_equal : bool
399
+ """
400
+ return self.metadata.Equals(deref(other.metadata))
401
+
402
+ @property
403
+ def file_offset(self):
404
+ """Offset into file where column chunk is located (int)."""
405
+ return self.metadata.file_offset()
406
+
407
+ @property
408
+ def file_path(self):
409
+ """Optional file path if set (str or None)."""
410
+ return frombytes(self.metadata.file_path())
411
+
412
+ @property
413
+ def physical_type(self):
414
+ """Physical type of column (str)."""
415
+ return physical_type_name_from_enum(self.metadata.type())
416
+
417
+ @property
418
+ def num_values(self):
419
+ """Total number of values (int)."""
420
+ return self.metadata.num_values()
421
+
422
+ @property
423
+ def path_in_schema(self):
424
+ """Nested path to field, separated by periods (str)."""
425
+ path = self.metadata.path_in_schema().get().ToDotString()
426
+ return frombytes(path)
427
+
428
+ @property
429
+ def is_stats_set(self):
430
+ """Whether or not statistics are present in metadata (bool)."""
431
+ return self.metadata.is_stats_set()
432
+
433
+ @property
434
+ def statistics(self):
435
+ """Statistics for column chunk (:class:`Statistics`)."""
436
+ if not self.metadata.is_stats_set():
437
+ return None
438
+ statistics = Statistics()
439
+ statistics.init(self.metadata.statistics(), self)
440
+ return statistics
441
+
442
+ @property
443
+ def compression(self):
444
+ """
445
+ Type of compression used for column (str).
446
+
447
+ One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD',
448
+ or 'UNKNOWN'.
449
+ """
450
+ return compression_name_from_enum(self.metadata.compression())
451
+
452
+ @property
453
+ def encodings(self):
454
+ """
455
+ Encodings used for column (tuple of str).
456
+
457
+ One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED',
458
+ 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'.
459
+ """
460
+ return tuple(map(encoding_name_from_enum, self.metadata.encodings()))
461
+
462
+ @property
463
+ def has_dictionary_page(self):
464
+ """Whether there is dictionary data present in the column chunk (bool)."""
465
+ return bool(self.metadata.has_dictionary_page())
466
+
467
+ @property
468
+ def dictionary_page_offset(self):
469
+ """Offset of dictionary page relative to column chunk offset (int)."""
470
+ if self.has_dictionary_page:
471
+ return self.metadata.dictionary_page_offset()
472
+ else:
473
+ return None
474
+
475
+ @property
476
+ def data_page_offset(self):
477
+ """Offset of data page relative to column chunk offset (int)."""
478
+ return self.metadata.data_page_offset()
479
+
480
+ @property
481
+ def has_index_page(self):
482
+ """Not yet supported."""
483
+ raise NotImplementedError('not supported in parquet-cpp')
484
+
485
+ @property
486
+ def index_page_offset(self):
487
+ """Not yet supported."""
488
+ raise NotImplementedError("parquet-cpp doesn't return valid values")
489
+
490
+ @property
491
+ def total_compressed_size(self):
492
+ """Compressed size in bytes (int)."""
493
+ return self.metadata.total_compressed_size()
494
+
495
+ @property
496
+ def total_uncompressed_size(self):
497
+ """Uncompressed size in bytes (int)."""
498
+ return self.metadata.total_uncompressed_size()
499
+
500
+ @property
501
+ def has_offset_index(self):
502
+ """Whether the column chunk has an offset index (bool)."""
503
+ return self.metadata.GetOffsetIndexLocation().has_value()
504
+
505
+ @property
506
+ def has_column_index(self):
507
+ """Whether the column chunk has a column index (bool)."""
508
+ return self.metadata.GetColumnIndexLocation().has_value()
509
+
510
+
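(Annotation, not part of the diff.) Beyond statistics, `ColumnChunkMetaData` reports how a column chunk was physically written: codec, encodings, page offsets and whether page indexes exist. A hedged sketch of inspecting one chunk, with a placeholder path:

    import pyarrow.parquet as pq

    col = pq.read_metadata("example.parquet").row_group(0).column(0)
    print(col.path_in_schema, col.physical_type)
    print(col.compression, col.encodings)            # e.g. 'SNAPPY', ('PLAIN', 'RLE', ...)
    print(col.total_compressed_size, col.total_uncompressed_size)
    print(col.has_dictionary_page, col.has_column_index, col.has_offset_index)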
511
+ cdef class SortingColumn:
512
+ """
513
+ Sorting specification for a single column.
514
+
515
+ Returned by :meth:`RowGroupMetaData.sorting_columns` and used in
516
+ :class:`ParquetWriter` to specify the sort order of the data.
517
+
518
+ Parameters
519
+ ----------
520
+ column_index : int
521
+ Index of column that data is sorted by.
522
+ descending : bool, default False
523
+ Whether column is sorted in descending order.
524
+ nulls_first : bool, default False
525
+ Whether null values appear before valid values.
526
+
527
+ Notes
528
+ -----
529
+
530
+ Column indices are zero-based, refer only to leaf fields, and are in
531
+ depth-first order. This may make the column indices for nested schemas
532
+ different from what you expect. In most cases, it will be easier to
533
+ specify the sort order using column names instead of column indices
534
+ and convert them using the ``from_ordering`` method.
535
+
536
+ Examples
537
+ --------
538
+
539
+ In other APIs, sort order is specified by names, such as:
540
+
541
+ >>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')]
542
+
543
+ For Parquet, the column index must be used instead:
544
+
545
+ >>> import pyarrow.parquet as pq
546
+ >>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)]
547
+ [SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)]
548
+
549
+ Convert the sort_order into the list of sorting columns with
550
+ ``from_ordering`` (note that the schema must be provided as well):
551
+
552
+ >>> import pyarrow as pa
553
+ >>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))])
554
+ >>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order)
555
+ >>> sorting_columns
556
+ (SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False))
557
+
558
+ Convert back to the sort order with ``to_ordering``:
559
+
560
+ >>> pq.SortingColumn.to_ordering(schema, sorting_columns)
561
+ ((('id', 'ascending'), ('timestamp', 'descending')), 'at_end')
562
+
563
+ See Also
564
+ --------
565
+ RowGroupMetaData.sorting_columns
566
+ """
567
+ cdef int column_index
568
+ cdef c_bool descending
569
+ cdef c_bool nulls_first
570
+
571
+ def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False):
572
+ self.column_index = column_index
573
+ self.descending = descending
574
+ self.nulls_first = nulls_first
575
+
576
+ @classmethod
577
+ def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'):
578
+ """
579
+ Create a tuple of SortingColumn objects from the same arguments as
580
+ :class:`pyarrow.compute.SortOptions`.
581
+
582
+ Parameters
583
+ ----------
584
+ schema : Schema
585
+ Schema of the input data.
586
+ sort_keys : Sequence of (name, order) tuples
587
+ Names of field/column keys (str) to sort the input on,
588
+ along with the order each field/column is sorted in.
589
+ Accepted values for `order` are "ascending", "descending".
590
+ null_placement : {'at_start', 'at_end'}, default 'at_end'
591
+ Where null values should appear in the sort order.
592
+
593
+ Returns
594
+ -------
595
+ sorting_columns : tuple of SortingColumn
596
+ """
597
+ if null_placement == 'at_start':
598
+ nulls_first = True
599
+ elif null_placement == 'at_end':
600
+ nulls_first = False
601
+ else:
602
+ raise ValueError('null_placement must be "at_start" or "at_end"')
603
+
604
+ col_map = _name_to_index_map(schema)
605
+
606
+ sorting_columns = []
607
+
608
+ for sort_key in sort_keys:
609
+ if isinstance(sort_key, str):
610
+ name = sort_key
611
+ descending = False
612
+ elif (isinstance(sort_key, tuple) and len(sort_key) == 2 and
613
+ isinstance(sort_key[0], str) and
614
+ isinstance(sort_key[1], str)):
615
+ name, descending = sort_key
616
+ if descending == "descending":
617
+ descending = True
618
+ elif descending == "ascending":
619
+ descending = False
620
+ else:
621
+ raise ValueError("Invalid sort key direction: {0}"
622
+ .format(descending))
623
+ else:
624
+ raise ValueError("Invalid sort key: {0}".format(sort_key))
625
+
626
+ try:
627
+ column_index = col_map[name]
628
+ except KeyError:
629
+ raise ValueError("Sort key name '{0}' not found in schema:\n{1}"
630
+ .format(name, schema))
631
+
632
+ sorting_columns.append(
633
+ cls(column_index, descending=descending, nulls_first=nulls_first)
634
+ )
635
+
636
+ return tuple(sorting_columns)
637
+
638
+ @staticmethod
639
+ def to_ordering(Schema schema, sorting_columns):
640
+ """
641
+ Convert a tuple of SortingColumn objects to the same format as
642
+ :class:`pyarrow.compute.SortOptions`.
643
+
644
+ Parameters
645
+ ----------
646
+ schema : Schema
647
+ Schema of the input data.
648
+ sorting_columns : tuple of SortingColumn
649
+ Columns to sort the input on.
650
+
651
+ Returns
652
+ -------
653
+ sort_keys : tuple of (name, order) tuples
654
+ null_placement : {'at_start', 'at_end'}
655
+ """
656
+ col_map = {i: name for name, i in _name_to_index_map(schema).items()}
657
+
658
+ sort_keys = []
659
+ nulls_first = None
660
+
661
+ for sorting_column in sorting_columns:
662
+ name = col_map[sorting_column.column_index]
663
+ if sorting_column.descending:
664
+ order = "descending"
665
+ else:
666
+ order = "ascending"
667
+ sort_keys.append((name, order))
668
+ if nulls_first is None:
669
+ nulls_first = sorting_column.nulls_first
670
+ elif nulls_first != sorting_column.nulls_first:
671
+ raise ValueError("Sorting columns have inconsistent null placement")
672
+
673
+ if nulls_first:
674
+ null_placement = "at_start"
675
+ else:
676
+ null_placement = "at_end"
677
+
678
+ return tuple(sort_keys), null_placement
679
+
680
+ def __repr__(self):
681
+ return """{}(column_index={}, descending={}, nulls_first={})""".format(
682
+ self.__class__.__name__,
683
+ self.column_index, self.descending, self.nulls_first)
684
+
685
+ def __eq__(self, SortingColumn other):
686
+ return (self.column_index == other.column_index and
687
+ self.descending == other.descending and
688
+ self.nulls_first == other.nulls_first)
689
+
690
+ def __hash__(self):
691
+ return hash((self.column_index, self.descending, self.nulls_first))
692
+
693
+ @property
694
+ def column_index(self):
695
+ """Index of column that data is sorted by (int)."""
696
+ return self.column_index
697
+
698
+ @property
699
+ def descending(self):
700
+ """Whether column is sorted in descending order (bool)."""
701
+ return self.descending
702
+
703
+ @property
704
+ def nulls_first(self):
705
+ """Whether null values appear before valid values (bool)."""
706
+ return self.nulls_first
707
+
708
+
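(Annotation, not part of the diff.) The `from_ordering` / `to_ordering` helpers above translate between name-based sort keys and the index-based `SortingColumn` tuples that Parquet stores. A sketch mirroring the class docstring; the column names are illustrative, and `sorting_columns` only records the order in the metadata, it does not sort the data:

    import pyarrow as pa
    import pyarrow.parquet as pq

    schema = pa.schema([("id", pa.int64()), ("ts", pa.timestamp("ms"))])
    sorting = pq.SortingColumn.from_ordering(
        schema, [("id", "ascending"), ("ts", "descending")])
    print(sorting)
    # Round-trip back to name-based sort keys plus null placement:
    print(pq.SortingColumn.to_ordering(schema, sorting))
    # The resulting tuple can be passed as sorting_columns= to pq.write_table
    # or pq.ParquetWriter for data that is already sorted.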
709
+ cdef class RowGroupMetaData(_Weakrefable):
710
+ """Metadata for a single row group."""
711
+
712
+ def __cinit__(self, FileMetaData parent, int index):
713
+ if index < 0 or index >= parent.num_row_groups:
714
+ raise IndexError('{0} out of bounds'.format(index))
715
+ self.up_metadata = parent._metadata.RowGroup(index)
716
+ self.metadata = self.up_metadata.get()
717
+ self.parent = parent
718
+ self.index = index
719
+
720
+ def __reduce__(self):
721
+ return RowGroupMetaData, (self.parent, self.index)
722
+
723
+ def __eq__(self, other):
724
+ try:
725
+ return self.equals(other)
726
+ except TypeError:
727
+ return NotImplemented
728
+
729
+ def equals(self, RowGroupMetaData other):
730
+ """
731
+ Return whether the two row group metadata objects are equal.
732
+
733
+ Parameters
734
+ ----------
735
+ other : RowGroupMetaData
736
+ Metadata to compare against.
737
+
738
+ Returns
739
+ -------
740
+ are_equal : bool
741
+ """
742
+ return self.metadata.Equals(deref(other.metadata))
743
+
744
+ def column(self, int i):
745
+ """
746
+ Get column metadata at given index.
747
+
748
+ Parameters
749
+ ----------
750
+ i : int
751
+ Index of column to get metadata for.
752
+
753
+ Returns
754
+ -------
755
+ ColumnChunkMetaData
756
+ Metadata for column within this chunk.
757
+ """
758
+ if i < 0 or i >= self.num_columns:
759
+ raise IndexError('{0} out of bounds'.format(i))
760
+ chunk = ColumnChunkMetaData()
761
+ chunk.init(self, i)
762
+ return chunk
763
+
764
+ def __repr__(self):
765
+ return """{0}
766
+ num_columns: {1}
767
+ num_rows: {2}
768
+ total_byte_size: {3}
769
+ sorting_columns: {4}""".format(object.__repr__(self),
770
+ self.num_columns,
771
+ self.num_rows,
772
+ self.total_byte_size,
773
+ self.sorting_columns)
774
+
775
+ def to_dict(self):
776
+ """
777
+ Get dictionary representation of the row group metadata.
778
+
779
+ Returns
780
+ -------
781
+ dict
782
+ Dictionary with a key for each attribute of this class.
783
+ """
784
+ columns = []
785
+ d = dict(
786
+ num_columns=self.num_columns,
787
+ num_rows=self.num_rows,
788
+ total_byte_size=self.total_byte_size,
789
+ columns=columns,
790
+ sorting_columns=[col.to_dict() for col in self.sorting_columns]
791
+ )
792
+ for i in range(self.num_columns):
793
+ columns.append(self.column(i).to_dict())
794
+ return d
795
+
796
+ @property
797
+ def num_columns(self):
798
+ """Number of columns in this row group (int)."""
799
+ return self.metadata.num_columns()
800
+
801
+ @property
802
+ def num_rows(self):
803
+ """Number of rows in this row group (int)."""
804
+ return self.metadata.num_rows()
805
+
806
+ @property
807
+ def total_byte_size(self):
808
+ """Total byte size of all the uncompressed column data in this row group (int)."""
809
+ return self.metadata.total_byte_size()
810
+
811
+ @property
812
+ def sorting_columns(self):
813
+ """Columns the row group is sorted by (tuple of :class:`SortingColumn`)."""
814
+ out = []
815
+ cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns()
816
+ for sorting_col in sorting_columns:
817
+ out.append(SortingColumn(
818
+ sorting_col.column_idx,
819
+ sorting_col.descending,
820
+ sorting_col.nulls_first
821
+ ))
822
+ return tuple(out)
823
+
824
+
825
+ def _reconstruct_filemetadata(Buffer serialized):
826
+ cdef:
827
+ FileMetaData metadata = FileMetaData.__new__(FileMetaData)
828
+ CBuffer *buffer = serialized.buffer.get()
829
+ uint32_t metadata_len = <uint32_t>buffer.size()
830
+
831
+ metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len))
832
+
833
+ return metadata
834
+
835
+
836
+ cdef class FileMetaData(_Weakrefable):
837
+ """Parquet metadata for a single file."""
838
+
839
+ def __cinit__(self):
840
+ pass
841
+
842
+ def __reduce__(self):
843
+ cdef:
844
+ NativeFile sink = BufferOutputStream()
845
+ COutputStream* c_sink = sink.get_output_stream().get()
846
+ with nogil:
847
+ self._metadata.WriteTo(c_sink)
848
+
849
+ cdef Buffer buffer = sink.getvalue()
850
+ return _reconstruct_filemetadata, (buffer,)
851
+
852
+ def __hash__(self):
853
+ return hash((self.schema,
854
+ self.num_rows,
855
+ self.num_row_groups,
856
+ self.format_version,
857
+ self.serialized_size))
858
+
859
+ def __repr__(self):
860
+ return """{0}
861
+ created_by: {1}
862
+ num_columns: {2}
863
+ num_rows: {3}
864
+ num_row_groups: {4}
865
+ format_version: {5}
866
+ serialized_size: {6}""".format(object.__repr__(self),
867
+ self.created_by, self.num_columns,
868
+ self.num_rows, self.num_row_groups,
869
+ self.format_version,
870
+ self.serialized_size)
871
+
872
+ def to_dict(self):
873
+ """
874
+ Get dictionary representation of the file metadata.
875
+
876
+ Returns
877
+ -------
878
+ dict
879
+ Dictionary with a key for each attribute of this class.
880
+ """
881
+ row_groups = []
882
+ d = dict(
883
+ created_by=self.created_by,
884
+ num_columns=self.num_columns,
885
+ num_rows=self.num_rows,
886
+ num_row_groups=self.num_row_groups,
887
+ row_groups=row_groups,
888
+ format_version=self.format_version,
889
+ serialized_size=self.serialized_size
890
+ )
891
+ for i in range(self.num_row_groups):
892
+ row_groups.append(self.row_group(i).to_dict())
893
+ return d
894
+
895
+ def __eq__(self, other):
896
+ try:
897
+ return self.equals(other)
898
+ except TypeError:
899
+ return NotImplemented
900
+
901
+ def equals(self, FileMetaData other not None):
902
+ """
903
+ Return whether the two file metadata objects are equal.
904
+
905
+ Parameters
906
+ ----------
907
+ other : FileMetaData
908
+ Metadata to compare against.
909
+
910
+ Returns
911
+ -------
912
+ are_equal : bool
913
+ """
914
+ return self._metadata.Equals(deref(other._metadata))
915
+
916
+ @property
917
+ def schema(self):
918
+ """Schema of the file (:class:`ParquetSchema`)."""
919
+ if self._schema is None:
920
+ self._schema = ParquetSchema(self)
921
+ return self._schema
922
+
923
+ @property
924
+ def serialized_size(self):
925
+ """Size of the original thrift encoded metadata footer (int)."""
926
+ return self._metadata.size()
927
+
928
+ @property
929
+ def num_columns(self):
930
+ """Number of columns in file (int)."""
931
+ return self._metadata.num_columns()
932
+
933
+ @property
934
+ def num_rows(self):
935
+ """Total number of rows in file (int)."""
936
+ return self._metadata.num_rows()
937
+
938
+ @property
939
+ def num_row_groups(self):
940
+ """Number of row groups in file (int)."""
941
+ return self._metadata.num_row_groups()
942
+
943
+ @property
944
+ def format_version(self):
945
+ """
946
+ Parquet format version used in file (str, such as '1.0', '2.4').
947
+
948
+ If version is missing or unparsable, will default to assuming '2.6'.
949
+ """
950
+ cdef ParquetVersion version = self._metadata.version()
951
+ if version == ParquetVersion_V1:
952
+ return '1.0'
953
+ elif version == ParquetVersion_V2_0:
954
+ return 'pseudo-2.0'
955
+ elif version == ParquetVersion_V2_4:
956
+ return '2.4'
957
+ elif version == ParquetVersion_V2_6:
958
+ return '2.6'
959
+ else:
960
+ warnings.warn('Unrecognized file version, assuming 2.6: {}'
961
+ .format(version))
962
+ return '2.6'
963
+
964
+ @property
965
+ def created_by(self):
966
+ """
967
+ String describing source of the parquet file (str).
968
+
969
+ This typically includes library name and version number. For example, Arrow 7.0's
970
+ writer returns 'parquet-cpp-arrow version 7.0.0'.
971
+ """
972
+ return frombytes(self._metadata.created_by())
973
+
974
+ @property
975
+ def metadata(self):
976
+ """Additional metadata as key value pairs (dict[bytes, bytes])."""
977
+ cdef:
978
+ unordered_map[c_string, c_string] metadata
979
+ const CKeyValueMetadata* underlying_metadata
980
+ underlying_metadata = self._metadata.key_value_metadata().get()
981
+ if underlying_metadata != NULL:
982
+ underlying_metadata.ToUnorderedMap(&metadata)
983
+ return metadata
984
+ else:
985
+ return None
986
+
987
+ def row_group(self, int i):
988
+ """
989
+ Get metadata for row group at index i.
990
+
991
+ Parameters
992
+ ----------
993
+ i : int
994
+ Row group index to get.
995
+
996
+ Returns
997
+ -------
998
+ row_group_metadata : RowGroupMetaData
999
+ """
1000
+ return RowGroupMetaData(self, i)
1001
+
1002
+ def set_file_path(self, path):
1003
+ """
1004
+ Set ColumnChunk file paths to the given value.
1005
+
1006
+ This method modifies the ``file_path`` field of each ColumnChunk
1007
+ in the FileMetaData to be a particular value.
1008
+
1009
+ Parameters
1010
+ ----------
1011
+ path : str
1012
+ The file path to set on all ColumnChunks.
1013
+ """
1014
+ cdef:
1015
+ c_string c_path = tobytes(path)
1016
+ self._metadata.set_file_path(c_path)
1017
+
1018
+ def append_row_groups(self, FileMetaData other):
1019
+ """
1020
+ Append row groups from other FileMetaData object.
1021
+
1022
+ Parameters
1023
+ ----------
1024
+ other : FileMetaData
1025
+ Other metadata to append row groups from.
1026
+ """
1027
+ cdef shared_ptr[CFileMetaData] c_metadata
1028
+
1029
+ c_metadata = other.sp_metadata
1030
+ self._metadata.AppendRowGroups(deref(c_metadata))
1031
+
1032
+ def write_metadata_file(self, where):
1033
+ """
1034
+ Write the metadata to a metadata-only Parquet file.
1035
+
1036
+ Parameters
1037
+ ----------
1038
+ where : path or file-like object
1039
+ Where to write the metadata. Should be a writable path on
1040
+ the local filesystem, or a writable file-like object.
1041
+ """
1042
+ cdef:
1043
+ shared_ptr[COutputStream] sink
1044
+ c_string c_where
1045
+
1046
+ try:
1047
+ where = _stringify_path(where)
1048
+ except TypeError:
1049
+ get_writer(where, &sink)
1050
+ else:
1051
+ c_where = tobytes(where)
1052
+ with nogil:
1053
+ sink = GetResultValue(FileOutputStream.Open(c_where))
1054
+
1055
+ with nogil:
1056
+ check_status(
1057
+ WriteMetaDataFile(deref(self._metadata), sink.get()))
1058
+
1059
+
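(Annotation, not part of the diff.) `set_file_path`, `append_row_groups` and `write_metadata_file` exist mainly to assemble a metadata-only summary (the `_metadata` sidecar convention) for a multi-file dataset. A hedged sketch, assuming two part files with identical schemas and illustrative paths:

    import pyarrow as pa
    import pyarrow.parquet as pq

    t = pa.table({"x": [1, 2]})
    pq.write_table(t, "part-0.parquet")
    pq.write_table(t, "part-1.parquet")

    md = pq.read_metadata("part-0.parquet")
    md.set_file_path("part-0.parquet")          # record a relative path per column chunk

    md1 = pq.read_metadata("part-1.parquet")
    md1.set_file_path("part-1.parquet")
    md.append_row_groups(md1)                   # schemas must match

    md.write_metadata_file("_metadata")         # metadata-only Parquet file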
1060
+ cdef class ParquetSchema(_Weakrefable):
1061
+ """A Parquet schema."""
1062
+
1063
+ def __cinit__(self, FileMetaData container):
1064
+ self.parent = container
1065
+ self.schema = container._metadata.schema()
1066
+
1067
+ def __repr__(self):
1068
+ return "{0}\n{1}".format(
1069
+ object.__repr__(self),
1070
+ frombytes(self.schema.ToString(), safe=True))
1071
+
1072
+ def __reduce__(self):
1073
+ return ParquetSchema, (self.parent,)
1074
+
1075
+ def __len__(self):
1076
+ return self.schema.num_columns()
1077
+
1078
+ def __getitem__(self, i):
1079
+ return self.column(i)
1080
+
1081
+ def __hash__(self):
1082
+ return hash(self.schema.ToString())
1083
+
1084
+ @property
1085
+ def names(self):
1086
+ """Name of each field (list of str)."""
1087
+ return [self[i].name for i in range(len(self))]
1088
+
1089
+ def to_arrow_schema(self):
1090
+ """
1091
+ Convert Parquet schema to effective Arrow schema.
1092
+
1093
+ Returns
1094
+ -------
1095
+ schema : Schema
1096
+ """
1097
+ cdef shared_ptr[CSchema] sp_arrow_schema
1098
+
1099
+ with nogil:
1100
+ check_status(FromParquetSchema(
1101
+ self.schema, default_arrow_reader_properties(),
1102
+ self.parent._metadata.key_value_metadata(),
1103
+ &sp_arrow_schema))
1104
+
1105
+ return pyarrow_wrap_schema(sp_arrow_schema)
1106
+
1107
+ def __eq__(self, other):
1108
+ try:
1109
+ return self.equals(other)
1110
+ except TypeError:
1111
+ return NotImplemented
1112
+
1113
+ def equals(self, ParquetSchema other):
1114
+ """
1115
+ Return whether the two schemas are equal.
1116
+
1117
+ Parameters
1118
+ ----------
1119
+ other : ParquetSchema
1120
+ Schema to compare against.
1121
+
1122
+ Returns
1123
+ -------
1124
+ are_equal : bool
1125
+ """
1126
+ return self.schema.Equals(deref(other.schema))
1127
+
1128
+ def column(self, i):
1129
+ """
1130
+ Return the schema for a single column.
1131
+
1132
+ Parameters
1133
+ ----------
1134
+ i : int
1135
+ Index of column in schema.
1136
+
1137
+ Returns
1138
+ -------
1139
+ column_schema : ColumnSchema
1140
+ """
1141
+ if i < 0 or i >= len(self):
1142
+ raise IndexError('{0} out of bounds'.format(i))
1143
+
1144
+ return ColumnSchema(self, i)
1145
+
1146
+
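(Annotation, not part of the diff.) `ParquetSchema` is the flat, leaf-oriented Parquet view of the schema; `to_arrow_schema` converts it back to the nested Arrow schema, using the stored key-value metadata when available. A short sketch with a placeholder path:

    import pyarrow.parquet as pq

    meta = pq.read_metadata("example.parquet")
    pq_schema = meta.schema                     # ParquetSchema (leaf columns)
    print(pq_schema.names)                      # leaf field names
    arrow_schema = pq_schema.to_arrow_schema()
    print(arrow_schema)                         # pyarrow.Schema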
1147
+ cdef class ColumnSchema(_Weakrefable):
1148
+ """Schema for a single column."""
1149
+ cdef:
1150
+ int index
1151
+ ParquetSchema parent
1152
+ const ColumnDescriptor* descr
1153
+
1154
+ def __cinit__(self, ParquetSchema schema, int index):
1155
+ self.parent = schema
1156
+ self.index = index # for pickling support
1157
+ self.descr = schema.schema.Column(index)
1158
+
1159
+ def __eq__(self, other):
1160
+ try:
1161
+ return self.equals(other)
1162
+ except TypeError:
1163
+ return NotImplemented
1164
+
1165
+ def __reduce__(self):
1166
+ return ColumnSchema, (self.parent, self.index)
1167
+
1168
+ def equals(self, ColumnSchema other):
1169
+ """
1170
+ Return whether the two column schemas are equal.
1171
+
1172
+ Parameters
1173
+ ----------
1174
+ other : ColumnSchema
1175
+ Schema to compare against.
1176
+
1177
+ Returns
1178
+ -------
1179
+ are_equal : bool
1180
+ """
1181
+ return self.descr.Equals(deref(other.descr))
1182
+
1183
+ def __repr__(self):
1184
+ physical_type = self.physical_type
1185
+ converted_type = self.converted_type
1186
+ if converted_type == 'DECIMAL':
1187
+ converted_type = 'DECIMAL({0}, {1})'.format(self.precision,
1188
+ self.scale)
1189
+ elif physical_type == 'FIXED_LEN_BYTE_ARRAY':
1190
+ converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})'
1191
+ .format(self.length))
1192
+
1193
+ return """<ParquetColumnSchema>
1194
+ name: {0}
1195
+ path: {1}
1196
+ max_definition_level: {2}
1197
+ max_repetition_level: {3}
1198
+ physical_type: {4}
1199
+ logical_type: {5}
1200
+ converted_type (legacy): {6}""".format(self.name, self.path,
1201
+ self.max_definition_level,
1202
+ self.max_repetition_level,
1203
+ physical_type,
1204
+ str(self.logical_type),
1205
+ converted_type)
1206
+
1207
+ @property
1208
+ def name(self):
1209
+ """Name of field (str)."""
1210
+ return frombytes(self.descr.name())
1211
+
1212
+ @property
1213
+ def path(self):
1214
+ """Nested path to field, separated by periods (str)."""
1215
+ return frombytes(self.descr.path().get().ToDotString())
1216
+
1217
+ @property
1218
+ def max_definition_level(self):
1219
+ """Maximum definition level (int)."""
1220
+ return self.descr.max_definition_level()
1221
+
1222
+ @property
1223
+ def max_repetition_level(self):
1224
+ """Maximum repetition level (int)."""
1225
+ return self.descr.max_repetition_level()
1226
+
1227
+ @property
1228
+ def physical_type(self):
1229
+ """Name of physical type (str)."""
1230
+ return physical_type_name_from_enum(self.descr.physical_type())
1231
+
1232
+ @property
1233
+ def logical_type(self):
1234
+ """Logical type of column (:class:`ParquetLogicalType`)."""
1235
+ return wrap_logical_type(self.descr.logical_type())
1236
+
1237
+ @property
1238
+ def converted_type(self):
1239
+ """Legacy converted type (str or None)."""
1240
+ return converted_type_name_from_enum(self.descr.converted_type())
1241
+
1242
+ # FIXED_LEN_BYTE_ARRAY attribute
1243
+ @property
1244
+ def length(self):
1245
+ """Array length if fixed length byte array type, None otherwise (int or None)."""
1246
+ return self.descr.type_length()
1247
+
1248
+ # Decimal attributes
1249
+ @property
1250
+ def precision(self):
1251
+ """Precision if decimal type, None otherwise (int or None)."""
1252
+ return self.descr.type_precision()
1253
+
1254
+ @property
1255
+ def scale(self):
1256
+ """Scale if decimal type, None otherwise (int or None)."""
1257
+ return self.descr.type_scale()
1258
+
1259
+
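A minimal usage sketch of the two classes above; `file_metadata` stands in for a FileMetaData instance obtained elsewhere (for example from ParquetReader.metadata further below):

schema = ParquetSchema(file_metadata)       # Parquet schema of the file
print(len(schema), schema.names)            # number of leaf columns and their names
col = schema[0]                             # ColumnSchema for the first leaf column
print(col.name, col.path, col.physical_type, col.logical_type)
arrow_schema = schema.to_arrow_schema()     # effective Arrow schema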
1260
+ cdef physical_type_name_from_enum(ParquetType type_):
1261
+ return {
1262
+ ParquetType_BOOLEAN: 'BOOLEAN',
1263
+ ParquetType_INT32: 'INT32',
1264
+ ParquetType_INT64: 'INT64',
1265
+ ParquetType_INT96: 'INT96',
1266
+ ParquetType_FLOAT: 'FLOAT',
1267
+ ParquetType_DOUBLE: 'DOUBLE',
1268
+ ParquetType_BYTE_ARRAY: 'BYTE_ARRAY',
1269
+ ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY',
1270
+ }.get(type_, 'UNKNOWN')
1271
+
1272
+
1273
+ cdef logical_type_name_from_enum(ParquetLogicalTypeId type_):
1274
+ return {
1275
+ ParquetLogicalType_UNDEFINED: 'UNDEFINED',
1276
+ ParquetLogicalType_STRING: 'STRING',
1277
+ ParquetLogicalType_MAP: 'MAP',
1278
+ ParquetLogicalType_LIST: 'LIST',
1279
+ ParquetLogicalType_ENUM: 'ENUM',
1280
+ ParquetLogicalType_DECIMAL: 'DECIMAL',
1281
+ ParquetLogicalType_DATE: 'DATE',
1282
+ ParquetLogicalType_TIME: 'TIME',
1283
+ ParquetLogicalType_TIMESTAMP: 'TIMESTAMP',
1284
+ ParquetLogicalType_INT: 'INT',
1285
+ ParquetLogicalType_JSON: 'JSON',
1286
+ ParquetLogicalType_BSON: 'BSON',
1287
+ ParquetLogicalType_UUID: 'UUID',
1288
+ ParquetLogicalType_NONE: 'NONE',
1289
+ }.get(type_, 'UNKNOWN')
1290
+
1291
+
1292
+ cdef converted_type_name_from_enum(ParquetConvertedType type_):
1293
+ return {
1294
+ ParquetConvertedType_NONE: 'NONE',
1295
+ ParquetConvertedType_UTF8: 'UTF8',
1296
+ ParquetConvertedType_MAP: 'MAP',
1297
+ ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE',
1298
+ ParquetConvertedType_LIST: 'LIST',
1299
+ ParquetConvertedType_ENUM: 'ENUM',
1300
+ ParquetConvertedType_DECIMAL: 'DECIMAL',
1301
+ ParquetConvertedType_DATE: 'DATE',
1302
+ ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS',
1303
+ ParquetConvertedType_TIME_MICROS: 'TIME_MICROS',
1304
+ ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS',
1305
+ ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS',
1306
+ ParquetConvertedType_UINT_8: 'UINT_8',
1307
+ ParquetConvertedType_UINT_16: 'UINT_16',
1308
+ ParquetConvertedType_UINT_32: 'UINT_32',
1309
+ ParquetConvertedType_UINT_64: 'UINT_64',
1310
+ ParquetConvertedType_INT_8: 'INT_8',
1311
+ ParquetConvertedType_INT_16: 'INT_16',
1312
+ ParquetConvertedType_INT_32: 'INT_32',
1313
+ ParquetConvertedType_INT_64: 'INT_64',
1314
+ ParquetConvertedType_JSON: 'JSON',
1315
+ ParquetConvertedType_BSON: 'BSON',
1316
+ ParquetConvertedType_INTERVAL: 'INTERVAL',
1317
+ }.get(type_, 'UNKNOWN')
1318
+
1319
+
1320
+ cdef encoding_name_from_enum(ParquetEncoding encoding_):
1321
+ return {
1322
+ ParquetEncoding_PLAIN: 'PLAIN',
1323
+ ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY',
1324
+ ParquetEncoding_RLE: 'RLE',
1325
+ ParquetEncoding_BIT_PACKED: 'BIT_PACKED',
1326
+ ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED',
1327
+ ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY',
1328
+ ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY',
1329
+ ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY',
1330
+ ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT',
1331
+ }.get(encoding_, 'UNKNOWN')
1332
+
1333
+
1334
+ cdef encoding_enum_from_name(str encoding_name):
1335
+ enc = {
1336
+ 'PLAIN': ParquetEncoding_PLAIN,
1337
+ 'BIT_PACKED': ParquetEncoding_BIT_PACKED,
1338
+ 'RLE': ParquetEncoding_RLE,
1339
+ 'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT,
1340
+ 'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED,
1341
+ 'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY,
1342
+ 'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY,
1343
+ 'RLE_DICTIONARY': 'dict',
1344
+ 'PLAIN_DICTIONARY': 'dict',
1345
+ }.get(encoding_name, None)
1346
+ if enc is None:
1347
+ raise ValueError(f"Unsupported column encoding: {encoding_name!r}")
1348
+ elif enc == 'dict':
1349
+ raise ValueError(f"{encoding_name!r} is already used by default.")
1350
+ else:
1351
+ return enc
1352
+
1353
+
1354
+ cdef compression_name_from_enum(ParquetCompression compression_):
1355
+ return {
1356
+ ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED',
1357
+ ParquetCompression_SNAPPY: 'SNAPPY',
1358
+ ParquetCompression_GZIP: 'GZIP',
1359
+ ParquetCompression_LZO: 'LZO',
1360
+ ParquetCompression_BROTLI: 'BROTLI',
1361
+ ParquetCompression_LZ4: 'LZ4',
1362
+ ParquetCompression_ZSTD: 'ZSTD',
1363
+ }.get(compression_, 'UNKNOWN')
1364
+
1365
+
1366
+ cdef int check_compression_name(name) except -1:
1367
+ if name.upper() not in {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4',
1368
+ 'ZSTD'}:
1369
+ raise ArrowException("Unsupported compression: " + name)
1370
+ return 0
1371
+
1372
+
1373
+ cdef ParquetCompression compression_from_name(name):
1374
+ name = name.upper()
1375
+ if name == 'SNAPPY':
1376
+ return ParquetCompression_SNAPPY
1377
+ elif name == 'GZIP':
1378
+ return ParquetCompression_GZIP
1379
+ elif name == 'LZO':
1380
+ return ParquetCompression_LZO
1381
+ elif name == 'BROTLI':
1382
+ return ParquetCompression_BROTLI
1383
+ elif name == 'LZ4':
1384
+ return ParquetCompression_LZ4
1385
+ elif name == 'ZSTD':
1386
+ return ParquetCompression_ZSTD
1387
+ else:
1388
+ return ParquetCompression_UNCOMPRESSED
1389
+
1390
+
1391
+ cdef class ParquetReader(_Weakrefable):
1392
+ cdef:
1393
+ object source
1394
+ CMemoryPool* pool
1395
+ UniquePtrNoGIL[FileReader] reader
1396
+ FileMetaData _metadata
1397
+ shared_ptr[CRandomAccessFile] rd_handle
1398
+
1399
+ cdef public:
1400
+ _column_idx_map
1401
+
1402
+ def __cinit__(self, MemoryPool memory_pool=None):
1403
+ self.pool = maybe_unbox_memory_pool(memory_pool)
1404
+ self._metadata = None
1405
+
1406
+ def open(self, object source not None, *, bint use_memory_map=False,
1407
+ read_dictionary=None, FileMetaData metadata=None,
1408
+ int buffer_size=0, bint pre_buffer=False,
1409
+ coerce_int96_timestamp_unit=None,
1410
+ FileDecryptionProperties decryption_properties=None,
1411
+ thrift_string_size_limit=None,
1412
+ thrift_container_size_limit=None,
1413
+ page_checksum_verification=False):
1414
+ """
1415
+ Open a parquet file for reading.
1416
+
1417
+ Parameters
1418
+ ----------
1419
+ source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
1420
+ use_memory_map : bool, default False
1421
+ read_dictionary : iterable[int or str], optional
1422
+ metadata : FileMetaData, optional
1423
+ buffer_size : int, default 0
1424
+ pre_buffer : bool, default False
1425
+ coerce_int96_timestamp_unit : str, optional
1426
+ decryption_properties : FileDecryptionProperties, optional
1427
+ thrift_string_size_limit : int, optional
1428
+ thrift_container_size_limit : int, optional
1429
+ page_checksum_verification : bool, default False
1430
+ """
1431
+ cdef:
1432
+ shared_ptr[CFileMetaData] c_metadata
1433
+ CReaderProperties properties = default_reader_properties()
1434
+ ArrowReaderProperties arrow_props = (
1435
+ default_arrow_reader_properties())
1436
+ FileReaderBuilder builder
1437
+
1438
+ if metadata is not None:
1439
+ c_metadata = metadata.sp_metadata
1440
+
1441
+ if buffer_size > 0:
1442
+ properties.enable_buffered_stream()
1443
+ properties.set_buffer_size(buffer_size)
1444
+ elif buffer_size == 0:
1445
+ properties.disable_buffered_stream()
1446
+ else:
1447
+ raise ValueError('Buffer size must be larger than zero')
1448
+
1449
+ if thrift_string_size_limit is not None:
1450
+ if thrift_string_size_limit <= 0:
1451
+ raise ValueError("thrift_string_size_limit "
1452
+ "must be larger than zero")
1453
+ properties.set_thrift_string_size_limit(thrift_string_size_limit)
1454
+ if thrift_container_size_limit is not None:
1455
+ if thrift_container_size_limit <= 0:
1456
+ raise ValueError("thrift_container_size_limit "
1457
+ "must be larger than zero")
1458
+ properties.set_thrift_container_size_limit(
1459
+ thrift_container_size_limit)
1460
+
1461
+ if decryption_properties is not None:
1462
+ properties.file_decryption_properties(
1463
+ decryption_properties.unwrap())
1464
+
1465
+ arrow_props.set_pre_buffer(pre_buffer)
1466
+
1467
+ properties.set_page_checksum_verification(page_checksum_verification)
1468
+
1469
+ if coerce_int96_timestamp_unit is None:
1470
+ # use the default defined in default_arrow_reader_properties()
1471
+ pass
1472
+ else:
1473
+ arrow_props.set_coerce_int96_timestamp_unit(
1474
+ string_to_timeunit(coerce_int96_timestamp_unit))
1475
+
1476
+ self.source = source
1477
+ get_reader(source, use_memory_map, &self.rd_handle)
1478
+
1479
+ with nogil:
1480
+ check_status(builder.Open(self.rd_handle, properties, c_metadata))
1481
+
1482
+ # Set up metadata
1483
+ with nogil:
1484
+ c_metadata = builder.raw_reader().metadata()
1485
+ self._metadata = result = FileMetaData()
1486
+ result.init(c_metadata)
1487
+
1488
+ if read_dictionary is not None:
1489
+ self._set_read_dictionary(read_dictionary, &arrow_props)
1490
+
1491
+ with nogil:
1492
+ check_status(builder.memory_pool(self.pool)
1493
+ .properties(arrow_props)
1494
+ .Build(&self.reader))
1495
+
1496
+ cdef _set_read_dictionary(self, read_dictionary,
1497
+ ArrowReaderProperties* props):
1498
+ for column in read_dictionary:
1499
+ if not isinstance(column, int):
1500
+ column = self.column_name_idx(column)
1501
+ props.set_read_dictionary(column, True)
1502
+
1503
+ @property
1504
+ def column_paths(self):
1505
+ cdef:
1506
+ FileMetaData container = self.metadata
1507
+ const CFileMetaData* metadata = container._metadata
1508
+ vector[c_string] path
1509
+ int i = 0
1510
+
1511
+ paths = []
1512
+ for i in range(0, metadata.num_columns()):
1513
+ path = (metadata.schema().Column(i)
1514
+ .path().get().ToDotVector())
1515
+ paths.append([frombytes(x) for x in path])
1516
+
1517
+ return paths
1518
+
1519
+ @property
1520
+ def metadata(self):
1521
+ return self._metadata
1522
+
1523
+ @property
1524
+ def schema_arrow(self):
1525
+ cdef shared_ptr[CSchema] out
1526
+ with nogil:
1527
+ check_status(self.reader.get().GetSchema(&out))
1528
+ return pyarrow_wrap_schema(out)
1529
+
1530
+ @property
1531
+ def num_row_groups(self):
1532
+ return self.reader.get().num_row_groups()
1533
+
1534
+ def set_use_threads(self, bint use_threads):
1535
+ """
1536
+ Parameters
1537
+ ----------
1538
+ use_threads : bool
1539
+ """
1540
+ self.reader.get().set_use_threads(use_threads)
1541
+
1542
+ def set_batch_size(self, int64_t batch_size):
1543
+ """
1544
+ Parameters
1545
+ ----------
1546
+ batch_size : int64
1547
+ """
1548
+ self.reader.get().set_batch_size(batch_size)
1549
+
1550
+ def iter_batches(self, int64_t batch_size, row_groups, column_indices=None,
1551
+ bint use_threads=True):
1552
+ """
1553
+ Parameters
1554
+ ----------
1555
+ batch_size : int64
1556
+ row_groups : list[int]
1557
+ column_indices : list[int], optional
1558
+ use_threads : bool, default True
1559
+
1560
+ Yields
1561
+ ------
1562
+ next : RecordBatch
1563
+ """
1564
+ cdef:
1565
+ vector[int] c_row_groups
1566
+ vector[int] c_column_indices
1567
+ shared_ptr[CRecordBatch] record_batch
1568
+ UniquePtrNoGIL[CRecordBatchReader] recordbatchreader
1569
+
1570
+ self.set_batch_size(batch_size)
1571
+
1572
+ if use_threads:
1573
+ self.set_use_threads(use_threads)
1574
+
1575
+ for row_group in row_groups:
1576
+ c_row_groups.push_back(row_group)
1577
+
1578
+ if column_indices is not None:
1579
+ for index in column_indices:
1580
+ c_column_indices.push_back(index)
1581
+ with nogil:
1582
+ check_status(
1583
+ self.reader.get().GetRecordBatchReader(
1584
+ c_row_groups, c_column_indices, &recordbatchreader
1585
+ )
1586
+ )
1587
+ else:
1588
+ with nogil:
1589
+ check_status(
1590
+ self.reader.get().GetRecordBatchReader(
1591
+ c_row_groups, &recordbatchreader
1592
+ )
1593
+ )
1594
+
1595
+ while True:
1596
+ with nogil:
1597
+ check_status(
1598
+ recordbatchreader.get().ReadNext(&record_batch)
1599
+ )
1600
+ if record_batch.get() == NULL:
1601
+ break
1602
+
1603
+ yield pyarrow_wrap_batch(record_batch)
1604
+
1605
+ def read_row_group(self, int i, column_indices=None,
1606
+ bint use_threads=True):
1607
+ """
1608
+ Parameters
1609
+ ----------
1610
+ i : int
1611
+ column_indices : list[int], optional
1612
+ use_threads : bool, default True
1613
+
1614
+ Returns
1615
+ -------
1616
+ table : pyarrow.Table
1617
+ """
1618
+ return self.read_row_groups([i], column_indices, use_threads)
1619
+
1620
+ def read_row_groups(self, row_groups not None, column_indices=None,
1621
+ bint use_threads=True):
1622
+ """
1623
+ Parameters
1624
+ ----------
1625
+ row_groups : list[int]
1626
+ column_indices : list[int], optional
1627
+ use_threads : bool, default True
1628
+
1629
+ Returns
1630
+ -------
1631
+ table : pyarrow.Table
1632
+ """
1633
+ cdef:
1634
+ shared_ptr[CTable] ctable
1635
+ vector[int] c_row_groups
1636
+ vector[int] c_column_indices
1637
+
1638
+ self.set_use_threads(use_threads)
1639
+
1640
+ for row_group in row_groups:
1641
+ c_row_groups.push_back(row_group)
1642
+
1643
+ if column_indices is not None:
1644
+ for index in column_indices:
1645
+ c_column_indices.push_back(index)
1646
+
1647
+ with nogil:
1648
+ check_status(self.reader.get()
1649
+ .ReadRowGroups(c_row_groups, c_column_indices,
1650
+ &ctable))
1651
+ else:
1652
+ # Read all columns
1653
+ with nogil:
1654
+ check_status(self.reader.get()
1655
+ .ReadRowGroups(c_row_groups, &ctable))
1656
+ return pyarrow_wrap_table(ctable)
1657
+
1658
+ def read_all(self, column_indices=None, bint use_threads=True):
1659
+ """
1660
+ Parameters
1661
+ ----------
1662
+ column_indices : list[int], optional
1663
+ use_threads : bool, default True
1664
+
1665
+ Returns
1666
+ -------
1667
+ table : pyarrow.Table
1668
+ """
1669
+ cdef:
1670
+ shared_ptr[CTable] ctable
1671
+ vector[int] c_column_indices
1672
+
1673
+ self.set_use_threads(use_threads)
1674
+
1675
+ if column_indices is not None:
1676
+ for index in column_indices:
1677
+ c_column_indices.push_back(index)
1678
+
1679
+ with nogil:
1680
+ check_status(self.reader.get()
1681
+ .ReadTable(c_column_indices, &ctable))
1682
+ else:
1683
+ # Read all columns
1684
+ with nogil:
1685
+ check_status(self.reader.get()
1686
+ .ReadTable(&ctable))
1687
+ return pyarrow_wrap_table(ctable)
1688
+
1689
+ def scan_contents(self, column_indices=None, batch_size=65536):
1690
+ """
1691
+ Parameters
1692
+ ----------
1693
+ column_indices : list[int], optional
1694
+ batch_size : int32, default 65536
1695
+
1696
+ Returns
1697
+ -------
1698
+ num_rows : int64
1699
+ """
1700
+ cdef:
1701
+ vector[int] c_column_indices
1702
+ int32_t c_batch_size
1703
+ int64_t c_num_rows
1704
+
1705
+ if column_indices is not None:
1706
+ for index in column_indices:
1707
+ c_column_indices.push_back(index)
1708
+
1709
+ c_batch_size = batch_size
1710
+
1711
+ with nogil:
1712
+ check_status(self.reader.get()
1713
+ .ScanContents(c_column_indices, c_batch_size,
1714
+ &c_num_rows))
1715
+
1716
+ return c_num_rows
1717
+
1718
+ def column_name_idx(self, column_name):
1719
+ """
1720
+ Find the index of a column by its name.
1721
+
1722
+ Parameters
1723
+ ----------
1724
+ column_name : str
1725
+ Name of the column; separation of nesting levels is done via ".".
1726
+
1727
+ Returns
1728
+ -------
1729
+ column_idx : int
1730
+ Integer index of the column in the schema.
1731
+ """
1732
+ cdef:
1733
+ FileMetaData container = self.metadata
1734
+ const CFileMetaData* metadata = container._metadata
1735
+ int i = 0
1736
+
1737
+ if self._column_idx_map is None:
1738
+ self._column_idx_map = {}
1739
+ for i in range(0, metadata.num_columns()):
1740
+ col_bytes = tobytes(metadata.schema().Column(i)
1741
+ .path().get().ToDotString())
1742
+ self._column_idx_map[col_bytes] = i
1743
+
1744
+ return self._column_idx_map[tobytes(column_name)]
1745
+
1746
+ def read_column(self, int column_index):
1747
+ """
1748
+ Read the column at the specified index.
1749
+
1750
+ Parameters
1751
+ ----------
1752
+ column_index : int
1753
+ Index of the column.
1754
+
1755
+ Returns
1756
+ -------
1757
+ column : pyarrow.ChunkedArray
1758
+ """
1759
+ cdef shared_ptr[CChunkedArray] out
1760
+ with nogil:
1761
+ check_status(self.reader.get()
1762
+ .ReadColumn(column_index, &out))
1763
+ return pyarrow_wrap_chunked_array(out)
1764
+
1765
+ def close(self):
1766
+ if not self.closed:
1767
+ with nogil:
1768
+ check_status(self.rd_handle.get().Close())
1769
+
1770
+ @property
1771
+ def closed(self):
1772
+ if self.rd_handle == NULL:
1773
+ return True
1774
+ with nogil:
1775
+ closed = self.rd_handle.get().closed()
1776
+ return closed
1777
+
1778
+
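A hedged sketch of driving the low-level reader defined above (most users go through pyarrow.parquet.ParquetFile instead); "data.parquet" is an assumed local file:

reader = ParquetReader()
reader.open("data.parquet")                          # str, Path, NativeFile or file-like
print(reader.num_row_groups, reader.schema_arrow)
table = reader.read_all(use_threads=True)            # whole file as a pyarrow.Table
batches = reader.iter_batches(batch_size=1024,
                              row_groups=list(range(reader.num_row_groups)))
for batch in batches:                                # or stream RecordBatches instead
    pass
reader.close()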
1779
+ cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column):
1780
+ cdef CSortingColumn c_sorting_column
1781
+
1782
+ c_sorting_column.column_idx = sorting_column.column_index
1783
+ c_sorting_column.descending = sorting_column.descending
1784
+ c_sorting_column.nulls_first = sorting_column.nulls_first
1785
+
1786
+ return c_sorting_column
1787
+
1788
+
1789
+ cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *:
1790
+ if not (isinstance(sorting_columns, Sequence)
1791
+ and all(isinstance(col, SortingColumn) for col in sorting_columns)):
1792
+ raise ValueError(
1793
+ "'sorting_columns' must be a list of `SortingColumn`")
1794
+
1795
+ cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col)
1796
+ for col in sorting_columns]
1797
+
1798
+ return c_sorting_columns
1799
+
1800
+
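For reference, these helpers expect a sequence of SortingColumn objects (defined earlier in this module); a sketch, assuming the usual column_index/descending/nulls_first constructor:

sorting = [SortingColumn(0, descending=True, nulls_first=False)]
# later passed as sorting_columns=sorting to _create_writer_properties() below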
1801
+ cdef shared_ptr[WriterProperties] _create_writer_properties(
1802
+ use_dictionary=None,
1803
+ compression=None,
1804
+ version=None,
1805
+ write_statistics=None,
1806
+ data_page_size=None,
1807
+ compression_level=None,
1808
+ use_byte_stream_split=False,
1809
+ column_encoding=None,
1810
+ data_page_version=None,
1811
+ FileEncryptionProperties encryption_properties=None,
1812
+ write_batch_size=None,
1813
+ dictionary_pagesize_limit=None,
1814
+ write_page_index=False,
1815
+ write_page_checksum=False,
1816
+ sorting_columns=None) except *:
1817
+ """General writer properties"""
1818
+ cdef:
1819
+ shared_ptr[WriterProperties] properties
1820
+ WriterProperties.Builder props
1821
+
1822
+ # data_page_version
1823
+
1824
+ if data_page_version is not None:
1825
+ if data_page_version == "1.0":
1826
+ props.data_page_version(ParquetDataPageVersion_V1)
1827
+ elif data_page_version == "2.0":
1828
+ props.data_page_version(ParquetDataPageVersion_V2)
1829
+ else:
1830
+ raise ValueError("Unsupported Parquet data page version: {0}"
1831
+ .format(data_page_version))
1832
+
1833
+ # version
1834
+
1835
+ if version is not None:
1836
+ if version == "1.0":
1837
+ props.version(ParquetVersion_V1)
1838
+ elif version in ("2.0", "pseudo-2.0"):
1839
+ warnings.warn(
1840
+ "Parquet format '2.0' pseudo version is deprecated, use "
1841
+ "'2.4' or '2.6' for fine-grained feature selection",
1842
+ FutureWarning, stacklevel=2)
1843
+ props.version(ParquetVersion_V2_0)
1844
+ elif version == "2.4":
1845
+ props.version(ParquetVersion_V2_4)
1846
+ elif version == "2.6":
1847
+ props.version(ParquetVersion_V2_6)
1848
+ else:
1849
+ raise ValueError("Unsupported Parquet format version: {0}"
1850
+ .format(version))
1851
+
1852
+ # compression
1853
+
1854
+ if isinstance(compression, basestring):
1855
+ check_compression_name(compression)
1856
+ props.compression(compression_from_name(compression))
1857
+ elif compression is not None:
1858
+ for column, codec in compression.items():
1859
+ check_compression_name(codec)
1860
+ props.compression(tobytes(column), compression_from_name(codec))
1861
+
1862
+ if isinstance(compression_level, int):
1863
+ props.compression_level(compression_level)
1864
+ elif compression_level is not None:
1865
+ for column, level in compression_level.items():
1866
+ props.compression_level(tobytes(column), level)
1867
+
1868
+ # use_dictionary
1869
+
1870
+ if isinstance(use_dictionary, bool):
1871
+ if use_dictionary:
1872
+ props.enable_dictionary()
1873
+ if column_encoding is not None:
1874
+ raise ValueError(
1875
+ "To use 'column_encoding' set 'use_dictionary' to False")
1876
+ else:
1877
+ props.disable_dictionary()
1878
+ elif use_dictionary is not None:
1879
+ # Deactivate dictionary encoding by default
1880
+ props.disable_dictionary()
1881
+ for column in use_dictionary:
1882
+ props.enable_dictionary(tobytes(column))
1883
+ if (column_encoding is not None and
1884
+ column_encoding.get(column) is not None):
1885
+ raise ValueError(
1886
+ "To use 'column_encoding' set 'use_dictionary' to False")
1887
+
1888
+ # write_statistics
1889
+
1890
+ if isinstance(write_statistics, bool):
1891
+ if write_statistics:
1892
+ props.enable_statistics()
1893
+ else:
1894
+ props.disable_statistics()
1895
+ elif write_statistics is not None:
1896
+ # Deactivate statistics by default and enable for specified columns
1897
+ props.disable_statistics()
1898
+ for column in write_statistics:
1899
+ props.enable_statistics(tobytes(column))
1900
+
1901
+ # sorting_columns
1902
+
1903
+ if sorting_columns is not None:
1904
+ props.set_sorting_columns(_convert_sorting_columns(sorting_columns))
1905
+
1906
+ # use_byte_stream_split
1907
+
1908
+ if isinstance(use_byte_stream_split, bool):
1909
+ if use_byte_stream_split:
1910
+ if column_encoding is not None:
1911
+ raise ValueError(
1912
+ "'use_byte_stream_split' cannot be passed"
1913
+ "together with 'column_encoding'")
1914
+ else:
1915
+ props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT)
1916
+ elif use_byte_stream_split is not None:
1917
+ for column in use_byte_stream_split:
1918
+ if column_encoding is None:
1919
+ column_encoding = {column: 'BYTE_STREAM_SPLIT'}
1920
+ elif column_encoding.get(column, None) is None:
1921
+ column_encoding[column] = 'BYTE_STREAM_SPLIT'
1922
+ else:
1923
+ raise ValueError(
1924
+ "'use_byte_stream_split' cannot be passed"
1925
+ "together with 'column_encoding'")
1926
+
1927
+ # column_encoding
1928
+ # encoding map - encode individual columns
1929
+
1930
+ if column_encoding is not None:
1931
+ if isinstance(column_encoding, dict):
1932
+ for column, _encoding in column_encoding.items():
1933
+ props.encoding(tobytes(column),
1934
+ encoding_enum_from_name(_encoding))
1935
+ elif isinstance(column_encoding, str):
1936
+ props.encoding(encoding_enum_from_name(column_encoding))
1937
+ else:
1938
+ raise TypeError(
1939
+ "'column_encoding' should be a dictionary or a string")
1940
+
1941
+ if data_page_size is not None:
1942
+ props.data_pagesize(data_page_size)
1943
+
1944
+ if write_batch_size is not None:
1945
+ props.write_batch_size(write_batch_size)
1946
+
1947
+ if dictionary_pagesize_limit is not None:
1948
+ props.dictionary_pagesize_limit(dictionary_pagesize_limit)
1949
+
1950
+ # encryption
1951
+
1952
+ if encryption_properties is not None:
1953
+ props.encryption(
1954
+ (<FileEncryptionProperties>encryption_properties).unwrap())
1955
+
1956
+ # For backwards compatibility reasons we cap the maximum row group size
1957
+ # at 64Mi rows. This could be changed in the future, though it would be
1958
+ # a breaking change.
1959
+ #
1960
+ # The user can always specify a smaller row group size (and the default
1961
+ # is smaller) when calling write_table. If the call to write_table uses
1962
+ # a size larger than this then it will be latched to this value.
1963
+ props.max_row_group_length(_MAX_ROW_GROUP_SIZE)
1964
+
1965
+ # checksum
1966
+
1967
+ if write_page_checksum:
1968
+ props.enable_page_checksum()
1969
+ else:
1970
+ props.disable_page_checksum()
1971
+
1972
+ # page index
1973
+
1974
+ if write_page_index:
1975
+ props.enable_write_page_index()
1976
+ else:
1977
+ props.disable_write_page_index()
1978
+
1979
+ properties = props.build()
1980
+
1981
+ return properties
1982
+
1983
+
1984
+ cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
1985
+ use_deprecated_int96_timestamps=False,
1986
+ coerce_timestamps=None,
1987
+ allow_truncated_timestamps=False,
1988
+ writer_engine_version=None,
1989
+ use_compliant_nested_type=True,
1990
+ store_schema=True) except *:
1991
+ """Arrow writer properties"""
1992
+ cdef:
1993
+ shared_ptr[ArrowWriterProperties] arrow_properties
1994
+ ArrowWriterProperties.Builder arrow_props
1995
+
1996
+ # Store the original Arrow schema so things like dictionary types can
1997
+ # be automatically reconstructed
1998
+ if store_schema:
1999
+ arrow_props.store_schema()
2000
+
2001
+ # int96 support
2002
+
2003
+ if use_deprecated_int96_timestamps:
2004
+ arrow_props.enable_deprecated_int96_timestamps()
2005
+ else:
2006
+ arrow_props.disable_deprecated_int96_timestamps()
2007
+
2008
+ # coerce_timestamps
2009
+
2010
+ if coerce_timestamps == 'ms':
2011
+ arrow_props.coerce_timestamps(TimeUnit_MILLI)
2012
+ elif coerce_timestamps == 'us':
2013
+ arrow_props.coerce_timestamps(TimeUnit_MICRO)
2014
+ elif coerce_timestamps is not None:
2015
+ raise ValueError('Invalid value for coerce_timestamps: {0}'
2016
+ .format(coerce_timestamps))
2017
+
2018
+ # allow_truncated_timestamps
2019
+
2020
+ if allow_truncated_timestamps:
2021
+ arrow_props.allow_truncated_timestamps()
2022
+ else:
2023
+ arrow_props.disallow_truncated_timestamps()
2024
+
2025
+ # use_compliant_nested_type
2026
+
2027
+ if use_compliant_nested_type:
2028
+ arrow_props.enable_compliant_nested_types()
2029
+ else:
2030
+ arrow_props.disable_compliant_nested_types()
2031
+
2032
+ # writer_engine_version
2033
+
2034
+ if writer_engine_version == "V1":
2035
+ warnings.warn("V1 parquet writer engine is a no-op. Use V2.")
2036
+ arrow_props.set_engine_version(ArrowWriterEngineVersion.V1)
2037
+ elif writer_engine_version != "V2":
2038
+ raise ValueError("Unsupported Writer Engine Version: {0}"
2039
+ .format(writer_engine_version))
2040
+
2041
+ arrow_properties = arrow_props.build()
2042
+
2043
+ return arrow_properties
2044
+
2045
+ cdef _name_to_index_map(Schema arrow_schema):
2046
+ cdef:
2047
+ shared_ptr[CSchema] sp_arrow_schema
2048
+ shared_ptr[SchemaDescriptor] sp_parquet_schema
2049
+ shared_ptr[WriterProperties] props = _create_writer_properties()
2050
+ shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties(
2051
+ use_deprecated_int96_timestamps=False,
2052
+ coerce_timestamps=None,
2053
+ allow_truncated_timestamps=False,
2054
+ writer_engine_version="V2"
2055
+ )
2056
+
2057
+ sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema)
2058
+
2059
+ with nogil:
2060
+ check_status(ToParquetSchema(
2061
+ sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema))
2062
+
2063
+ out = dict()
2064
+
2065
+ cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get()
2066
+
2067
+ for i in range(parquet_schema.num_columns()):
2068
+ name = frombytes(parquet_schema.Column(i).path().get().ToDotString())
2069
+ out[name] = i
2070
+
2071
+ return out
2072
+
2073
+
2074
+ cdef class ParquetWriter(_Weakrefable):
2075
+ cdef:
2076
+ unique_ptr[FileWriter] writer
2077
+ shared_ptr[COutputStream] sink
2078
+ bint own_sink
2079
+
2080
+ cdef readonly:
2081
+ object use_dictionary
2082
+ object use_deprecated_int96_timestamps
2083
+ object use_byte_stream_split
2084
+ object column_encoding
2085
+ object coerce_timestamps
2086
+ object allow_truncated_timestamps
2087
+ object compression
2088
+ object compression_level
2089
+ object data_page_version
2090
+ object use_compliant_nested_type
2091
+ object version
2092
+ object write_statistics
2093
+ object writer_engine_version
2094
+ int row_group_size
2095
+ int64_t data_page_size
2096
+ FileEncryptionProperties encryption_properties
2097
+ int64_t write_batch_size
2098
+ int64_t dictionary_pagesize_limit
2099
+ object store_schema
2100
+
2101
+ def __cinit__(self, where, Schema schema not None, use_dictionary=None,
2102
+ compression=None, version=None,
2103
+ write_statistics=None,
2104
+ MemoryPool memory_pool=None,
2105
+ use_deprecated_int96_timestamps=False,
2106
+ coerce_timestamps=None,
2107
+ data_page_size=None,
2108
+ allow_truncated_timestamps=False,
2109
+ compression_level=None,
2110
+ use_byte_stream_split=False,
2111
+ column_encoding=None,
2112
+ writer_engine_version=None,
2113
+ data_page_version=None,
2114
+ use_compliant_nested_type=True,
2115
+ encryption_properties=None,
2116
+ write_batch_size=None,
2117
+ dictionary_pagesize_limit=None,
2118
+ store_schema=True,
2119
+ write_page_index=False,
2120
+ write_page_checksum=False,
2121
+ sorting_columns=None):
2122
+ cdef:
2123
+ shared_ptr[WriterProperties] properties
2124
+ shared_ptr[ArrowWriterProperties] arrow_properties
2125
+ c_string c_where
2126
+ CMemoryPool* pool
2127
+
2128
+ try:
2129
+ where = _stringify_path(where)
2130
+ except TypeError:
2131
+ get_writer(where, &self.sink)
2132
+ self.own_sink = False
2133
+ else:
2134
+ c_where = tobytes(where)
2135
+ with nogil:
2136
+ self.sink = GetResultValue(FileOutputStream.Open(c_where))
2137
+ self.own_sink = True
2138
+
2139
+ properties = _create_writer_properties(
2140
+ use_dictionary=use_dictionary,
2141
+ compression=compression,
2142
+ version=version,
2143
+ write_statistics=write_statistics,
2144
+ data_page_size=data_page_size,
2145
+ compression_level=compression_level,
2146
+ use_byte_stream_split=use_byte_stream_split,
2147
+ column_encoding=column_encoding,
2148
+ data_page_version=data_page_version,
2149
+ encryption_properties=encryption_properties,
2150
+ write_batch_size=write_batch_size,
2151
+ dictionary_pagesize_limit=dictionary_pagesize_limit,
2152
+ write_page_index=write_page_index,
2153
+ write_page_checksum=write_page_checksum,
2154
+ sorting_columns=sorting_columns,
2155
+ )
2156
+ arrow_properties = _create_arrow_writer_properties(
2157
+ use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
2158
+ coerce_timestamps=coerce_timestamps,
2159
+ allow_truncated_timestamps=allow_truncated_timestamps,
2160
+ writer_engine_version=writer_engine_version,
2161
+ use_compliant_nested_type=use_compliant_nested_type,
2162
+ store_schema=store_schema,
2163
+ )
2164
+
2165
+ pool = maybe_unbox_memory_pool(memory_pool)
2166
+ with nogil:
2167
+ self.writer = move(GetResultValue(
2168
+ FileWriter.Open(deref(schema.schema), pool,
2169
+ self.sink, properties, arrow_properties)))
2170
+
2171
+ def close(self):
2172
+ with nogil:
2173
+ check_status(self.writer.get().Close())
2174
+ if self.own_sink:
2175
+ check_status(self.sink.get().Close())
2176
+
2177
+ def write_table(self, Table table, row_group_size=None):
2178
+ cdef:
2179
+ CTable* ctable = table.table
2180
+ int64_t c_row_group_size
2181
+
2182
+ if row_group_size is None or row_group_size == -1:
2183
+ c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE)
2184
+ elif row_group_size == 0:
2185
+ raise ValueError('Row group size cannot be 0')
2186
+ else:
2187
+ c_row_group_size = row_group_size
2188
+
2189
+ with nogil:
2190
+ check_status(self.writer.get()
2191
+ .WriteTable(deref(ctable), c_row_group_size))
2192
+
2193
+ @property
2194
+ def metadata(self):
2195
+ cdef:
2196
+ shared_ptr[CFileMetaData] metadata
2197
+ FileMetaData result
2198
+ with nogil:
2199
+ metadata = self.writer.get().metadata()
2200
+ if metadata:
2201
+ result = FileMetaData()
2202
+ result.init(metadata)
2203
+ return result
2204
+ raise RuntimeError(
2205
+ 'file metadata is only available after writer close')
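A short, hedged sketch of the writer class above (the public pyarrow.parquet API wraps it); the output path is illustrative:

import pyarrow as pa

table = pa.table({"x": [1, 2, 3], "y": ["a", "b", "c"]})
writer = ParquetWriter("out.parquet", table.schema,
                       compression="zstd", version="2.6",
                       use_dictionary=True, write_statistics=True)
writer.write_table(table, row_group_size=2)
writer.close()
print(writer.metadata)        # FileMetaData; only available after close()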
venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (280 kB).
 
venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd ADDED
@@ -0,0 +1,56 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libparquet_encryption cimport *
23
+ from pyarrow._parquet cimport (ParquetCipher,
24
+ CFileEncryptionProperties,
25
+ CFileDecryptionProperties,
26
+ FileEncryptionProperties,
27
+ FileDecryptionProperties,
28
+ ParquetCipher_AES_GCM_V1,
29
+ ParquetCipher_AES_GCM_CTR_V1)
30
+ from pyarrow.lib cimport _Weakrefable
31
+
32
+ cdef class CryptoFactory(_Weakrefable):
33
+ cdef shared_ptr[CPyCryptoFactory] factory
34
+ cdef init(self, callable_client_factory)
35
+ cdef inline shared_ptr[CPyCryptoFactory] unwrap(self)
36
+
37
+ cdef class EncryptionConfiguration(_Weakrefable):
38
+ cdef shared_ptr[CEncryptionConfiguration] configuration
39
+ cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil
40
+
41
+ cdef class DecryptionConfiguration(_Weakrefable):
42
+ cdef shared_ptr[CDecryptionConfiguration] configuration
43
+ cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil
44
+
45
+ cdef class KmsConnectionConfig(_Weakrefable):
46
+ cdef shared_ptr[CKmsConnectionConfig] configuration
47
+ cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil
48
+
49
+ @staticmethod
50
+ cdef wrap(const CKmsConnectionConfig& config)
51
+
52
+
53
+ cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *
54
+ cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *
55
+ cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *
56
+ cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *
venv/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx ADDED
@@ -0,0 +1,484 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+
21
+ from datetime import timedelta
22
+
23
+ from cython.operator cimport dereference as deref
24
+ from libcpp.memory cimport shared_ptr
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.lib cimport _Weakrefable
28
+ from pyarrow.lib import tobytes, frombytes
29
+
30
+
31
+ cdef ParquetCipher cipher_from_name(name):
32
+ name = name.upper()
33
+ if name == 'AES_GCM_V1':
34
+ return ParquetCipher_AES_GCM_V1
35
+ elif name == 'AES_GCM_CTR_V1':
36
+ return ParquetCipher_AES_GCM_CTR_V1
37
+ else:
38
+ raise ValueError(f'Invalid cipher name: {name!r}')
39
+
40
+
41
+ cdef cipher_to_name(ParquetCipher cipher):
42
+ if ParquetCipher_AES_GCM_V1 == cipher:
43
+ return 'AES_GCM_V1'
44
+ elif ParquetCipher_AES_GCM_CTR_V1 == cipher:
45
+ return 'AES_GCM_CTR_V1'
46
+ else:
47
+ raise ValueError('Invalid cipher value: {0}'.format(cipher))
48
+
49
+ cdef class EncryptionConfiguration(_Weakrefable):
50
+ """Configuration of the encryption, such as which columns to encrypt"""
51
+ # Avoid mistakenly creating attributes
52
+ __slots__ = ()
53
+
54
+ def __init__(self, footer_key, *, column_keys=None,
55
+ encryption_algorithm=None,
56
+ plaintext_footer=None, double_wrapping=None,
57
+ cache_lifetime=None, internal_key_material=None,
58
+ data_key_length_bits=None):
59
+ self.configuration.reset(
60
+ new CEncryptionConfiguration(tobytes(footer_key)))
61
+ if column_keys is not None:
62
+ self.column_keys = column_keys
63
+ if encryption_algorithm is not None:
64
+ self.encryption_algorithm = encryption_algorithm
65
+ if plaintext_footer is not None:
66
+ self.plaintext_footer = plaintext_footer
67
+ if double_wrapping is not None:
68
+ self.double_wrapping = double_wrapping
69
+ if cache_lifetime is not None:
70
+ self.cache_lifetime = cache_lifetime
71
+ if internal_key_material is not None:
72
+ self.internal_key_material = internal_key_material
73
+ if data_key_length_bits is not None:
74
+ self.data_key_length_bits = data_key_length_bits
75
+
76
+ @property
77
+ def footer_key(self):
78
+ """ID of the master key for footer encryption/signing"""
79
+ return frombytes(self.configuration.get().footer_key)
80
+
81
+ @property
82
+ def column_keys(self):
83
+ """
84
+ List of columns to encrypt, with master key IDs.
85
+ """
86
+ column_keys_str = frombytes(self.configuration.get().column_keys)
87
+ # Convert from "masterKeyID:colName,colName;masterKeyID:colName..."
88
+ # (see HIVE-21848) to dictionary of master key ID to column name lists
89
+ column_keys_to_key_list_str = dict(subString.replace(" ", "").split(
90
+ ":") for subString in column_keys_str.split(";"))
91
+ column_keys_dict = {k: v.split(
92
+ ",") for k, v in column_keys_to_key_list_str.items()}
93
+ return column_keys_dict
94
+
95
+ @column_keys.setter
96
+ def column_keys(self, dict value):
97
+ if value is not None:
98
+ # convert a dictionary such as
99
+ # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}''
100
+ # to the string defined by the spec
101
+ # 'key1: col1 , col2; key2: col3 , col4'
102
+ column_keys = "; ".join(
103
+ ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()])
104
+ self.configuration.get().column_keys = tobytes(column_keys)
105
+
106
+ @property
107
+ def encryption_algorithm(self):
108
+ """Parquet encryption algorithm.
109
+ Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1"."""
110
+ return cipher_to_name(self.configuration.get().encryption_algorithm)
111
+
112
+ @encryption_algorithm.setter
113
+ def encryption_algorithm(self, value):
114
+ cipher = cipher_from_name(value)
115
+ self.configuration.get().encryption_algorithm = cipher
116
+
117
+ @property
118
+ def plaintext_footer(self):
119
+ """Write files with plaintext footer."""
120
+ return self.configuration.get().plaintext_footer
121
+
122
+ @plaintext_footer.setter
123
+ def plaintext_footer(self, value):
124
+ self.configuration.get().plaintext_footer = value
125
+
126
+ @property
127
+ def double_wrapping(self):
128
+ """Use double wrapping - where data encryption keys (DEKs) are
129
+ encrypted with key encryption keys (KEKs), which in turn are
130
+ encrypted with master keys.
131
+ If set to false, use single wrapping - where DEKs are
132
+ encrypted directly with master keys."""
133
+ return self.configuration.get().double_wrapping
134
+
135
+ @double_wrapping.setter
136
+ def double_wrapping(self, value):
137
+ self.configuration.get().double_wrapping = value
138
+
139
+ @property
140
+ def cache_lifetime(self):
141
+ """Lifetime of cached entities (key encryption keys,
142
+ local wrapping keys, KMS client objects)."""
143
+ return timedelta(
144
+ seconds=self.configuration.get().cache_lifetime_seconds)
145
+
146
+ @cache_lifetime.setter
147
+ def cache_lifetime(self, value):
148
+ if not isinstance(value, timedelta):
149
+ raise TypeError("cache_lifetime should be a timedelta")
150
+ self.configuration.get().cache_lifetime_seconds = value.total_seconds()
151
+
152
+ @property
153
+ def internal_key_material(self):
154
+ """Store key material inside Parquet file footers; this mode doesn’t
155
+ produce additional files. If set to false, key material is stored in
156
+ separate files in the same folder, which enables key rotation for
157
+ immutable Parquet files."""
158
+ return self.configuration.get().internal_key_material
159
+
160
+ @internal_key_material.setter
161
+ def internal_key_material(self, value):
162
+ self.configuration.get().internal_key_material = value
163
+
164
+ @property
165
+ def data_key_length_bits(self):
166
+ """Length of data encryption keys (DEKs), randomly generated by parquet key
167
+ management tools. Can be 128, 192 or 256 bits."""
168
+ return self.configuration.get().data_key_length_bits
169
+
170
+ @data_key_length_bits.setter
171
+ def data_key_length_bits(self, value):
172
+ self.configuration.get().data_key_length_bits = value
173
+
174
+ cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil:
175
+ return self.configuration
176
+
177
+
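A sketch of building the configuration above; the key IDs and column names are illustrative:

config = EncryptionConfiguration(
    footer_key="footer_key_id",
    column_keys={"col_key_id": ["secret_a", "secret_b"]},
    encryption_algorithm="AES_GCM_V1",
    plaintext_footer=False,
    double_wrapping=True,
    data_key_length_bits=128,
)
print(config.column_keys)     # {'col_key_id': ['secret_a', 'secret_b']}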
178
+ cdef class DecryptionConfiguration(_Weakrefable):
179
+ """Configuration of the decryption, such as cache timeout."""
180
+ # Avoid mistakenly creating attributes
181
+ __slots__ = ()
182
+
183
+ def __init__(self, *, cache_lifetime=None):
184
+ self.configuration.reset(new CDecryptionConfiguration())
185
+
186
+ @property
187
+ def cache_lifetime(self):
188
+ """Lifetime of cached entities (key encryption keys,
189
+ local wrapping keys, KMS client objects)."""
190
+ return timedelta(
191
+ seconds=self.configuration.get().cache_lifetime_seconds)
192
+
193
+ @cache_lifetime.setter
194
+ def cache_lifetime(self, value):
195
+ self.configuration.get().cache_lifetime_seconds = value.total_seconds()
196
+
197
+ cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil:
198
+ return self.configuration
199
+
200
+
201
+ cdef class KmsConnectionConfig(_Weakrefable):
202
+ """Configuration of the connection to the Key Management Service (KMS)"""
203
+ # Avoid mistakenly creating attributes
204
+ __slots__ = ()
205
+
206
+ def __init__(self, *, kms_instance_id=None, kms_instance_url=None,
207
+ key_access_token=None, custom_kms_conf=None):
208
+ self.configuration.reset(new CKmsConnectionConfig())
209
+ if kms_instance_id is not None:
210
+ self.kms_instance_id = kms_instance_id
211
+ if kms_instance_url is not None:
212
+ self.kms_instance_url = kms_instance_url
213
+ if key_access_token is None:
214
+ self.key_access_token = b'DEFAULT'
215
+ else:
216
+ self.key_access_token = key_access_token
217
+ if custom_kms_conf is not None:
218
+ self.custom_kms_conf = custom_kms_conf
219
+
220
+ @property
221
+ def kms_instance_id(self):
222
+ """ID of the KMS instance that will be used for encryption
223
+ (if multiple KMS instances are available)."""
224
+ return frombytes(self.configuration.get().kms_instance_id)
225
+
226
+ @kms_instance_id.setter
227
+ def kms_instance_id(self, value):
228
+ self.configuration.get().kms_instance_id = tobytes(value)
229
+
230
+ @property
231
+ def kms_instance_url(self):
232
+ """URL of the KMS instance."""
233
+ return frombytes(self.configuration.get().kms_instance_url)
234
+
235
+ @kms_instance_url.setter
236
+ def kms_instance_url(self, value):
237
+ self.configuration.get().kms_instance_url = tobytes(value)
238
+
239
+ @property
240
+ def key_access_token(self):
241
+ """Authorization token that will be passed to KMS."""
242
+ return frombytes(self.configuration.get()
243
+ .refreshable_key_access_token.get().value())
244
+
245
+ @key_access_token.setter
246
+ def key_access_token(self, value):
247
+ self.refresh_key_access_token(value)
248
+
249
+ @property
250
+ def custom_kms_conf(self):
251
+ """A dictionary with KMS-type-specific configuration"""
252
+ custom_kms_conf = {
253
+ frombytes(k): frombytes(v)
254
+ for k, v in self.configuration.get().custom_kms_conf
255
+ }
256
+ return custom_kms_conf
257
+
258
+ @custom_kms_conf.setter
259
+ def custom_kms_conf(self, dict value):
260
+ if value is not None:
261
+ for k, v in value.items():
262
+ if isinstance(k, str) and isinstance(v, str):
263
+ self.configuration.get().custom_kms_conf[tobytes(k)] = \
264
+ tobytes(v)
265
+ else:
266
+ raise TypeError("Expected custom_kms_conf to be " +
267
+ "a dictionary of strings")
268
+
269
+ def refresh_key_access_token(self, value):
270
+ cdef:
271
+ shared_ptr[CKeyAccessToken] c_key_access_token = \
272
+ self.configuration.get().refreshable_key_access_token
273
+
274
+ c_key_access_token.get().Refresh(tobytes(value))
275
+
276
+ cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil:
277
+ return self.configuration
278
+
279
+ @staticmethod
280
+ cdef wrap(const CKmsConnectionConfig& config):
281
+ result = KmsConnectionConfig()
282
+ result.configuration = make_shared[CKmsConnectionConfig](move(config))
283
+ return result
284
+
285
+
286
+ # Callback definitions for CPyKmsClientVtable
287
+ cdef void _cb_wrap_key(
288
+ handler, const c_string& key_bytes,
289
+ const c_string& master_key_identifier, c_string* out) except *:
290
+ mkid_str = frombytes(master_key_identifier)
291
+ wrapped_key = handler.wrap_key(key_bytes, mkid_str)
292
+ out[0] = tobytes(wrapped_key)
293
+
294
+
295
+ cdef void _cb_unwrap_key(
296
+ handler, const c_string& wrapped_key,
297
+ const c_string& master_key_identifier, c_string* out) except *:
298
+ mkid_str = frombytes(master_key_identifier)
299
+ wk_str = frombytes(wrapped_key)
300
+ key = handler.unwrap_key(wk_str, mkid_str)
301
+ out[0] = tobytes(key)
302
+
303
+
304
+ cdef class KmsClient(_Weakrefable):
305
+ """The abstract base class for KmsClient implementations."""
306
+ cdef:
307
+ shared_ptr[CKmsClient] client
308
+
309
+ def __init__(self):
310
+ self.init()
311
+
312
+ cdef init(self):
313
+ cdef:
314
+ CPyKmsClientVtable vtable = CPyKmsClientVtable()
315
+
316
+ vtable.wrap_key = _cb_wrap_key
317
+ vtable.unwrap_key = _cb_unwrap_key
318
+
319
+ self.client.reset(new CPyKmsClient(self, vtable))
320
+
321
+ def wrap_key(self, key_bytes, master_key_identifier):
322
+ """Wrap a key - encrypt it with the master key."""
323
+ raise NotImplementedError()
324
+
325
+ def unwrap_key(self, wrapped_key, master_key_identifier):
326
+ """Unwrap a key - decrypt it with the master key."""
327
+ raise NotImplementedError()
328
+
329
+ cdef inline shared_ptr[CKmsClient] unwrap(self) nogil:
330
+ return self.client
331
+
332
+
333
+ # Callback definition for CPyKmsClientFactoryVtable
334
+ cdef void _cb_create_kms_client(
335
+ handler,
336
+ const CKmsConnectionConfig& kms_connection_config,
337
+ shared_ptr[CKmsClient]* out) except *:
338
+ connection_config = KmsConnectionConfig.wrap(kms_connection_config)
339
+
340
+ result = handler(connection_config)
341
+ if not isinstance(result, KmsClient):
342
+ raise TypeError(
343
+ "callable must return KmsClient instances, but got {}".format(
344
+ type(result)))
345
+
346
+ out[0] = (<KmsClient> result).unwrap()
347
+
348
+
349
+ cdef class CryptoFactory(_Weakrefable):
350
+ """ A factory that produces the low-level FileEncryptionProperties and
351
+ FileDecryptionProperties objects, from the high-level parameters."""
352
+ # Avoid mistakenly creating attributes
353
+ __slots__ = ()
354
+
355
+ def __init__(self, kms_client_factory):
356
+ """Create CryptoFactory.
357
+
358
+ Parameters
359
+ ----------
360
+ kms_client_factory : a callable that accepts KmsConnectionConfig
361
+ and returns a KmsClient
362
+ """
363
+ self.factory.reset(new CPyCryptoFactory())
364
+
365
+ if callable(kms_client_factory):
366
+ self.init(kms_client_factory)
367
+ else:
368
+ raise TypeError("Parameter kms_client_factory must be a callable")
369
+
370
+ cdef init(self, callable_client_factory):
371
+ cdef:
372
+ CPyKmsClientFactoryVtable vtable
373
+ shared_ptr[CPyKmsClientFactory] kms_client_factory
374
+
375
+ vtable.create_kms_client = _cb_create_kms_client
376
+ kms_client_factory.reset(
377
+ new CPyKmsClientFactory(callable_client_factory, vtable))
378
+ # A KmsClientFactory object must be registered
379
+ # via this method before calling any of
380
+ # file_encryption_properties()/file_decryption_properties() methods.
381
+ self.factory.get().RegisterKmsClientFactory(
382
+ static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory](
383
+ kms_client_factory))
384
+
385
+ def file_encryption_properties(self,
386
+ KmsConnectionConfig kms_connection_config,
387
+ EncryptionConfiguration encryption_config):
388
+ """Create file encryption properties.
389
+
390
+ Parameters
391
+ ----------
392
+ kms_connection_config : KmsConnectionConfig
393
+ Configuration of connection to KMS
394
+
395
+ encryption_config : EncryptionConfiguration
396
+ Configuration of the encryption, such as which columns to encrypt
397
+
398
+ Returns
399
+ -------
400
+ file_encryption_properties : FileEncryptionProperties
401
+ File encryption properties.
402
+ """
403
+ cdef:
404
+ CResult[shared_ptr[CFileEncryptionProperties]] \
405
+ file_encryption_properties_result
406
+ with nogil:
407
+ file_encryption_properties_result = \
408
+ self.factory.get().SafeGetFileEncryptionProperties(
409
+ deref(kms_connection_config.unwrap().get()),
410
+ deref(encryption_config.unwrap().get()))
411
+ file_encryption_properties = GetResultValue(
412
+ file_encryption_properties_result)
413
+ return FileEncryptionProperties.wrap(file_encryption_properties)
414
+
415
+ def file_decryption_properties(
416
+ self,
417
+ KmsConnectionConfig kms_connection_config,
418
+ DecryptionConfiguration decryption_config=None):
419
+ """Create file decryption properties.
420
+
421
+ Parameters
422
+ ----------
423
+ kms_connection_config : KmsConnectionConfig
424
+ Configuration of connection to KMS
425
+
426
+ decryption_config : DecryptionConfiguration, default None
427
+ Configuration of the decryption, such as cache timeout.
428
+ Can be None.
429
+
430
+ Returns
431
+ -------
432
+ file_decryption_properties : FileDecryptionProperties
433
+ File decryption properties.
434
+ """
435
+ cdef:
436
+ CDecryptionConfiguration c_decryption_config
437
+ CResult[shared_ptr[CFileDecryptionProperties]] \
438
+ c_file_decryption_properties
439
+ if decryption_config is None:
440
+ c_decryption_config = CDecryptionConfiguration()
441
+ else:
442
+ c_decryption_config = deref(decryption_config.unwrap().get())
443
+ with nogil:
444
+ c_file_decryption_properties = \
445
+ self.factory.get().SafeGetFileDecryptionProperties(
446
+ deref(kms_connection_config.unwrap().get()),
447
+ c_decryption_config)
448
+ file_decryption_properties = GetResultValue(
449
+ c_file_decryption_properties)
450
+ return FileDecryptionProperties.wrap(file_decryption_properties)
451
+
452
+ def remove_cache_entries_for_token(self, access_token):
453
+ self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token))
454
+
455
+ def remove_cache_entries_for_all_tokens(self):
456
+ self.factory.get().RemoveCacheEntriesForAllTokens()
457
+
458
+ cdef inline shared_ptr[CPyCryptoFactory] unwrap(self):
459
+ return self.factory
460
+
461
+
462
+ cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *:
463
+ if isinstance(crypto_factory, CryptoFactory):
464
+ pycf = (<CryptoFactory> crypto_factory).unwrap()
465
+ return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf)
466
+ raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory))
467
+
468
+
469
+ cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *:
470
+ if isinstance(kmsconnectionconfig, KmsConnectionConfig):
471
+ return (<KmsConnectionConfig> kmsconnectionconfig).unwrap()
472
+ raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig))
473
+
474
+
475
+ cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *:
476
+ if isinstance(encryptionconfig, EncryptionConfiguration):
477
+ return (<EncryptionConfiguration> encryptionconfig).unwrap()
478
+ raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig))
479
+
480
+
481
+ cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *:
482
+ if isinstance(decryptionconfig, DecryptionConfiguration):
483
+ return (<DecryptionConfiguration> decryptionconfig).unwrap()
484
+ raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig))
venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd ADDED
@@ -0,0 +1,33 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport CStatus
23
+
24
+
25
+ ctypedef CStatus cb_test_func()
26
+
27
+ cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil:
28
+
29
+ cdef cppclass CTestCase "arrow::py::testing::TestCase":
30
+ c_string name
31
+ cb_test_func func
32
+
33
+ vector[CTestCase] GetCppTestCases()
venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx ADDED
@@ -0,0 +1,62 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False, binding=True
19
+ # distutils: language = c++
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+ from pyarrow.lib cimport check_status
24
+
25
+ from pyarrow.lib import frombytes
26
+
27
+
28
+ cdef class CppTestCase:
29
+ """
30
+ A simple wrapper for a C++ test case.
31
+ """
32
+ cdef:
33
+ CTestCase c_case
34
+
35
+ @staticmethod
36
+ cdef wrap(CTestCase c_case):
37
+ cdef:
38
+ CppTestCase obj
39
+ obj = CppTestCase.__new__(CppTestCase)
40
+ obj.c_case = c_case
41
+ return obj
42
+
43
+ @property
44
+ def name(self):
45
+ return frombytes(self.c_case.name)
46
+
47
+ def __repr__(self):
48
+ return f"<{self.__class__.__name__} {self.name!r}>"
49
+
50
+ def __call__(self):
51
+ check_status(self.c_case.func())
52
+
53
+
54
+ def get_cpp_tests():
55
+ """
56
+ Get a list of C++ test cases.
57
+ """
58
+ cases = []
59
+ c_cases = GetCppTestCases()
60
+ for c_case in c_cases:
61
+ cases.append(CppTestCase.wrap(c_case))
62
+ return cases
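A small sketch of how the wrapper above is consumed; pyarrow's own test suite iterates the cases in essentially this way:

for case in get_cpp_tests():
    print("running", case.name)
    case()    # raises if the underlying C++ test returns a non-OK Status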