applied-ai-018 committed
Commit 6326216 · verified · 1 Parent(s): d1b1306

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.pxd +42 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pxd +44 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pyx +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pxd +55 -0
  5. env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pyx +1058 -0
  6. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pxd +183 -0
  7. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pyx +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx +51 -0
  10. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd +42 -0
  12. env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx +1019 -0
  13. env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.pyx +117 -0
  15. env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.pyx +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pxd +94 -0
  17. env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx +212 -0
  18. env-llmeval/lib/python3.10/site-packages/pyarrow/_generated_version.py +4 -0
  19. env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.pyx +160 -0
  20. env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.cpython-310-x86_64-linux-gnu.so +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pxd +134 -0
  24. env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pxd +674 -0
  25. env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx +484 -0
  27. env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx +62 -0
  29. env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.pyx +349 -0
  31. env-llmeval/lib/python3.10/site-packages/pyarrow/array.pxi +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.pxi +20 -0
  33. env-llmeval/lib/python3.10/site-packages/pyarrow/builder.pxi +82 -0
  34. env-llmeval/lib/python3.10/site-packages/pyarrow/cffi.py +71 -0
  35. env-llmeval/lib/python3.10/site-packages/pyarrow/compat.pxi +71 -0
  36. env-llmeval/lib/python3.10/site-packages/pyarrow/compute.py +731 -0
  37. env-llmeval/lib/python3.10/site-packages/pyarrow/config.pxi +95 -0
  38. env-llmeval/lib/python3.10/site-packages/pyarrow/csv.py +22 -0
  39. env-llmeval/lib/python3.10/site-packages/pyarrow/cuda.py +25 -0
  40. env-llmeval/lib/python3.10/site-packages/pyarrow/dataset.py +1023 -0
  41. env-llmeval/lib/python3.10/site-packages/pyarrow/feather.py +277 -0
  42. env-llmeval/lib/python3.10/site-packages/pyarrow/fs.py +444 -0
  43. env-llmeval/lib/python3.10/site-packages/pyarrow/gandiva.pyx +760 -0
  44. env-llmeval/lib/python3.10/site-packages/pyarrow/hdfs.py +240 -0
  45. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array.h +49 -0
  46. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h +484 -0
  47. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h +33 -0
  48. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/device.h +366 -0
  49. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h +272 -0
  50. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h +367 -0
env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.pxd ADDED
@@ -0,0 +1,42 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ from libcpp.memory cimport shared_ptr
+ from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType,
+ CField, CRecordBatch, CSchema,
+ CTable, CTensor, CSparseCOOTensor,
+ CSparseCSRMatrix, CSparseCSCMatrix,
+ CSparseCSFTensor)
+
+ cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py":
+ cdef int import_pyarrow() except -1
+ cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer)
+ cdef object wrap_data_type(const shared_ptr[CDataType]& type)
+ cdef object wrap_field(const shared_ptr[CField]& field)
+ cdef object wrap_schema(const shared_ptr[CSchema]& schema)
+ cdef object wrap_array(const shared_ptr[CArray]& sp_array)
+ cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor)
+ cdef object wrap_sparse_tensor_coo(
+ const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor)
+ cdef object wrap_sparse_tensor_csr(
+ const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor)
+ cdef object wrap_sparse_tensor_csc(
+ const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor)
+ cdef object wrap_sparse_tensor_csf(
+ const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor)
+ cdef object wrap_table(const shared_ptr[CTable]& ctable)
+ cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch)
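For context, this .pxd exposes pyarrow's C++-level import/wrap helpers to third-party Cython code. A minimal sketch of how such a module might cimport it (the module name and surrounding build setup are assumptions, not part of this commit):

    # example_ext.pyx -- hypothetical third-party Cython module
    from libcpp.memory cimport shared_ptr
    from pyarrow.includes.libarrow cimport CArray
    cimport pyarrow

    # Must run once at module import time before any wrap_* helper is used.
    pyarrow.import_pyarrow()

    cdef object array_from_cpp(const shared_ptr[CArray]& sp_array):
        # Wrap a C++ arrow::Array as a Python pyarrow.Array object.
        return pyarrow.wrap_array(sp_array)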
env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pxd ADDED
@@ -0,0 +1,44 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ from pyarrow.lib cimport *
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.includes.libarrow_acero cimport *
+
+
+ cdef class ExecNodeOptions(_Weakrefable):
+ cdef:
+ shared_ptr[CExecNodeOptions] wrapped
+
+ cdef void init(self, const shared_ptr[CExecNodeOptions]& sp)
+ cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil
+
+
+ cdef class Declaration(_Weakrefable):
+
+ cdef:
+ CDeclaration decl
+
+ cdef void init(self, const CDeclaration& c_decl)
+
+ @staticmethod
+ cdef wrap(const CDeclaration& c_decl)
+
+ cdef inline CDeclaration unwrap(self) nogil
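These declarations back the Declaration and ExecNodeOptions classes surfaced in pyarrow.acero. A rough Python-level illustration, assuming a pyarrow build with Acero enabled (the table contents are made up):

    import pyarrow as pa
    import pyarrow.compute as pc
    from pyarrow import acero

    table = pa.table({"x": [1, 2, 3], "y": ["a", "b", "c"]})

    # Tiny plan: table source -> projection, collected back into a Table.
    plan = acero.Declaration.from_sequence([
        acero.Declaration("table_source", acero.TableSourceNodeOptions(table)),
        acero.Declaration("project", acero.ProjectNodeOptions([pc.field("x")], names=["x"])),
    ])
    result = plan.to_table()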
env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pyx ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pxd ADDED
@@ -0,0 +1,55 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.lib cimport _Weakrefable
+
+
+ cdef class ConvertOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVConvertOptions] options
+
+ @staticmethod
+ cdef ConvertOptions wrap(CCSVConvertOptions options)
+
+
+ cdef class ParseOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVParseOptions] options
+ object _invalid_row_handler
+
+ @staticmethod
+ cdef ParseOptions wrap(CCSVParseOptions options)
+
+
+ cdef class ReadOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVReadOptions] options
+ public object encoding
+
+ @staticmethod
+ cdef ReadOptions wrap(CCSVReadOptions options)
+
+
+ cdef class WriteOptions(_Weakrefable):
+ cdef:
+ unique_ptr[CCSVWriteOptions] options
+
+ @staticmethod
+ cdef WriteOptions wrap(CCSVWriteOptions options)
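These option holders are the C-level side of pyarrow.csv's ReadOptions, ParseOptions, ConvertOptions and WriteOptions. A small usage sketch (the file path and column name are placeholders):

    import pyarrow as pa
    from pyarrow import csv

    # Read a semicolon-delimited file, forcing one column's type.
    table = csv.read_csv(
        "data.csv",  # placeholder path
        read_options=csv.ReadOptions(encoding="utf8"),
        parse_options=csv.ParseOptions(delimiter=";"),
        convert_options=csv.ConvertOptions(column_types={"id": pa.int64()}),
    )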
env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pyx ADDED
@@ -0,0 +1,1058 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ from pyarrow.lib cimport *
20
+ from pyarrow.includes.libarrow_cuda cimport *
21
+ from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError
22
+ from pyarrow.util import get_contiguous_span
23
+ cimport cpython as cp
24
+
25
+
26
+ cdef class Context(_Weakrefable):
27
+ """
28
+ CUDA driver context.
29
+ """
30
+
31
+ def __init__(self, *args, **kwargs):
32
+ """
33
+ Create a CUDA driver context for a particular device.
34
+
35
+ If a CUDA context handle is passed, it is wrapped, otherwise
36
+ a default CUDA context for the given device is requested.
37
+
38
+ Parameters
39
+ ----------
40
+ device_number : int (default 0)
41
+ Specify the GPU device for which the CUDA driver context is
42
+ requested.
43
+ handle : int, optional
44
+ Specify CUDA handle for a shared context that has been created
45
+ by another library.
46
+ """
47
+ # This method exposed because autodoc doesn't pick __cinit__
48
+
49
+ def __cinit__(self, int device_number=0, uintptr_t handle=0):
50
+ cdef CCudaDeviceManager* manager
51
+ manager = GetResultValue(CCudaDeviceManager.Instance())
52
+ cdef int n = manager.num_devices()
53
+ if device_number >= n or device_number < 0:
54
+ self.context.reset()
55
+ raise ValueError('device_number argument must be '
56
+ 'non-negative less than %s' % (n))
57
+ if handle == 0:
58
+ self.context = GetResultValue(manager.GetContext(device_number))
59
+ else:
60
+ self.context = GetResultValue(manager.GetSharedContext(
61
+ device_number, <void*>handle))
62
+ self.device_number = device_number
63
+
64
+ @staticmethod
65
+ def from_numba(context=None):
66
+ """
67
+ Create a Context instance from a Numba CUDA context.
68
+
69
+ Parameters
70
+ ----------
71
+ context : {numba.cuda.cudadrv.driver.Context, None}
72
+ A Numba CUDA context instance.
73
+ If None, the current Numba context is used.
74
+
75
+ Returns
76
+ -------
77
+ shared_context : pyarrow.cuda.Context
78
+ Context instance.
79
+ """
80
+ if context is None:
81
+ import numba.cuda
82
+ context = numba.cuda.current_context()
83
+ return Context(device_number=context.device.id,
84
+ handle=context.handle.value)
85
+
86
+ def to_numba(self):
87
+ """
88
+ Convert Context to a Numba CUDA context.
89
+
90
+ Returns
91
+ -------
92
+ context : numba.cuda.cudadrv.driver.Context
93
+ Numba CUDA context instance.
94
+ """
95
+ import ctypes
96
+ import numba.cuda
97
+ device = numba.cuda.gpus[self.device_number]
98
+ handle = ctypes.c_void_p(self.handle)
99
+ context = numba.cuda.cudadrv.driver.Context(device, handle)
100
+
101
+ class DummyPendingDeallocs(object):
102
+ # Context is managed by pyarrow
103
+ def add_item(self, *args, **kwargs):
104
+ pass
105
+
106
+ context.deallocations = DummyPendingDeallocs()
107
+ return context
108
+
109
+ @staticmethod
110
+ def get_num_devices():
111
+ """ Return the number of GPU devices.
112
+ """
113
+ cdef CCudaDeviceManager* manager
114
+ manager = GetResultValue(CCudaDeviceManager.Instance())
115
+ return manager.num_devices()
116
+
117
+ @property
118
+ def device_number(self):
119
+ """ Return context device number.
120
+ """
121
+ return self.device_number
122
+
123
+ @property
124
+ def handle(self):
125
+ """ Return pointer to context handle.
126
+ """
127
+ return <uintptr_t>self.context.get().handle()
128
+
129
+ cdef void init(self, const shared_ptr[CCudaContext]& ctx):
130
+ self.context = ctx
131
+
132
+ def synchronize(self):
133
+ """Blocks until the device has completed all preceding requested
134
+ tasks.
135
+ """
136
+ check_status(self.context.get().Synchronize())
137
+
138
+ @property
139
+ def bytes_allocated(self):
140
+ """Return the number of allocated bytes.
141
+ """
142
+ return self.context.get().bytes_allocated()
143
+
144
+ def get_device_address(self, uintptr_t address):
145
+ """Return the device address that is reachable from kernels running in
146
+ the context
147
+
148
+ Parameters
149
+ ----------
150
+ address : int
151
+ Specify memory address value
152
+
153
+ Returns
154
+ -------
155
+ device_address : int
156
+ Device address accessible from device context
157
+
158
+ Notes
159
+ -----
160
+ The device address is defined as a memory address accessible
161
+ by device. While it is often a device memory address but it
162
+ can be also a host memory address, for instance, when the
163
+ memory is allocated as host memory (using cudaMallocHost or
164
+ cudaHostAlloc) or as managed memory (using cudaMallocManaged)
165
+ or the host memory is page-locked (using cudaHostRegister).
166
+ """
167
+ return GetResultValue(self.context.get().GetDeviceAddress(address))
168
+
169
+ def new_buffer(self, int64_t nbytes):
170
+ """Return new device buffer.
171
+
172
+ Parameters
173
+ ----------
174
+ nbytes : int
175
+ Specify the number of bytes to be allocated.
176
+
177
+ Returns
178
+ -------
179
+ buf : CudaBuffer
180
+ Allocated buffer.
181
+ """
182
+ cdef:
183
+ shared_ptr[CCudaBuffer] cudabuf
184
+ with nogil:
185
+ cudabuf = GetResultValue(self.context.get().Allocate(nbytes))
186
+ return pyarrow_wrap_cudabuffer(cudabuf)
187
+
188
+ def foreign_buffer(self, address, size, base=None):
189
+ """
190
+ Create device buffer from address and size as a view.
191
+
192
+ The caller is responsible for allocating and freeing the
193
+ memory. When `address==size==0` then a new zero-sized buffer
194
+ is returned.
195
+
196
+ Parameters
197
+ ----------
198
+ address : int
199
+ Specify the starting address of the buffer. The address can
200
+ refer to both device or host memory but it must be
201
+ accessible from device after mapping it with
202
+ `get_device_address` method.
203
+ size : int
204
+ Specify the size of device buffer in bytes.
205
+ base : {None, object}
206
+ Specify object that owns the referenced memory.
207
+
208
+ Returns
209
+ -------
210
+ cbuf : CudaBuffer
211
+ Device buffer as a view of device reachable memory.
212
+
213
+ """
214
+ if not address and size == 0:
215
+ return self.new_buffer(0)
216
+ cdef:
217
+ uintptr_t c_addr = self.get_device_address(address)
218
+ int64_t c_size = size
219
+ shared_ptr[CCudaBuffer] cudabuf
220
+
221
+ cudabuf = GetResultValue(self.context.get().View(
222
+ <uint8_t*>c_addr, c_size))
223
+ return pyarrow_wrap_cudabuffer_base(cudabuf, base)
224
+
225
+ def open_ipc_buffer(self, ipc_handle):
226
+ """ Open existing CUDA IPC memory handle
227
+
228
+ Parameters
229
+ ----------
230
+ ipc_handle : IpcMemHandle
231
+ Specify opaque pointer to CUipcMemHandle (driver API).
232
+
233
+ Returns
234
+ -------
235
+ buf : CudaBuffer
236
+ referencing device buffer
237
+ """
238
+ handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle)
239
+ cdef shared_ptr[CCudaBuffer] cudabuf
240
+ with nogil:
241
+ cudabuf = GetResultValue(
242
+ self.context.get().OpenIpcBuffer(handle.get()[0]))
243
+ return pyarrow_wrap_cudabuffer(cudabuf)
244
+
245
+ def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1):
246
+ """Create device buffer and initialize with data.
247
+
248
+ Parameters
249
+ ----------
250
+ data : {CudaBuffer, HostBuffer, Buffer, array-like}
251
+ Specify data to be copied to device buffer.
252
+ offset : int
253
+ Specify the offset of input buffer for device data
254
+ buffering. Default: 0.
255
+ size : int
256
+ Specify the size of device buffer in bytes. Default: all
257
+ (starting from input offset)
258
+
259
+ Returns
260
+ -------
261
+ cbuf : CudaBuffer
262
+ Device buffer with copied data.
263
+ """
264
+ is_host_data = not pyarrow_is_cudabuffer(data)
265
+ buf = as_buffer(data) if is_host_data else data
266
+
267
+ bsize = buf.size
268
+ if offset < 0 or (bsize and offset >= bsize):
269
+ raise ValueError('offset argument is out-of-range')
270
+ if size < 0:
271
+ size = bsize - offset
272
+ elif offset + size > bsize:
273
+ raise ValueError(
274
+ 'requested larger slice than available in device buffer')
275
+
276
+ if offset != 0 or size != bsize:
277
+ buf = buf.slice(offset, size)
278
+
279
+ result = self.new_buffer(size)
280
+ if is_host_data:
281
+ result.copy_from_host(buf, position=0, nbytes=size)
282
+ else:
283
+ result.copy_from_device(buf, position=0, nbytes=size)
284
+ return result
285
+
286
+ def buffer_from_object(self, obj):
287
+ """Create device buffer view of arbitrary object that references
288
+ device accessible memory.
289
+
290
+ When the object contains a non-contiguous view of device
291
+ accessible memory then the returned device buffer will contain
292
+ contiguous view of the memory, that is, including the
293
+ intermediate data that is otherwise invisible to the input
294
+ object.
295
+
296
+ Parameters
297
+ ----------
298
+ obj : {object, Buffer, HostBuffer, CudaBuffer, ...}
299
+ Specify an object that holds (device or host) address that
300
+ can be accessed from device. This includes objects with
301
+ types defined in pyarrow.cuda as well as arbitrary objects
302
+ that implement the CUDA array interface as defined by numba.
303
+
304
+ Returns
305
+ -------
306
+ cbuf : CudaBuffer
307
+ Device buffer as a view of device accessible memory.
308
+
309
+ """
310
+ if isinstance(obj, HostBuffer):
311
+ return self.foreign_buffer(obj.address, obj.size, base=obj)
312
+ elif isinstance(obj, Buffer):
313
+ return CudaBuffer.from_buffer(obj)
314
+ elif isinstance(obj, CudaBuffer):
315
+ return obj
316
+ elif hasattr(obj, '__cuda_array_interface__'):
317
+ desc = obj.__cuda_array_interface__
318
+ addr = desc['data'][0]
319
+ if addr is None:
320
+ return self.new_buffer(0)
321
+ import numpy as np
322
+ start, end = get_contiguous_span(
323
+ desc['shape'], desc.get('strides'),
324
+ np.dtype(desc['typestr']).itemsize)
325
+ return self.foreign_buffer(addr + start, end - start, base=obj)
326
+ raise ArrowTypeError('cannot create device buffer view from'
327
+ ' `%s` object' % (type(obj)))
328
+
329
+
330
+ cdef class IpcMemHandle(_Weakrefable):
331
+ """A serializable container for a CUDA IPC handle.
332
+ """
333
+ cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h):
334
+ self.handle = h
335
+
336
+ @staticmethod
337
+ def from_buffer(Buffer opaque_handle):
338
+ """Create IpcMemHandle from opaque buffer (e.g. from another
339
+ process)
340
+
341
+ Parameters
342
+ ----------
343
+ opaque_handle :
344
+ a CUipcMemHandle as a const void*
345
+
346
+ Returns
347
+ -------
348
+ ipc_handle : IpcMemHandle
349
+ """
350
+ c_buf = pyarrow_unwrap_buffer(opaque_handle)
351
+ cdef:
352
+ shared_ptr[CCudaIpcMemHandle] handle
353
+
354
+ handle = GetResultValue(
355
+ CCudaIpcMemHandle.FromBuffer(c_buf.get().data()))
356
+ return pyarrow_wrap_cudaipcmemhandle(handle)
357
+
358
+ def serialize(self, pool=None):
359
+ """Write IpcMemHandle to a Buffer
360
+
361
+ Parameters
362
+ ----------
363
+ pool : {MemoryPool, None}
364
+ Specify a pool to allocate memory from
365
+
366
+ Returns
367
+ -------
368
+ buf : Buffer
369
+ The serialized buffer.
370
+ """
371
+ cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
372
+ cdef shared_ptr[CBuffer] buf
373
+ cdef CCudaIpcMemHandle* h = self.handle.get()
374
+ with nogil:
375
+ buf = GetResultValue(h.Serialize(pool_))
376
+ return pyarrow_wrap_buffer(buf)
377
+
378
+
379
+ cdef class CudaBuffer(Buffer):
380
+ """An Arrow buffer with data located in a GPU device.
381
+
382
+ To create a CudaBuffer instance, use Context.device_buffer().
383
+
384
+ The memory allocated in a CudaBuffer is freed when the buffer object
385
+ is deleted.
386
+ """
387
+
388
+ def __init__(self):
389
+ raise TypeError("Do not call CudaBuffer's constructor directly, use "
390
+ "`<pyarrow.Context instance>.device_buffer`"
391
+ " method instead.")
392
+
393
+ cdef void init_cuda(self,
394
+ const shared_ptr[CCudaBuffer]& buffer,
395
+ object base):
396
+ self.cuda_buffer = buffer
397
+ self.init(<shared_ptr[CBuffer]> buffer)
398
+ self.base = base
399
+
400
+ @staticmethod
401
+ def from_buffer(buf):
402
+ """ Convert back generic buffer into CudaBuffer
403
+
404
+ Parameters
405
+ ----------
406
+ buf : Buffer
407
+ Specify buffer containing CudaBuffer
408
+
409
+ Returns
410
+ -------
411
+ dbuf : CudaBuffer
412
+ Resulting device buffer.
413
+ """
414
+ c_buf = pyarrow_unwrap_buffer(buf)
415
+ cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf))
416
+ return pyarrow_wrap_cudabuffer(cuda_buffer)
417
+
418
+ @staticmethod
419
+ def from_numba(mem):
420
+ """Create a CudaBuffer view from numba MemoryPointer instance.
421
+
422
+ Parameters
423
+ ----------
424
+ mem : numba.cuda.cudadrv.driver.MemoryPointer
425
+
426
+ Returns
427
+ -------
428
+ cbuf : CudaBuffer
429
+ Device buffer as a view of numba MemoryPointer.
430
+ """
431
+ ctx = Context.from_numba(mem.context)
432
+ if mem.device_pointer.value is None and mem.size==0:
433
+ return ctx.new_buffer(0)
434
+ return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem)
435
+
436
+ def to_numba(self):
437
+ """Return numba memory pointer of CudaBuffer instance.
438
+ """
439
+ import ctypes
440
+ from numba.cuda.cudadrv.driver import MemoryPointer
441
+ return MemoryPointer(self.context.to_numba(),
442
+ pointer=ctypes.c_void_p(self.address),
443
+ size=self.size)
444
+
445
+ cdef getitem(self, int64_t i):
446
+ return self.copy_to_host(position=i, nbytes=1)[0]
447
+
448
+ def copy_to_host(self, int64_t position=0, int64_t nbytes=-1,
449
+ Buffer buf=None,
450
+ MemoryPool memory_pool=None, c_bool resizable=False):
451
+ """Copy memory from GPU device to CPU host
452
+
453
+ Caller is responsible for ensuring that all tasks affecting
454
+ the memory are finished. Use
455
+
456
+ `<CudaBuffer instance>.context.synchronize()`
457
+
458
+ when needed.
459
+
460
+ Parameters
461
+ ----------
462
+ position : int
463
+ Specify the starting position of the source data in GPU
464
+ device buffer. Default: 0.
465
+ nbytes : int
466
+ Specify the number of bytes to copy. Default: -1 (all from
467
+ the position until host buffer is full).
468
+ buf : Buffer
469
+ Specify a pre-allocated output buffer in host. Default: None
470
+ (allocate new output buffer).
471
+ memory_pool : MemoryPool
472
+ resizable : bool
473
+ Specify extra arguments to allocate_buffer. Used only when
474
+ buf is None.
475
+
476
+ Returns
477
+ -------
478
+ buf : Buffer
479
+ Output buffer in host.
480
+
481
+ """
482
+ if position < 0 or (self.size and position > self.size) \
483
+ or (self.size == 0 and position != 0):
484
+ raise ValueError('position argument is out-of-range')
485
+ cdef:
486
+ int64_t c_nbytes
487
+ if buf is None:
488
+ if nbytes < 0:
489
+ # copy all starting from position to new host buffer
490
+ c_nbytes = self.size - position
491
+ else:
492
+ if nbytes > self.size - position:
493
+ raise ValueError(
494
+ 'requested more to copy than available from '
495
+ 'device buffer')
496
+ # copy nbytes starting from position to new host buffer
497
+ c_nbytes = nbytes
498
+ buf = allocate_buffer(c_nbytes, memory_pool=memory_pool,
499
+ resizable=resizable)
500
+ else:
501
+ if nbytes < 0:
502
+ # copy all from position until given host buffer is full
503
+ c_nbytes = min(self.size - position, buf.size)
504
+ else:
505
+ if nbytes > buf.size:
506
+ raise ValueError(
507
+ 'requested copy does not fit into host buffer')
508
+ # copy nbytes from position to given host buffer
509
+ c_nbytes = nbytes
510
+
511
+ cdef:
512
+ shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
513
+ int64_t c_position = position
514
+ with nogil:
515
+ check_status(self.cuda_buffer.get()
516
+ .CopyToHost(c_position, c_nbytes,
517
+ c_buf.get().mutable_data()))
518
+ return buf
519
+
520
+ def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1):
521
+ """Copy data from host to device.
522
+
523
+ The device buffer must be pre-allocated.
524
+
525
+ Parameters
526
+ ----------
527
+ data : {Buffer, array-like}
528
+ Specify data in host. It can be array-like that is valid
529
+ argument to py_buffer
530
+ position : int
531
+ Specify the starting position of the copy in device buffer.
532
+ Default: 0.
533
+ nbytes : int
534
+ Specify the number of bytes to copy. Default: -1 (all from
535
+ source until device buffer, starting from position, is full)
536
+
537
+ Returns
538
+ -------
539
+ nbytes : int
540
+ Number of bytes copied.
541
+ """
542
+ if position < 0 or position > self.size:
543
+ raise ValueError('position argument is out-of-range')
544
+ cdef:
545
+ int64_t c_nbytes
546
+ buf = as_buffer(data)
547
+
548
+ if nbytes < 0:
549
+ # copy from host buffer to device buffer starting from
550
+ # position until device buffer is full
551
+ c_nbytes = min(self.size - position, buf.size)
552
+ else:
553
+ if nbytes > buf.size:
554
+ raise ValueError(
555
+ 'requested more to copy than available from host buffer')
556
+ if nbytes > self.size - position:
557
+ raise ValueError(
558
+ 'requested more to copy than available in device buffer')
559
+ # copy nbytes from host buffer to device buffer starting
560
+ # from position
561
+ c_nbytes = nbytes
562
+
563
+ cdef:
564
+ shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
565
+ int64_t c_position = position
566
+ with nogil:
567
+ check_status(self.cuda_buffer.get().
568
+ CopyFromHost(c_position, c_buf.get().data(),
569
+ c_nbytes))
570
+ return c_nbytes
571
+
572
+ def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1):
573
+ """Copy data from device to device.
574
+
575
+ Parameters
576
+ ----------
577
+ buf : CudaBuffer
578
+ Specify source device buffer.
579
+ position : int
580
+ Specify the starting position of the copy in device buffer.
581
+ Default: 0.
582
+ nbytes : int
583
+ Specify the number of bytes to copy. Default: -1 (all from
584
+ source until device buffer, starting from position, is full)
585
+
586
+ Returns
587
+ -------
588
+ nbytes : int
589
+ Number of bytes copied.
590
+
591
+ """
592
+ if position < 0 or position > self.size:
593
+ raise ValueError('position argument is out-of-range')
594
+ cdef:
595
+ int64_t c_nbytes
596
+
597
+ if nbytes < 0:
598
+ # copy from source device buffer to device buffer starting
599
+ # from position until device buffer is full
600
+ c_nbytes = min(self.size - position, buf.size)
601
+ else:
602
+ if nbytes > buf.size:
603
+ raise ValueError(
604
+ 'requested more to copy than available from device buffer')
605
+ if nbytes > self.size - position:
606
+ raise ValueError(
607
+ 'requested more to copy than available in device buffer')
608
+ # copy nbytes from source device buffer to device buffer
609
+ # starting from position
610
+ c_nbytes = nbytes
611
+
612
+ cdef:
613
+ shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf)
614
+ int64_t c_position = position
615
+ shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext(
616
+ buf.context)
617
+ void* c_source_data = <void*>(c_buf.get().address())
618
+
619
+ if self.context.handle != buf.context.handle:
620
+ with nogil:
621
+ check_status(self.cuda_buffer.get().
622
+ CopyFromAnotherDevice(c_src_ctx, c_position,
623
+ c_source_data, c_nbytes))
624
+ else:
625
+ with nogil:
626
+ check_status(self.cuda_buffer.get().
627
+ CopyFromDevice(c_position, c_source_data,
628
+ c_nbytes))
629
+ return c_nbytes
630
+
631
+ def export_for_ipc(self):
632
+ """
633
+ Expose this device buffer as IPC memory which can be used in other
634
+ processes.
635
+
636
+ After calling this function, this device memory will not be
637
+ freed when the CudaBuffer is destructed.
638
+
639
+ Returns
640
+ -------
641
+ ipc_handle : IpcMemHandle
642
+ The exported IPC handle
643
+
644
+ """
645
+ cdef shared_ptr[CCudaIpcMemHandle] handle
646
+ with nogil:
647
+ handle = GetResultValue(self.cuda_buffer.get().ExportForIpc())
648
+ return pyarrow_wrap_cudaipcmemhandle(handle)
649
+
650
+ @property
651
+ def context(self):
652
+ """Returns the CUDA driver context of this buffer.
653
+ """
654
+ return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context())
655
+
656
+ def slice(self, offset=0, length=None):
657
+ """Return slice of device buffer
658
+
659
+ Parameters
660
+ ----------
661
+ offset : int, default 0
662
+ Specify offset from the start of device buffer to slice
663
+ length : int, default None
664
+ Specify the length of slice (default is until end of device
665
+ buffer starting from offset). If the length is larger than
666
+ the data available, the returned slice will have a size of
667
+ the available data starting from the offset.
668
+
669
+ Returns
670
+ -------
671
+ sliced : CudaBuffer
672
+ Zero-copy slice of device buffer.
673
+
674
+ """
675
+ if offset < 0 or (self.size and offset >= self.size):
676
+ raise ValueError('offset argument is out-of-range')
677
+ cdef int64_t offset_ = offset
678
+ cdef int64_t size
679
+ if length is None:
680
+ size = self.size - offset_
681
+ elif offset + length <= self.size:
682
+ size = length
683
+ else:
684
+ size = self.size - offset
685
+ parent = pyarrow_unwrap_cudabuffer(self)
686
+ return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent,
687
+ offset_, size))
688
+
689
+ def to_pybytes(self):
690
+ """Return device buffer content as Python bytes.
691
+ """
692
+ return self.copy_to_host().to_pybytes()
693
+
694
+ def __getbuffer__(self, cp.Py_buffer* buffer, int flags):
695
+ # Device buffer contains data pointers on the device. Hence,
696
+ # cannot support buffer protocol PEP-3118 for CudaBuffer.
697
+ raise BufferError('buffer protocol for device buffer not supported')
698
+
699
+
700
+ cdef class HostBuffer(Buffer):
701
+ """Device-accessible CPU memory created using cudaHostAlloc.
702
+
703
+ To create a HostBuffer instance, use
704
+
705
+ cuda.new_host_buffer(<nbytes>)
706
+ """
707
+
708
+ def __init__(self):
709
+ raise TypeError("Do not call HostBuffer's constructor directly,"
710
+ " use `cuda.new_host_buffer` function instead.")
711
+
712
+ cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer):
713
+ self.host_buffer = buffer
714
+ self.init(<shared_ptr[CBuffer]> buffer)
715
+
716
+ @property
717
+ def size(self):
718
+ return self.host_buffer.get().size()
719
+
720
+
721
+ cdef class BufferReader(NativeFile):
722
+ """File interface for zero-copy read from CUDA buffers.
723
+
724
+ Note: Read methods return pointers to device memory. This means
725
+ you must be careful using this interface with any Arrow code which
726
+ may expect to be able to do anything other than pointer arithmetic
727
+ on the returned buffers.
728
+ """
729
+
730
+ def __cinit__(self, CudaBuffer obj):
731
+ self.buffer = obj
732
+ self.reader = new CCudaBufferReader(self.buffer.buffer)
733
+ self.set_random_access_file(
734
+ shared_ptr[CRandomAccessFile](self.reader))
735
+ self.is_readable = True
736
+
737
+ def read_buffer(self, nbytes=None):
738
+ """Return a slice view of the underlying device buffer.
739
+
740
+ The slice will start at the current reader position and will
741
+ have specified size in bytes.
742
+
743
+ Parameters
744
+ ----------
745
+ nbytes : int, default None
746
+ Specify the number of bytes to read. Default: None (read all
747
+ remaining bytes).
748
+
749
+ Returns
750
+ -------
751
+ cbuf : CudaBuffer
752
+ New device buffer.
753
+
754
+ """
755
+ cdef:
756
+ int64_t c_nbytes
757
+ shared_ptr[CCudaBuffer] output
758
+
759
+ if nbytes is None:
760
+ c_nbytes = self.size() - self.tell()
761
+ else:
762
+ c_nbytes = nbytes
763
+
764
+ with nogil:
765
+ output = static_pointer_cast[CCudaBuffer, CBuffer](
766
+ GetResultValue(self.reader.Read(c_nbytes)))
767
+
768
+ return pyarrow_wrap_cudabuffer(output)
769
+
770
+
771
+ cdef class BufferWriter(NativeFile):
772
+ """File interface for writing to CUDA buffers.
773
+
774
+ By default writes are unbuffered. Use set_buffer_size to enable
775
+ buffering.
776
+ """
777
+
778
+ def __cinit__(self, CudaBuffer buffer):
779
+ self.buffer = buffer
780
+ self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer)
781
+ self.set_output_stream(shared_ptr[COutputStream](self.writer))
782
+ self.is_writable = True
783
+
784
+ def writeat(self, int64_t position, object data):
785
+ """Write data to buffer starting from position.
786
+
787
+ Parameters
788
+ ----------
789
+ position : int
790
+ Specify device buffer position where the data will be
791
+ written.
792
+ data : array-like
793
+ Specify data, the data instance must implement buffer
794
+ protocol.
795
+ """
796
+ cdef:
797
+ Buffer buf = as_buffer(data)
798
+ const uint8_t* c_data = buf.buffer.get().data()
799
+ int64_t c_size = buf.buffer.get().size()
800
+
801
+ with nogil:
802
+ check_status(self.writer.WriteAt(position, c_data, c_size))
803
+
804
+ def flush(self):
805
+ """ Flush the buffer stream """
806
+ with nogil:
807
+ check_status(self.writer.Flush())
808
+
809
+ def seek(self, int64_t position, int whence=0):
810
+ # TODO: remove this method after NativeFile.seek supports
811
+ # writable files.
812
+ cdef int64_t offset
813
+
814
+ with nogil:
815
+ if whence == 0:
816
+ offset = position
817
+ elif whence == 1:
818
+ offset = GetResultValue(self.writer.Tell())
819
+ offset = offset + position
820
+ else:
821
+ with gil:
822
+ raise ValueError("Invalid value of whence: {0}"
823
+ .format(whence))
824
+ check_status(self.writer.Seek(offset))
825
+ return self.tell()
826
+
827
+ @property
828
+ def buffer_size(self):
829
+ """Returns size of host (CPU) buffer, 0 for unbuffered
830
+ """
831
+ return self.writer.buffer_size()
832
+
833
+ @buffer_size.setter
834
+ def buffer_size(self, int64_t buffer_size):
835
+ """Set CPU buffer size to limit calls to cudaMemcpy
836
+
837
+ Parameters
838
+ ----------
839
+ buffer_size : int
840
+ Specify the size of CPU buffer to allocate in bytes.
841
+ """
842
+ with nogil:
843
+ check_status(self.writer.SetBufferSize(buffer_size))
844
+
845
+ @property
846
+ def num_bytes_buffered(self):
847
+ """Returns number of bytes buffered on host
848
+ """
849
+ return self.writer.num_bytes_buffered()
850
+
851
+ # Functions
852
+
853
+
854
+ def new_host_buffer(const int64_t size, int device=0):
855
+ """Return buffer with CUDA-accessible memory on CPU host
856
+
857
+ Parameters
858
+ ----------
859
+ size : int
860
+ Specify the number of bytes to be allocated.
861
+ device : int
862
+ Specify GPU device number.
863
+
864
+ Returns
865
+ -------
866
+ dbuf : HostBuffer
867
+ Allocated host buffer
868
+ """
869
+ cdef shared_ptr[CCudaHostBuffer] buffer
870
+ with nogil:
871
+ buffer = GetResultValue(AllocateCudaHostBuffer(device, size))
872
+ return pyarrow_wrap_cudahostbuffer(buffer)
873
+
874
+
875
+ def serialize_record_batch(object batch, object ctx):
876
+ """ Write record batch message to GPU device memory
877
+
878
+ Parameters
879
+ ----------
880
+ batch : RecordBatch
881
+ Record batch to write
882
+ ctx : Context
883
+ CUDA Context to allocate device memory from
884
+
885
+ Returns
886
+ -------
887
+ dbuf : CudaBuffer
888
+ device buffer which contains the record batch message
889
+ """
890
+ cdef shared_ptr[CCudaBuffer] buffer
891
+ cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get()
892
+ cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get()
893
+ with nogil:
894
+ buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_))
895
+ return pyarrow_wrap_cudabuffer(buffer)
896
+
897
+
898
+ def read_message(object source, pool=None):
899
+ """ Read Arrow IPC message located on GPU device
900
+
901
+ Parameters
902
+ ----------
903
+ source : {CudaBuffer, cuda.BufferReader}
904
+ Device buffer or reader of device buffer.
905
+ pool : MemoryPool (optional)
906
+ Pool to allocate CPU memory for the metadata
907
+
908
+ Returns
909
+ -------
910
+ message : Message
911
+ The deserialized message, body still on device
912
+ """
913
+ cdef:
914
+ Message result = Message.__new__(Message)
915
+ cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
916
+ if not isinstance(source, BufferReader):
917
+ reader = BufferReader(source)
918
+ with nogil:
919
+ result.message = move(
920
+ GetResultValue(ReadMessage(reader.reader, pool_)))
921
+ return result
922
+
923
+
924
+ def read_record_batch(object buffer, object schema, *,
925
+ DictionaryMemo dictionary_memo=None, pool=None):
926
+ """Construct RecordBatch referencing IPC message located on CUDA device.
927
+
928
+ While the metadata is copied to host memory for deserialization,
929
+ the record batch data remains on the device.
930
+
931
+ Parameters
932
+ ----------
933
+ buffer :
934
+ Device buffer containing the complete IPC message
935
+ schema : Schema
936
+ The schema for the record batch
937
+ dictionary_memo : DictionaryMemo, optional
938
+ If message contains dictionaries, must pass a populated
939
+ DictionaryMemo
940
+ pool : MemoryPool (optional)
941
+ Pool to allocate metadata from
942
+
943
+ Returns
944
+ -------
945
+ batch : RecordBatch
946
+ Reconstructed record batch, with device pointers
947
+
948
+ """
949
+ cdef:
950
+ shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema)
951
+ shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer)
952
+ CDictionaryMemo temp_memo
953
+ CDictionaryMemo* arg_dict_memo
954
+ CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
955
+ shared_ptr[CRecordBatch] batch
956
+
957
+ if dictionary_memo is not None:
958
+ arg_dict_memo = dictionary_memo.memo
959
+ else:
960
+ arg_dict_memo = &temp_memo
961
+
962
+ with nogil:
963
+ batch = GetResultValue(CudaReadRecordBatch(
964
+ schema_, arg_dict_memo, buffer_, pool_))
965
+ return pyarrow_wrap_batch(batch)
966
+
967
+
968
+ # Public API
969
+
970
+
971
+ cdef public api bint pyarrow_is_buffer(object buffer):
972
+ return isinstance(buffer, Buffer)
973
+
974
+ # cudabuffer
975
+
976
+ cdef public api bint pyarrow_is_cudabuffer(object buffer):
977
+ return isinstance(buffer, CudaBuffer)
978
+
979
+
980
+ cdef public api object \
981
+ pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base):
982
+ cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
983
+ result.init_cuda(buf, base)
984
+ return result
985
+
986
+
987
+ cdef public api object \
988
+ pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf):
989
+ cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
990
+ result.init_cuda(buf, None)
991
+ return result
992
+
993
+
994
+ cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj):
995
+ if pyarrow_is_cudabuffer(obj):
996
+ return (<CudaBuffer>obj).cuda_buffer
997
+ raise TypeError('expected CudaBuffer instance, got %s'
998
+ % (type(obj).__name__))
999
+
1000
+ # cudahostbuffer
1001
+
1002
+ cdef public api bint pyarrow_is_cudahostbuffer(object buffer):
1003
+ return isinstance(buffer, HostBuffer)
1004
+
1005
+
1006
+ cdef public api object \
1007
+ pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf):
1008
+ cdef HostBuffer result = HostBuffer.__new__(HostBuffer)
1009
+ result.init_host(buf)
1010
+ return result
1011
+
1012
+
1013
+ cdef public api shared_ptr[CCudaHostBuffer] \
1014
+ pyarrow_unwrap_cudahostbuffer(object obj):
1015
+ if pyarrow_is_cudahostbuffer(obj):
1016
+ return (<HostBuffer>obj).host_buffer
1017
+ raise TypeError('expected HostBuffer instance, got %s'
1018
+ % (type(obj).__name__))
1019
+
1020
+ # cudacontext
1021
+
1022
+ cdef public api bint pyarrow_is_cudacontext(object ctx):
1023
+ return isinstance(ctx, Context)
1024
+
1025
+
1026
+ cdef public api object \
1027
+ pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx):
1028
+ cdef Context result = Context.__new__(Context)
1029
+ result.init(ctx)
1030
+ return result
1031
+
1032
+
1033
+ cdef public api shared_ptr[CCudaContext] \
1034
+ pyarrow_unwrap_cudacontext(object obj):
1035
+ if pyarrow_is_cudacontext(obj):
1036
+ return (<Context>obj).context
1037
+ raise TypeError('expected Context instance, got %s'
1038
+ % (type(obj).__name__))
1039
+
1040
+ # cudaipcmemhandle
1041
+
1042
+ cdef public api bint pyarrow_is_cudaipcmemhandle(object handle):
1043
+ return isinstance(handle, IpcMemHandle)
1044
+
1045
+
1046
+ cdef public api object \
1047
+ pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h):
1048
+ cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle)
1049
+ result.init(h)
1050
+ return result
1051
+
1052
+
1053
+ cdef public api shared_ptr[CCudaIpcMemHandle] \
1054
+ pyarrow_unwrap_cudaipcmemhandle(object obj):
1055
+ if pyarrow_is_cudaipcmemhandle(obj):
1056
+ return (<IpcMemHandle>obj).handle
1057
+ raise TypeError('expected IpcMemHandle instance, got %s'
1058
+ % (type(obj).__name__))
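Based on the docstrings above, a minimal round trip through pyarrow.cuda might look as follows (requires a CUDA-enabled pyarrow build and at least one visible GPU; not part of this commit):

    from pyarrow import cuda

    ctx = cuda.Context(0)                      # CUDA driver context for device 0
    dbuf = ctx.buffer_from_data(b"hello gpu")  # copy host bytes to a device buffer
    host = dbuf.copy_to_host()                 # copy the device buffer back to host
    assert host.to_pybytes() == b"hello gpu"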
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pxd ADDED
@@ -0,0 +1,183 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ """Dataset is currently unstable. APIs subject to change without notice."""
+
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow_dataset cimport *
+ from pyarrow.lib cimport *
+ from pyarrow._fs cimport FileSystem, FileInfo
+
+
+ cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*)
+
+ cdef class DatasetFactory(_Weakrefable):
+
+ cdef:
+ SharedPtrNoGIL[CDatasetFactory] wrapped
+ CDatasetFactory* factory
+
+ cdef init(self, const shared_ptr[CDatasetFactory]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CDatasetFactory]& sp)
+
+ cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil
+
+
+ cdef class Dataset(_Weakrefable):
+
+ cdef:
+ SharedPtrNoGIL[CDataset] wrapped
+ CDataset* dataset
+ public dict _scan_options
+
+ cdef void init(self, const shared_ptr[CDataset]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CDataset]& sp)
+
+ cdef shared_ptr[CDataset] unwrap(self) nogil
+
+
+ cdef class Scanner(_Weakrefable):
+ cdef:
+ SharedPtrNoGIL[CScanner] wrapped
+ CScanner* scanner
+
+ cdef void init(self, const shared_ptr[CScanner]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CScanner]& sp)
+
+ cdef shared_ptr[CScanner] unwrap(self)
+
+ @staticmethod
+ cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *
+
+
+ cdef class FragmentScanOptions(_Weakrefable):
+
+ cdef:
+ shared_ptr[CFragmentScanOptions] wrapped
+
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFragmentScanOptions]& sp)
+
+
+ cdef class FileFormat(_Weakrefable):
+
+ cdef:
+ shared_ptr[CFileFormat] wrapped
+ CFileFormat* format
+
+ cdef void init(self, const shared_ptr[CFileFormat]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFileFormat]& sp)
+
+ cdef inline shared_ptr[CFileFormat] unwrap(self)
+
+ cdef _set_default_fragment_scan_options(self, FragmentScanOptions options)
+
+ # Return a WrittenFile after a file was written.
+ # May be overridden by subclasses, e.g. to add metadata.
+ cdef WrittenFile _finish_write(self, path, base_dir,
+ CFileWriter* file_writer)
+
+
+ cdef class FileWriteOptions(_Weakrefable):
+
+ cdef:
+ shared_ptr[CFileWriteOptions] wrapped
+ CFileWriteOptions* c_options
+
+ cdef void init(self, const shared_ptr[CFileWriteOptions]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFileWriteOptions]& sp)
+
+ cdef inline shared_ptr[CFileWriteOptions] unwrap(self)
+
+
+ cdef class Fragment(_Weakrefable):
+
+ cdef:
+ SharedPtrNoGIL[CFragment] wrapped
+ CFragment* fragment
+
+ cdef void init(self, const shared_ptr[CFragment]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CFragment]& sp)
+
+ cdef inline shared_ptr[CFragment] unwrap(self)
+
+
+ cdef class FileFragment(Fragment):
+
+ cdef:
+ CFileFragment* file_fragment
+
+ cdef void init(self, const shared_ptr[CFragment]& sp)
+
+
+ cdef class Partitioning(_Weakrefable):
+
+ cdef:
+ shared_ptr[CPartitioning] wrapped
+ CPartitioning* partitioning
+
+ cdef init(self, const shared_ptr[CPartitioning]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CPartitioning]& sp)
+
+ cdef inline shared_ptr[CPartitioning] unwrap(self)
+
+
+ cdef class PartitioningFactory(_Weakrefable):
+
+ cdef:
+ shared_ptr[CPartitioningFactory] wrapped
+ CPartitioningFactory* factory
+ object constructor
+ object options
+
+ cdef init(self, const shared_ptr[CPartitioningFactory]& sp)
+
+ @staticmethod
+ cdef wrap(const shared_ptr[CPartitioningFactory]& sp,
+ object constructor, object options)
+
+ cdef inline shared_ptr[CPartitioningFactory] unwrap(self)
+
+
+ cdef class WrittenFile(_Weakrefable):
+
+ # The full path to the created file
+ cdef public str path
+ # Optional Parquet metadata
+ # This metadata will have the file path attribute set to the path of
+ # the written file.
+ cdef public object metadata
+ # The size of the file in bytes
+ cdef public int64_t size
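These cdef classes underpin the Python-level pyarrow.dataset API. A sketch of typical usage (the directory path and column names are placeholders):

    import pyarrow.dataset as ds

    # Treat a directory of Parquet files as one logical dataset, then read
    # a column-pruned, filtered Table from it.
    dataset = ds.dataset("data/", format="parquet", partitioning="hive")
    table = dataset.to_table(columns=["id", "value"], filter=ds.field("value") > 0)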
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pyx ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (78.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx ADDED
@@ -0,0 +1,51 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ """Dataset support for ORC file format."""
+
+ from pyarrow.lib cimport *
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.includes.libarrow_dataset cimport *
+
+ from pyarrow._dataset cimport FileFormat
+
+
+ cdef class OrcFileFormat(FileFormat):
+
+ def __init__(self):
+ self.init(shared_ptr[CFileFormat](new COrcFileFormat()))
+
+ def equals(self, OrcFileFormat other):
+ """
+ Parameters
+ ----------
+ other : pyarrow.dataset.OrcFileFormat
+
+ Returns
+ -------
+ True
+ """
+ return True
+
+ @property
+ def default_extname(self):
+ return "orc"
+
+ def __reduce__(self):
+ return OrcFileFormat, tuple()
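OrcFileFormat plugs into the same dataset machinery as the other formats; for example (placeholder path, requires pyarrow built with ORC support):

    import pyarrow.dataset as ds

    orc_dataset = ds.dataset("orc_data/", format="orc")  # equivalently format=ds.OrcFileFormat()
    print(orc_dataset.schema)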
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (370 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd ADDED
@@ -0,0 +1,42 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ """Dataset support for Parquet file format."""
+
+ from pyarrow.includes.libarrow_dataset cimport *
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
+
+ from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions
+
+
+ cdef class ParquetFragmentScanOptions(FragmentScanOptions):
+ cdef:
+ CParquetFragmentScanOptions* parquet_options
+ object _parquet_decryption_config
+
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)
+ cdef CReaderProperties* reader_properties(self)
+ cdef ArrowReaderProperties* arrow_reader_properties(self)
+
+
+ cdef class ParquetFileWriteOptions(FileWriteOptions):
+
+ cdef:
+ CParquetFileWriteOptions* parquet_options
+ object _properties
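ParquetFragmentScanOptions and ParquetFileWriteOptions carry the Parquet-specific scan and write settings used by pyarrow.dataset. A hedged sketch of wiring scan options through the Python API (the path is a placeholder):

    import pyarrow.dataset as ds

    # Ask the Parquet reader to pre-buffer column chunks when scanning fragments.
    scan_opts = ds.ParquetFragmentScanOptions(pre_buffer=True)
    fmt = ds.ParquetFileFormat(default_fragment_scan_options=scan_opts)

    dataset = ds.dataset("parquet_data/", format=fmt)
    preview = dataset.head(5)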
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx ADDED
@@ -0,0 +1,1019 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet file format."""
21
+
22
+ from cython cimport binding
23
+ from cython.operator cimport dereference as deref
24
+
25
+ import os
26
+ import warnings
27
+
28
+ import pyarrow as pa
29
+ from pyarrow.lib cimport *
30
+ from pyarrow.lib import frombytes, tobytes
31
+ from pyarrow.includes.libarrow cimport *
32
+ from pyarrow.includes.libarrow_dataset cimport *
33
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
34
+ from pyarrow._fs cimport FileSystem
35
+
36
+ from pyarrow._compute cimport Expression, _bind
37
+ from pyarrow._dataset cimport (
38
+ _make_file_source,
39
+ DatasetFactory,
40
+ FileFormat,
41
+ FileFragment,
42
+ FileWriteOptions,
43
+ Fragment,
44
+ FragmentScanOptions,
45
+ CacheOptions,
46
+ Partitioning,
47
+ PartitioningFactory,
48
+ WrittenFile
49
+ )
50
+
51
+ from pyarrow._parquet cimport (
52
+ _create_writer_properties, _create_arrow_writer_properties,
53
+ FileMetaData,
54
+ )
55
+
56
+
57
+ try:
58
+ from pyarrow._dataset_parquet_encryption import (
59
+ set_encryption_config, set_decryption_config
60
+ )
61
+ parquet_encryption_enabled = True
62
+ except ImportError:
63
+ parquet_encryption_enabled = False
64
+
65
+
66
+ cdef Expression _true = Expression._scalar(True)
67
+
68
+ ctypedef CParquetFileWriter* _CParquetFileWriterPtr
69
+
70
+
71
+ cdef class ParquetFileFormat(FileFormat):
72
+ """
73
+ FileFormat for Parquet
74
+
75
+ Parameters
76
+ ----------
77
+ read_options : ParquetReadOptions
78
+ Read options for the file.
79
+ default_fragment_scan_options : ParquetFragmentScanOptions
80
+ Scan Options for the file.
81
+ **kwargs : dict
82
+ Additional options for read option or scan option
83
+ """
84
+
85
+ cdef:
86
+ CParquetFileFormat* parquet_format
87
+
88
+ def __init__(self, read_options=None,
89
+ default_fragment_scan_options=None,
90
+ **kwargs):
91
+ cdef:
92
+ shared_ptr[CParquetFileFormat] wrapped
93
+ CParquetFileFormatReaderOptions* options
94
+
95
+ # Read/scan options
96
+ read_options_args = {option: kwargs[option] for option in kwargs
97
+ if option in _PARQUET_READ_OPTIONS}
98
+ scan_args = {option: kwargs[option] for option in kwargs
99
+ if option not in _PARQUET_READ_OPTIONS}
100
+ if read_options and read_options_args:
101
+ duplicates = ', '.join(sorted(read_options_args))
102
+ raise ValueError(f'If `read_options` is given, '
103
+ f'cannot specify {duplicates}')
104
+ if default_fragment_scan_options and scan_args:
105
+ duplicates = ', '.join(sorted(scan_args))
106
+ raise ValueError(f'If `default_fragment_scan_options` is given, '
107
+ f'cannot specify {duplicates}')
108
+
109
+ if read_options is None:
110
+ read_options = ParquetReadOptions(**read_options_args)
111
+ elif isinstance(read_options, dict):
112
+ # For backwards compatibility
113
+ duplicates = []
114
+ for option, value in read_options.items():
115
+ if option in _PARQUET_READ_OPTIONS:
116
+ read_options_args[option] = value
117
+ else:
118
+ duplicates.append(option)
119
+ scan_args[option] = value
120
+ if duplicates:
121
+ duplicates = ", ".join(duplicates)
122
+ warnings.warn(f'The scan options {duplicates} should be '
123
+ 'specified directly as keyword arguments')
124
+ read_options = ParquetReadOptions(**read_options_args)
125
+ elif not isinstance(read_options, ParquetReadOptions):
126
+ raise TypeError('`read_options` must be either a dictionary or an '
127
+ 'instance of ParquetReadOptions')
128
+
129
+ if default_fragment_scan_options is None:
130
+ default_fragment_scan_options = ParquetFragmentScanOptions(
131
+ **scan_args)
132
+ elif isinstance(default_fragment_scan_options, dict):
133
+ default_fragment_scan_options = ParquetFragmentScanOptions(
134
+ **default_fragment_scan_options)
135
+ elif not isinstance(default_fragment_scan_options,
136
+ ParquetFragmentScanOptions):
137
+ raise TypeError('`default_fragment_scan_options` must be either a '
138
+ 'dictionary or an instance of '
139
+ 'ParquetFragmentScanOptions')
140
+
141
+ wrapped = make_shared[CParquetFileFormat]()
142
+
143
+ options = &(wrapped.get().reader_options)
144
+ if read_options.dictionary_columns is not None:
145
+ for column in read_options.dictionary_columns:
146
+ options.dict_columns.insert(tobytes(column))
147
+ options.coerce_int96_timestamp_unit = \
148
+ read_options._coerce_int96_timestamp_unit
149
+
150
+ self.init(<shared_ptr[CFileFormat]> wrapped)
151
+ self.default_fragment_scan_options = default_fragment_scan_options
152
+
153
+ cdef void init(self, const shared_ptr[CFileFormat]& sp):
154
+ FileFormat.init(self, sp)
155
+ self.parquet_format = <CParquetFileFormat*> sp.get()
156
+
157
+ cdef WrittenFile _finish_write(self, path, base_dir,
158
+ CFileWriter* file_writer):
159
+ cdef:
160
+ FileMetaData parquet_metadata
161
+ CParquetFileWriter* parquet_file_writer
162
+
163
+ parquet_metadata = None
164
+ parquet_file_writer = dynamic_cast[_CParquetFileWriterPtr](file_writer)
165
+ with nogil:
166
+ metadata = deref(
167
+ deref(parquet_file_writer).parquet_writer()).metadata()
168
+ if metadata:
169
+ parquet_metadata = FileMetaData()
170
+ parquet_metadata.init(metadata)
171
+ parquet_metadata.set_file_path(os.path.relpath(path, base_dir))
172
+
173
+ size = GetResultValue(file_writer.GetBytesWritten())
174
+
175
+ return WrittenFile(path, parquet_metadata, size)
176
+
177
+ @property
178
+ def read_options(self):
179
+ cdef CParquetFileFormatReaderOptions* options
180
+ options = &self.parquet_format.reader_options
181
+ parquet_read_options = ParquetReadOptions(
182
+ dictionary_columns={frombytes(col)
183
+ for col in options.dict_columns},
184
+ )
185
+ # Read options getter/setter works with strings so setting
186
+ # the private property which uses the C Type
187
+ parquet_read_options._coerce_int96_timestamp_unit = \
188
+ options.coerce_int96_timestamp_unit
189
+ return parquet_read_options
190
+
191
+ def make_write_options(self, **kwargs):
192
+ """
193
+ Parameters
194
+ ----------
195
+ **kwargs : dict
196
+
197
+ Returns
198
+ -------
199
+ pyarrow.dataset.FileWriteOptions
200
+ """
201
+ opts = FileFormat.make_write_options(self)
202
+ (<ParquetFileWriteOptions> opts).update(**kwargs)
203
+ return opts
204
+
205
+ cdef _set_default_fragment_scan_options(self, FragmentScanOptions options):
206
+ if options.type_name == 'parquet':
207
+ self.parquet_format.default_fragment_scan_options = options.wrapped
208
+ else:
209
+ super()._set_default_fragment_scan_options(options)
210
+
211
+ def equals(self, ParquetFileFormat other):
212
+ """
213
+ Parameters
214
+ ----------
215
+ other : pyarrow.dataset.ParquetFileFormat
216
+
217
+ Returns
218
+ -------
219
+ bool
220
+ """
221
+ return (
222
+ self.read_options.equals(other.read_options) and
223
+ self.default_fragment_scan_options ==
224
+ other.default_fragment_scan_options
225
+ )
226
+
227
+ @property
228
+ def default_extname(self):
229
+ return "parquet"
230
+
231
+ def __reduce__(self):
232
+ return ParquetFileFormat, (self.read_options,
233
+ self.default_fragment_scan_options)
234
+
235
+ def __repr__(self):
236
+ return f"<ParquetFileFormat read_options={self.read_options}>"
237
+
238
+ def make_fragment(self, file, filesystem=None,
239
+ Expression partition_expression=None, row_groups=None, *, file_size=None):
240
+ """
241
+ Make a FileFragment from a given file.
242
+
243
+ Parameters
244
+ ----------
245
+ file : file-like object, path-like or str
246
+ The file or file path to make a fragment from.
247
+ filesystem : Filesystem, optional
248
+ If `filesystem` is given, `file` must be a string and specifies
249
+ the path of the file to read from the filesystem.
250
+ partition_expression : Expression, optional
251
+ An expression that is guaranteed true for all rows in the fragment. Allows
252
+ the fragment to be potentially skipped while scanning with a filter.
253
+ row_groups : Iterable, optional
254
+ The indices of the row groups to include
255
+ file_size : int, optional
256
+ The size of the file in bytes. Can improve performance with high-latency filesystems
257
+ when file size needs to be known before reading.
258
+
259
+ Returns
260
+ -------
261
+ fragment : Fragment
262
+ The file fragment
263
+ """
264
+ cdef:
265
+ vector[int] c_row_groups
266
+ if partition_expression is None:
267
+ partition_expression = _true
268
+ if row_groups is None:
269
+ return super().make_fragment(file, filesystem,
270
+ partition_expression, file_size=file_size)
271
+
272
+ c_source = _make_file_source(file, filesystem, file_size)
273
+ c_row_groups = [<int> row_group for row_group in set(row_groups)]
274
+
275
+ c_fragment = <shared_ptr[CFragment]> GetResultValue(
276
+ self.parquet_format.MakeFragment(move(c_source),
277
+ partition_expression.unwrap(),
278
+ <shared_ptr[CSchema]>nullptr,
279
+ move(c_row_groups)))
280
+ return Fragment.wrap(move(c_fragment))
281
+
282
+
283
+ class RowGroupInfo:
284
+ """
285
+ A wrapper class for RowGroup information
286
+
287
+ Parameters
288
+ ----------
289
+ id : integer
290
+ The group ID.
291
+ metadata : FileMetaData
292
+ The rowgroup metadata.
293
+ schema : Schema
294
+ Schema of the rows.
295
+ """
296
+
297
+ def __init__(self, id, metadata, schema):
298
+ self.id = id
299
+ self.metadata = metadata
300
+ self.schema = schema
301
+
302
+ @property
303
+ def num_rows(self):
304
+ return self.metadata.num_rows
305
+
306
+ @property
307
+ def total_byte_size(self):
308
+ return self.metadata.total_byte_size
309
+
310
+ @property
311
+ def statistics(self):
312
+ def name_stats(i):
313
+ col = self.metadata.column(i)
314
+
315
+ stats = col.statistics
316
+ if stats is None or not stats.has_min_max:
317
+ return None, None
318
+
319
+ name = col.path_in_schema
320
+ field_index = self.schema.get_field_index(name)
321
+ if field_index < 0:
322
+ return None, None
323
+
324
+ typ = self.schema.field(field_index).type
325
+ return col.path_in_schema, {
326
+ 'min': pa.scalar(stats.min, type=typ).as_py(),
327
+ 'max': pa.scalar(stats.max, type=typ).as_py()
328
+ }
329
+
330
+ return {
331
+ name: stats for name, stats
332
+ in map(name_stats, range(self.metadata.num_columns))
333
+ if stats is not None
334
+ }
335
+
336
+ def __repr__(self):
337
+ return "RowGroupInfo({})".format(self.id)
338
+
339
+ def __eq__(self, other):
340
+ if isinstance(other, int):
341
+ return self.id == other
342
+ if not isinstance(other, RowGroupInfo):
343
+ return False
344
+ return self.id == other.id
345
+
346
+
347
+ cdef class ParquetFileFragment(FileFragment):
348
+ """A Fragment representing a parquet file."""
349
+
350
+ cdef:
351
+ CParquetFileFragment* parquet_file_fragment
352
+
353
+ cdef void init(self, const shared_ptr[CFragment]& sp):
354
+ FileFragment.init(self, sp)
355
+ self.parquet_file_fragment = <CParquetFileFragment*> sp.get()
356
+
357
+ def __reduce__(self):
358
+ buffer = self.buffer
359
+ # parquet_file_fragment.row_groups() is empty if the metadata
360
+ # information of the file is not yet populated
361
+ if not bool(self.parquet_file_fragment.row_groups()):
362
+ row_groups = None
363
+ else:
364
+ row_groups = [row_group.id for row_group in self.row_groups]
365
+
366
+ return self.format.make_fragment, (
367
+ self.path if buffer is None else buffer,
368
+ self.filesystem,
369
+ self.partition_expression,
370
+ row_groups
371
+ )
372
+
373
+ def ensure_complete_metadata(self):
374
+ """
375
+ Ensure that all metadata (statistics, physical schema, ...) have
376
+ been read and cached in this fragment.
377
+ """
378
+ with nogil:
379
+ check_status(self.parquet_file_fragment.EnsureCompleteMetadata())
380
+
381
+ @property
382
+ def row_groups(self):
383
+ metadata = self.metadata
384
+ cdef vector[int] row_groups = self.parquet_file_fragment.row_groups()
385
+ return [RowGroupInfo(i, metadata.row_group(i), self.physical_schema)
386
+ for i in row_groups]
387
+
388
+ @property
389
+ def metadata(self):
390
+ self.ensure_complete_metadata()
391
+ cdef FileMetaData metadata = FileMetaData()
392
+ metadata.init(self.parquet_file_fragment.metadata())
393
+ return metadata
394
+
395
+ @property
396
+ def num_row_groups(self):
397
+ """
398
+ Return the number of row groups viewed by this fragment (not the
399
+ number of row groups in the origin file).
400
+ """
401
+ self.ensure_complete_metadata()
402
+ return self.parquet_file_fragment.row_groups().size()
403
+
404
+ def split_by_row_group(self, Expression filter=None,
405
+ Schema schema=None):
406
+ """
407
+ Split the fragment into multiple fragments.
408
+
409
+ Yield a Fragment wrapping each row group in this ParquetFileFragment.
410
+ Row groups will be excluded whose metadata contradicts the optional
411
+ filter.
412
+
413
+ Parameters
414
+ ----------
415
+ filter : Expression, default None
416
+ Only include the row groups which satisfy this predicate (using
417
+ the Parquet RowGroup statistics).
418
+ schema : Schema, default None
419
+ Schema to use when filtering row groups. Defaults to the
420
+ Fragment's physical schema
421
+
422
+ Returns
423
+ -------
424
+ A list of Fragments
425
+ """
426
+ cdef:
427
+ vector[shared_ptr[CFragment]] c_fragments
428
+ CExpression c_filter
429
+ shared_ptr[CFragment] c_fragment
430
+
431
+ schema = schema or self.physical_schema
432
+ c_filter = _bind(filter, schema)
433
+ with nogil:
434
+ c_fragments = move(GetResultValue(
435
+ self.parquet_file_fragment.SplitByRowGroup(move(c_filter))))
436
+
437
+ return [Fragment.wrap(c_fragment) for c_fragment in c_fragments]
438
+
439
+ def subset(self, Expression filter=None, Schema schema=None,
440
+ object row_group_ids=None):
441
+ """
442
+ Create a subset of the fragment (viewing a subset of the row groups).
443
+
444
+ Subset can be specified by either a filter predicate (with optional
445
+ schema) or by a list of row group IDs. Note that when using a filter,
446
+ the resulting fragment can be empty (viewing no row groups).
447
+
448
+ Parameters
449
+ ----------
450
+ filter : Expression, default None
451
+ Only include the row groups which satisfy this predicate (using
452
+ the Parquet RowGroup statistics).
453
+ schema : Schema, default None
454
+ Schema to use when filtering row groups. Defaults to the
455
+ Fragment's physical schema
456
+ row_group_ids : list of ints
457
+ The row group IDs to include in the subset. Can only be specified
458
+ if `filter` is None.
459
+
460
+ Returns
461
+ -------
462
+ ParquetFileFragment
463
+ """
464
+ cdef:
465
+ CExpression c_filter
466
+ vector[int] c_row_group_ids
467
+ shared_ptr[CFragment] c_fragment
468
+
469
+ if filter is not None and row_group_ids is not None:
470
+ raise ValueError(
471
+ "Cannot specify both 'filter' and 'row_group_ids'."
472
+ )
473
+
474
+ if filter is not None:
475
+ schema = schema or self.physical_schema
476
+ c_filter = _bind(filter, schema)
477
+ with nogil:
478
+ c_fragment = move(GetResultValue(
479
+ self.parquet_file_fragment.SubsetWithFilter(
480
+ move(c_filter))))
481
+ elif row_group_ids is not None:
482
+ c_row_group_ids = [
483
+ <int> row_group for row_group in sorted(set(row_group_ids))
484
+ ]
485
+ with nogil:
486
+ c_fragment = move(GetResultValue(
487
+ self.parquet_file_fragment.SubsetWithIds(
488
+ move(c_row_group_ids))))
489
+ else:
490
+ raise ValueError(
491
+ "Need to specify one of 'filter' or 'row_group_ids'"
492
+ )
493
+
494
+ return Fragment.wrap(c_fragment)
495
+
496
+
497
+ cdef class ParquetReadOptions(_Weakrefable):
498
+ """
499
+ Parquet format specific options for reading.
500
+
501
+ Parameters
502
+ ----------
503
+ dictionary_columns : list of string, default None
504
+ Names of columns which should be dictionary encoded as
505
+ they are read
506
+ coerce_int96_timestamp_unit : str, default None
507
+ Cast timestamps that are stored in INT96 format to a particular
508
+ resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
509
+ and therefore INT96 timestamps will be inferred as timestamps
510
+ in nanoseconds
511
+ """
512
+
513
+ cdef public:
514
+ set dictionary_columns
515
+ TimeUnit _coerce_int96_timestamp_unit
516
+
517
+ # Also see _PARQUET_READ_OPTIONS
518
+ def __init__(self, dictionary_columns=None,
519
+ coerce_int96_timestamp_unit=None):
520
+ self.dictionary_columns = set(dictionary_columns or set())
521
+ self.coerce_int96_timestamp_unit = coerce_int96_timestamp_unit
522
+
523
+ @property
524
+ def coerce_int96_timestamp_unit(self):
525
+ return timeunit_to_string(self._coerce_int96_timestamp_unit)
526
+
527
+ @coerce_int96_timestamp_unit.setter
528
+ def coerce_int96_timestamp_unit(self, unit):
529
+ if unit is not None:
530
+ self._coerce_int96_timestamp_unit = string_to_timeunit(unit)
531
+ else:
532
+ self._coerce_int96_timestamp_unit = TimeUnit_NANO
533
+
534
+ def equals(self, ParquetReadOptions other):
535
+ """
536
+ Parameters
537
+ ----------
538
+ other : pyarrow.dataset.ParquetReadOptions
539
+
540
+ Returns
541
+ -------
542
+ bool
543
+ """
544
+ return (self.dictionary_columns == other.dictionary_columns and
545
+ self.coerce_int96_timestamp_unit ==
546
+ other.coerce_int96_timestamp_unit)
547
+
548
+ def __eq__(self, other):
549
+ try:
550
+ return self.equals(other)
551
+ except TypeError:
552
+ return False
553
+
554
+ def __repr__(self):
555
+ return (
556
+ f"<ParquetReadOptions"
557
+ f" dictionary_columns={self.dictionary_columns}"
558
+ f" coerce_int96_timestamp_unit={self.coerce_int96_timestamp_unit}>"
559
+ )
560
+
561
+
562
+ cdef class ParquetFileWriteOptions(FileWriteOptions):
563
+
564
+ def update(self, **kwargs):
565
+ """
566
+ Parameters
567
+ ----------
568
+ **kwargs : dict
569
+ """
570
+ arrow_fields = {
571
+ "use_deprecated_int96_timestamps",
572
+ "coerce_timestamps",
573
+ "allow_truncated_timestamps",
574
+ "use_compliant_nested_type",
575
+ }
576
+
577
+ setters = set()
578
+ for name, value in kwargs.items():
579
+ if name not in self._properties:
580
+ raise TypeError("unexpected parquet write option: " + name)
581
+ self._properties[name] = value
582
+ if name in arrow_fields:
583
+ setters.add(self._set_arrow_properties)
584
+ elif name == "encryption_config" and value is not None:
585
+ setters.add(self._set_encryption_config)
586
+ else:
587
+ setters.add(self._set_properties)
588
+
589
+ for setter in setters:
590
+ setter()
591
+
592
+ def _set_properties(self):
593
+ cdef CParquetFileWriteOptions* opts = self.parquet_options
594
+
595
+ opts.writer_properties = _create_writer_properties(
596
+ use_dictionary=self._properties["use_dictionary"],
597
+ compression=self._properties["compression"],
598
+ version=self._properties["version"],
599
+ write_statistics=self._properties["write_statistics"],
600
+ data_page_size=self._properties["data_page_size"],
601
+ compression_level=self._properties["compression_level"],
602
+ use_byte_stream_split=(
603
+ self._properties["use_byte_stream_split"]
604
+ ),
605
+ column_encoding=self._properties["column_encoding"],
606
+ data_page_version=self._properties["data_page_version"],
607
+ encryption_properties=self._properties["encryption_properties"],
608
+ write_batch_size=self._properties["write_batch_size"],
609
+ dictionary_pagesize_limit=self._properties["dictionary_pagesize_limit"],
610
+ write_page_index=self._properties["write_page_index"],
611
+ write_page_checksum=self._properties["write_page_checksum"],
612
+ sorting_columns=self._properties["sorting_columns"],
613
+ )
614
+
615
+ def _set_arrow_properties(self):
616
+ cdef CParquetFileWriteOptions* opts = self.parquet_options
617
+
618
+ opts.arrow_writer_properties = _create_arrow_writer_properties(
619
+ use_deprecated_int96_timestamps=(
620
+ self._properties["use_deprecated_int96_timestamps"]
621
+ ),
622
+ coerce_timestamps=self._properties["coerce_timestamps"],
623
+ allow_truncated_timestamps=(
624
+ self._properties["allow_truncated_timestamps"]
625
+ ),
626
+ writer_engine_version="V2",
627
+ use_compliant_nested_type=(
628
+ self._properties["use_compliant_nested_type"]
629
+ )
630
+ )
631
+
632
+ def _set_encryption_config(self):
633
+ if not parquet_encryption_enabled:
634
+ raise NotImplementedError(
635
+ "Encryption is not enabled in your installation of pyarrow, but an "
636
+ "encryption_config was provided."
637
+ )
638
+ set_encryption_config(self, self._properties["encryption_config"])
639
+
640
+ cdef void init(self, const shared_ptr[CFileWriteOptions]& sp):
641
+ FileWriteOptions.init(self, sp)
642
+ self.parquet_options = <CParquetFileWriteOptions*> sp.get()
643
+ self._properties = dict(
644
+ use_dictionary=True,
645
+ compression="snappy",
646
+ version="2.6",
647
+ write_statistics=None,
648
+ data_page_size=None,
649
+ compression_level=None,
650
+ use_byte_stream_split=False,
651
+ column_encoding=None,
652
+ data_page_version="1.0",
653
+ use_deprecated_int96_timestamps=False,
654
+ coerce_timestamps=None,
655
+ allow_truncated_timestamps=False,
656
+ use_compliant_nested_type=True,
657
+ encryption_properties=None,
658
+ write_batch_size=None,
659
+ dictionary_pagesize_limit=None,
660
+ write_page_index=False,
661
+ encryption_config=None,
662
+ write_page_checksum=False,
663
+ sorting_columns=None,
664
+ )
665
+
666
+ self._set_properties()
667
+ self._set_arrow_properties()
668
+
669
+ def __repr__(self):
670
+ return "<pyarrow.dataset.ParquetFileWriteOptions {0}>".format(
671
+ " ".join([f"{key}={value}" for key, value in self._properties.items()])
672
+ )
673
+
674
+
675
+ cdef set _PARQUET_READ_OPTIONS = {
676
+ 'dictionary_columns', 'coerce_int96_timestamp_unit'
677
+ }
678
+
679
+
680
+ cdef class ParquetFragmentScanOptions(FragmentScanOptions):
681
+ """
682
+ Scan-specific options for Parquet fragments.
683
+
684
+ Parameters
685
+ ----------
686
+ use_buffered_stream : bool, default False
687
+ Read files through buffered input streams rather than loading entire
688
+ row groups at once. This may be enabled to reduce memory overhead.
689
+ Disabled by default.
690
+ buffer_size : int, default 8192
691
+ Size of buffered stream, if enabled. Default is 8KB.
692
+ pre_buffer : bool, default True
693
+ If enabled, pre-buffer the raw Parquet data instead of issuing one
694
+ read per column chunk. This can improve performance on high-latency
695
+ filesystems (e.g. S3, GCS) by coalescing and issuing file reads in
696
+ parallel using a background I/O thread pool.
697
+ Set to False if you want to prioritize minimal memory usage
698
+ over maximum speed.
699
+ cache_options : pyarrow.CacheOptions, default None
700
+ Cache options used when pre_buffer is enabled. The default values should
701
+ be good for most use cases. You may want to adjust these for example if
702
+ you have exceptionally high latency to the file system.
703
+ thrift_string_size_limit : int, default None
704
+ If not None, override the maximum total string size allocated
705
+ when decoding Thrift structures. The default limit should be
706
+ sufficient for most Parquet files.
707
+ thrift_container_size_limit : int, default None
708
+ If not None, override the maximum total size of containers allocated
709
+ when decoding Thrift structures. The default limit should be
710
+ sufficient for most Parquet files.
711
+ decryption_config : pyarrow.dataset.ParquetDecryptionConfig, default None
712
+ If not None, use the provided ParquetDecryptionConfig to decrypt the
713
+ Parquet file.
714
+ page_checksum_verification : bool, default False
715
+ If True, verify the page checksum for each page read from the file.
716
+ """
717
+
718
+ # Avoid mistakenly creating attributes
719
+ __slots__ = ()
720
+
721
+ def __init__(self, *, bint use_buffered_stream=False,
722
+ buffer_size=8192,
723
+ bint pre_buffer=True,
724
+ cache_options=None,
725
+ thrift_string_size_limit=None,
726
+ thrift_container_size_limit=None,
727
+ decryption_config=None,
728
+ bint page_checksum_verification=False):
729
+ self.init(shared_ptr[CFragmentScanOptions](
730
+ new CParquetFragmentScanOptions()))
731
+ self.use_buffered_stream = use_buffered_stream
732
+ self.buffer_size = buffer_size
733
+ self.pre_buffer = pre_buffer
734
+ if cache_options is not None:
735
+ self.cache_options = cache_options
736
+ if thrift_string_size_limit is not None:
737
+ self.thrift_string_size_limit = thrift_string_size_limit
738
+ if thrift_container_size_limit is not None:
739
+ self.thrift_container_size_limit = thrift_container_size_limit
740
+ if decryption_config is not None:
741
+ self.parquet_decryption_config = decryption_config
742
+ self.page_checksum_verification = page_checksum_verification
743
+
744
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp):
745
+ FragmentScanOptions.init(self, sp)
746
+ self.parquet_options = <CParquetFragmentScanOptions*> sp.get()
747
+
748
+ cdef CReaderProperties* reader_properties(self):
749
+ return self.parquet_options.reader_properties.get()
750
+
751
+ cdef ArrowReaderProperties* arrow_reader_properties(self):
752
+ return self.parquet_options.arrow_reader_properties.get()
753
+
754
+ @property
755
+ def use_buffered_stream(self):
756
+ return self.reader_properties().is_buffered_stream_enabled()
757
+
758
+ @use_buffered_stream.setter
759
+ def use_buffered_stream(self, bint use_buffered_stream):
760
+ if use_buffered_stream:
761
+ self.reader_properties().enable_buffered_stream()
762
+ else:
763
+ self.reader_properties().disable_buffered_stream()
764
+
765
+ @property
766
+ def buffer_size(self):
767
+ return self.reader_properties().buffer_size()
768
+
769
+ @buffer_size.setter
770
+ def buffer_size(self, buffer_size):
771
+ if buffer_size <= 0:
772
+ raise ValueError("Buffer size must be larger than zero")
773
+ self.reader_properties().set_buffer_size(buffer_size)
774
+
775
+ @property
776
+ def pre_buffer(self):
777
+ return self.arrow_reader_properties().pre_buffer()
778
+
779
+ @pre_buffer.setter
780
+ def pre_buffer(self, bint pre_buffer):
781
+ self.arrow_reader_properties().set_pre_buffer(pre_buffer)
782
+
783
+ @property
784
+ def cache_options(self):
785
+ return CacheOptions.wrap(self.arrow_reader_properties().cache_options())
786
+
787
+ @cache_options.setter
788
+ def cache_options(self, CacheOptions options):
789
+ self.arrow_reader_properties().set_cache_options(options.unwrap())
790
+
791
+ @property
792
+ def thrift_string_size_limit(self):
793
+ return self.reader_properties().thrift_string_size_limit()
794
+
795
+ @thrift_string_size_limit.setter
796
+ def thrift_string_size_limit(self, size):
797
+ if size <= 0:
798
+ raise ValueError("size must be larger than zero")
799
+ self.reader_properties().set_thrift_string_size_limit(size)
800
+
801
+ @property
802
+ def thrift_container_size_limit(self):
803
+ return self.reader_properties().thrift_container_size_limit()
804
+
805
+ @thrift_container_size_limit.setter
806
+ def thrift_container_size_limit(self, size):
807
+ if size <= 0:
808
+ raise ValueError("size must be larger than zero")
809
+ self.reader_properties().set_thrift_container_size_limit(size)
810
+
811
+ @property
812
+ def parquet_decryption_config(self):
813
+ if not parquet_encryption_enabled:
814
+ raise NotImplementedError(
815
+ "Unable to access encryption features. "
816
+ "Encryption is not enabled in your installation of pyarrow."
817
+ )
818
+ return self._parquet_decryption_config
819
+
820
+ @parquet_decryption_config.setter
821
+ def parquet_decryption_config(self, config):
822
+ if not parquet_encryption_enabled:
823
+ raise NotImplementedError(
824
+ "Encryption is not enabled in your installation of pyarrow, but a "
825
+ "decryption_config was provided."
826
+ )
827
+ set_decryption_config(self, config)
828
+ self._parquet_decryption_config = config
829
+
830
+ @property
831
+ def page_checksum_verification(self):
832
+ return self.reader_properties().page_checksum_verification()
833
+
834
+ @page_checksum_verification.setter
835
+ def page_checksum_verification(self, bint page_checksum_verification):
836
+ self.reader_properties().set_page_checksum_verification(page_checksum_verification)
837
+
838
+ def equals(self, ParquetFragmentScanOptions other):
839
+ """
840
+ Parameters
841
+ ----------
842
+ other : pyarrow.dataset.ParquetFragmentScanOptions
843
+
844
+ Returns
845
+ -------
846
+ bool
847
+ """
848
+ attrs = (
849
+ self.use_buffered_stream, self.buffer_size, self.pre_buffer, self.cache_options,
850
+ self.thrift_string_size_limit, self.thrift_container_size_limit,
851
+ self.page_checksum_verification)
852
+ other_attrs = (
853
+ other.use_buffered_stream, other.buffer_size, other.pre_buffer, other.cache_options,
854
+ other.thrift_string_size_limit,
855
+ other.thrift_container_size_limit, other.page_checksum_verification)
856
+ return attrs == other_attrs
857
+
858
+ @staticmethod
859
+ @binding(True) # Required for Cython < 3
860
+ def _reconstruct(kwargs):
861
+ # __reduce__ doesn't allow passing named arguments directly to the
862
+ # reconstructor, hence this wrapper.
863
+ return ParquetFragmentScanOptions(**kwargs)
864
+
865
+ def __reduce__(self):
866
+ kwargs = dict(
867
+ use_buffered_stream=self.use_buffered_stream,
868
+ buffer_size=self.buffer_size,
869
+ pre_buffer=self.pre_buffer,
870
+ cache_options=self.cache_options,
871
+ thrift_string_size_limit=self.thrift_string_size_limit,
872
+ thrift_container_size_limit=self.thrift_container_size_limit,
873
+ page_checksum_verification=self.page_checksum_verification
874
+ )
875
+ return ParquetFragmentScanOptions._reconstruct, (kwargs,)
876
+
877
+
878
+ cdef class ParquetFactoryOptions(_Weakrefable):
879
+ """
880
+ Influences the discovery of a parquet dataset.
881
+
882
+ Parameters
883
+ ----------
884
+ partition_base_dir : str, optional
885
+ For the purposes of applying the partitioning, paths will be
886
+ stripped of the partition_base_dir. Files not matching the
887
+ partition_base_dir prefix will be skipped for partitioning discovery.
888
+ The ignored files will still be part of the Dataset, but will not
889
+ have partition information.
890
+ partitioning : Partitioning, PartitioningFactory, optional
891
+ The partitioning scheme applied to fragments, see ``Partitioning``.
892
+ validate_column_chunk_paths : bool, default False
893
+ Assert that all ColumnChunk paths are consistent. The parquet spec
894
+ allows for ColumnChunk data to be stored in multiple files, but
895
+ ParquetDatasetFactory supports only a single file with all ColumnChunk
896
+ data. If this flag is set construction of a ParquetDatasetFactory will
897
+ raise an error if ColumnChunk data is not resident in a single file.
898
+ """
899
+
900
+ cdef:
901
+ CParquetFactoryOptions options
902
+
903
+ __slots__ = () # avoid mistakenly creating attributes
904
+
905
+ def __init__(self, partition_base_dir=None, partitioning=None,
906
+ validate_column_chunk_paths=False):
907
+ if isinstance(partitioning, PartitioningFactory):
908
+ self.partitioning_factory = partitioning
909
+ elif isinstance(partitioning, Partitioning):
910
+ self.partitioning = partitioning
911
+
912
+ if partition_base_dir is not None:
913
+ self.partition_base_dir = partition_base_dir
914
+
915
+ self.options.validate_column_chunk_paths = validate_column_chunk_paths
916
+
917
+ cdef inline CParquetFactoryOptions unwrap(self):
918
+ return self.options
919
+
920
+ @property
921
+ def partitioning(self):
922
+ """Partitioning to apply to discovered files.
923
+
924
+ NOTE: setting this property will overwrite partitioning_factory.
925
+ """
926
+ c_partitioning = self.options.partitioning.partitioning()
927
+ if c_partitioning.get() == nullptr:
928
+ return None
929
+ return Partitioning.wrap(c_partitioning)
930
+
931
+ @partitioning.setter
932
+ def partitioning(self, Partitioning value):
933
+ self.options.partitioning = (<Partitioning> value).unwrap()
934
+
935
+ @property
936
+ def partitioning_factory(self):
937
+ """PartitioningFactory to apply to discovered files and
938
+ discover a Partitioning.
939
+
940
+ NOTE: setting this property will overwrite partitioning.
941
+ """
942
+ c_factory = self.options.partitioning.factory()
943
+ if c_factory.get() == nullptr:
944
+ return None
945
+ return PartitioningFactory.wrap(c_factory, None, None)
946
+
947
+ @partitioning_factory.setter
948
+ def partitioning_factory(self, PartitioningFactory value):
949
+ self.options.partitioning = (<PartitioningFactory> value).unwrap()
950
+
951
+ @property
952
+ def partition_base_dir(self):
953
+ """
954
+ Base directory to strip paths before applying the partitioning.
955
+ """
956
+ return frombytes(self.options.partition_base_dir)
957
+
958
+ @partition_base_dir.setter
959
+ def partition_base_dir(self, value):
960
+ self.options.partition_base_dir = tobytes(value)
961
+
962
+ @property
963
+ def validate_column_chunk_paths(self):
964
+ """
965
+ Base directory to strip paths before applying the partitioning.
966
+ """
967
+ return self.options.validate_column_chunk_paths
968
+
969
+ @validate_column_chunk_paths.setter
970
+ def validate_column_chunk_paths(self, value):
971
+ self.options.validate_column_chunk_paths = value
972
+
973
+
974
+ cdef class ParquetDatasetFactory(DatasetFactory):
975
+ """
976
+ Create a ParquetDatasetFactory from a Parquet `_metadata` file.
977
+
978
+ Parameters
979
+ ----------
980
+ metadata_path : str
981
+ Path to the `_metadata` parquet metadata-only file generated with
982
+ `pyarrow.parquet.write_metadata`.
983
+ filesystem : pyarrow.fs.FileSystem
984
+ Filesystem to read the metadata_path from, and subsequent parquet
985
+ files.
986
+ format : ParquetFileFormat
987
+ Parquet format options.
988
+ options : ParquetFactoryOptions, optional
989
+ Various flags influencing the discovery of filesystem paths.
990
+ """
991
+
992
+ cdef:
993
+ CParquetDatasetFactory* parquet_factory
994
+
995
+ def __init__(self, metadata_path, FileSystem filesystem not None,
996
+ FileFormat format not None,
997
+ ParquetFactoryOptions options=None):
998
+ cdef:
999
+ c_string c_path
1000
+ shared_ptr[CFileSystem] c_filesystem
1001
+ shared_ptr[CParquetFileFormat] c_format
1002
+ CResult[shared_ptr[CDatasetFactory]] result
1003
+ CParquetFactoryOptions c_options
1004
+
1005
+ c_path = tobytes(metadata_path)
1006
+ c_filesystem = filesystem.unwrap()
1007
+ c_format = static_pointer_cast[CParquetFileFormat, CFileFormat](
1008
+ format.unwrap())
1009
+ options = options or ParquetFactoryOptions()
1010
+ c_options = options.unwrap()
1011
+
1012
+ with nogil:
1013
+ result = CParquetDatasetFactory.MakeFromMetaDataPath(
1014
+ c_path, c_filesystem, c_format, c_options)
1015
+ self.init(GetResultValue(result))
1016
+
1017
+ cdef init(self, shared_ptr[CDatasetFactory]& sp):
1018
+ DatasetFactory.init(self, sp)
1019
+ self.parquet_factory = <CParquetDatasetFactory*> sp.get()
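
For orientation, a minimal sketch of how the classes in this module are exercised through pyarrow.dataset; "parquet_dir" is a hypothetical directory of Parquet files and the option values are illustrative only:

    import pyarrow.dataset as ds

    fmt = ds.ParquetFileFormat(
        read_options=ds.ParquetReadOptions(coerce_int96_timestamp_unit="ms"),
        default_fragment_scan_options=ds.ParquetFragmentScanOptions(pre_buffer=True),
    )
    dataset = ds.dataset("parquet_dir", format=fmt)

    # Each file is a ParquetFileFragment; it can be split per row group so that
    # row groups whose statistics contradict a filter can be skipped.
    for fragment in dataset.get_fragments():
        for piece in fragment.split_by_row_group():
            pass  # `piece` views a single row group of the file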
env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (115 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.pyx ADDED
@@ -0,0 +1,117 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # ---------------------------------------------------------------------
19
+ # Implement Feather file format
20
+
21
+ # cython: profile=False
22
+ # distutils: language = c++
23
+ # cython: language_level=3
24
+
25
+ from cython.operator cimport dereference as deref
26
+ from pyarrow.includes.common cimport *
27
+ from pyarrow.includes.libarrow cimport *
28
+ from pyarrow.includes.libarrow_feather cimport *
29
+ from pyarrow.lib cimport (check_status, Table, _Weakrefable,
30
+ get_writer, get_reader, pyarrow_wrap_table)
31
+ from pyarrow.lib import tobytes
32
+
33
+
34
+ class FeatherError(Exception):
35
+ pass
36
+
37
+
38
+ def write_feather(Table table, object dest, compression=None,
39
+ compression_level=None, chunksize=None, version=2):
40
+ cdef shared_ptr[COutputStream] sink
41
+ get_writer(dest, &sink)
42
+
43
+ cdef CFeatherProperties properties
44
+ if version == 2:
45
+ properties.version = kFeatherV2Version
46
+ else:
47
+ properties.version = kFeatherV1Version
48
+
49
+ if compression == 'zstd':
50
+ properties.compression = CCompressionType_ZSTD
51
+ elif compression == 'lz4':
52
+ properties.compression = CCompressionType_LZ4_FRAME
53
+ else:
54
+ properties.compression = CCompressionType_UNCOMPRESSED
55
+
56
+ if chunksize is not None:
57
+ properties.chunksize = chunksize
58
+
59
+ if compression_level is not None:
60
+ properties.compression_level = compression_level
61
+
62
+ with nogil:
63
+ check_status(WriteFeather(deref(table.table), sink.get(),
64
+ properties))
65
+
66
+
67
+ cdef class FeatherReader(_Weakrefable):
68
+ cdef:
69
+ shared_ptr[CFeatherReader] reader
70
+
71
+ def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads):
72
+ cdef:
73
+ shared_ptr[CRandomAccessFile] reader
74
+ CIpcReadOptions options = CIpcReadOptions.Defaults()
75
+ options.use_threads = use_threads
76
+
77
+ get_reader(source, use_memory_map, &reader)
78
+ with nogil:
79
+ self.reader = GetResultValue(CFeatherReader.Open(reader, options))
80
+
81
+ @property
82
+ def version(self):
83
+ return self.reader.get().version()
84
+
85
+ def read(self):
86
+ cdef shared_ptr[CTable] sp_table
87
+ with nogil:
88
+ check_status(self.reader.get()
89
+ .Read(&sp_table))
90
+
91
+ return pyarrow_wrap_table(sp_table)
92
+
93
+ def read_indices(self, indices):
94
+ cdef:
95
+ shared_ptr[CTable] sp_table
96
+ vector[int] c_indices
97
+
98
+ for index in indices:
99
+ c_indices.push_back(index)
100
+ with nogil:
101
+ check_status(self.reader.get()
102
+ .Read(c_indices, &sp_table))
103
+
104
+ return pyarrow_wrap_table(sp_table)
105
+
106
+ def read_names(self, names):
107
+ cdef:
108
+ shared_ptr[CTable] sp_table
109
+ vector[c_string] c_names
110
+
111
+ for name in names:
112
+ c_names.push_back(tobytes(name))
113
+ with nogil:
114
+ check_status(self.reader.get()
115
+ .Read(c_names, &sp_table))
116
+
117
+ return pyarrow_wrap_table(sp_table)
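
FeatherReader and write_feather above are the Cython backends for the public pyarrow.feather module. A minimal round-trip sketch (the file name is arbitrary):

    import pyarrow as pa
    import pyarrow.feather as feather

    table = pa.table({"a": [1, 2, 3]})
    feather.write_feather(table, "example.feather", compression="zstd")  # Feather V2
    assert feather.read_table("example.feather").equals(table)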
env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.pyx ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pxd ADDED
@@ -0,0 +1,94 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.common cimport *
21
+ from pyarrow.includes.libarrow_fs cimport *
22
+ from pyarrow.lib import _detect_compression, frombytes, tobytes
23
+ from pyarrow.lib cimport *
24
+
25
+
26
+ cpdef enum FileType:
27
+ NotFound = <int8_t> CFileType_NotFound
28
+ Unknown = <int8_t> CFileType_Unknown
29
+ File = <int8_t> CFileType_File
30
+ Directory = <int8_t> CFileType_Directory
31
+
32
+
33
+ cdef class FileInfo(_Weakrefable):
34
+ cdef:
35
+ CFileInfo info
36
+
37
+ @staticmethod
38
+ cdef wrap(CFileInfo info)
39
+
40
+ cdef inline CFileInfo unwrap(self) nogil
41
+
42
+ @staticmethod
43
+ cdef CFileInfo unwrap_safe(obj)
44
+
45
+
46
+ cdef class FileSelector(_Weakrefable):
47
+ cdef:
48
+ CFileSelector selector
49
+
50
+ @staticmethod
51
+ cdef FileSelector wrap(CFileSelector selector)
52
+
53
+ cdef inline CFileSelector unwrap(self) nogil
54
+
55
+
56
+ cdef class FileSystem(_Weakrefable):
57
+ cdef:
58
+ shared_ptr[CFileSystem] wrapped
59
+ CFileSystem* fs
60
+
61
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
62
+
63
+ @staticmethod
64
+ cdef wrap(const shared_ptr[CFileSystem]& sp)
65
+
66
+ cdef inline shared_ptr[CFileSystem] unwrap(self) nogil
67
+
68
+
69
+ cdef class LocalFileSystem(FileSystem):
70
+ cdef:
71
+ CLocalFileSystem* localfs
72
+
73
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
74
+
75
+
76
+ cdef class SubTreeFileSystem(FileSystem):
77
+ cdef:
78
+ CSubTreeFileSystem* subtreefs
79
+
80
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
81
+
82
+
83
+ cdef class _MockFileSystem(FileSystem):
84
+ cdef:
85
+ CMockFileSystem* mockfs
86
+
87
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
88
+
89
+
90
+ cdef class PyFileSystem(FileSystem):
91
+ cdef:
92
+ CPyFileSystem* pyfs
93
+
94
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped)
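
These declarations back the Python-level filesystem classes exposed in pyarrow.fs. A minimal sketch listing a directory with LocalFileSystem and FileSelector ("/tmp" is just an example path):

    from pyarrow import fs

    local = fs.LocalFileSystem()
    selector = fs.FileSelector("/tmp", recursive=False)
    for info in local.get_file_info(selector):   # returns a list of FileInfo
        print(info.path, info.type, info.size)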
env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx ADDED
@@ -0,0 +1,212 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.lib cimport (pyarrow_wrap_metadata,
23
+ pyarrow_unwrap_metadata)
24
+ from pyarrow.lib import frombytes, tobytes, ensure_metadata
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.includes.libarrow_fs cimport *
28
+ from pyarrow._fs cimport FileSystem, TimePoint_to_ns, PyDateTime_to_TimePoint
29
+
30
+ from datetime import datetime, timedelta, timezone
31
+
32
+
33
+ cdef class GcsFileSystem(FileSystem):
34
+ """
35
+ Google Cloud Storage (GCS) backed FileSystem implementation
36
+
37
+ By default uses the process described in https://google.aip.dev/auth/4110
38
+ to resolve credentials. If not running on Google Cloud Platform (GCP),
39
+ this generally requires the environment variable
40
+ GOOGLE_APPLICATION_CREDENTIALS to point to a JSON file
41
+ containing credentials.
42
+
43
+ Note: GCS buckets are special and the operations available on them may be
44
+ limited or more expensive than expected compared to local file systems.
45
+
46
+ Note: When pickling a GcsFileSystem that uses default credentials, the resolved
47
+ credentials are not stored in the serialized data. Therefore, when unpickling
48
+ it is assumed that the necessary credentials are in place for the target
49
+ process.
50
+
51
+ Parameters
52
+ ----------
53
+ anonymous : boolean, default False
54
+ Whether to connect anonymously.
55
+ If true, will not attempt to look up credentials using standard GCP
56
+ configuration methods.
57
+ access_token : str, default None
58
+ GCP access token. If provided, temporary credentials will be fetched by
59
+ assuming this role; also, a `credential_token_expiration` must be
60
+ specified as well.
61
+ target_service_account : str, default None
62
+ An optional service account to try to impersonate when accessing GCS. This
63
+ requires the specified credential user or service account to have the necessary
64
+ permissions.
65
+ credential_token_expiration : datetime, default None
66
+ Expiration for credential generated with an access token. Must be specified
67
+ if `access_token` is specified.
68
+ default_bucket_location : str, default 'US'
69
+ GCP region to create buckets in.
70
+ scheme : str, default 'https'
71
+ GCS connection transport scheme.
72
+ endpoint_override : str, default None
73
+ Override endpoint with a connect string such as "localhost:9000"
74
+ default_metadata : mapping or pyarrow.KeyValueMetadata, default None
75
+ Default metadata for `open_output_stream`. This will be ignored if
76
+ non-empty metadata is passed to `open_output_stream`.
77
+ retry_time_limit : timedelta, default None
78
+ Set the maximum amount of time the GCS client will attempt to retry
79
+ transient errors. Subsecond granularity is ignored.
80
+ project_id : str, default None
81
+ The GCP project identifier to use for creating buckets.
82
+ If not set, the library uses the GOOGLE_CLOUD_PROJECT environment
83
+ variable. Most I/O operations do not need a project id, only applications
84
+ that create new buckets need a project id.
85
+ """
86
+
87
+ cdef:
88
+ CGcsFileSystem* gcsfs
89
+
90
+ def __init__(self, *, bint anonymous=False, access_token=None,
91
+ target_service_account=None, credential_token_expiration=None,
92
+ default_bucket_location='US',
93
+ scheme=None,
94
+ endpoint_override=None,
95
+ default_metadata=None,
96
+ retry_time_limit=None,
97
+ project_id=None):
98
+ cdef:
99
+ CGcsOptions options
100
+ shared_ptr[CGcsFileSystem] wrapped
101
+ double time_limit_seconds
102
+
103
+ # Intentional use of truthiness because empty strings aren't valid and
104
+ # reconstruction from pickling will give empty strings.
105
+ if anonymous and (target_service_account or access_token):
106
+ raise ValueError(
107
+ 'anonymous option is not compatible with target_service_account and '
108
+ 'access_token'
109
+ )
110
+ elif bool(access_token) != bool(credential_token_expiration):
111
+ raise ValueError(
112
+ 'access_token and credential_token_expiration must be '
113
+ 'specified together'
114
+ )
115
+
116
+ elif anonymous:
117
+ options = CGcsOptions.Anonymous()
118
+ elif access_token:
119
+ if not isinstance(credential_token_expiration, datetime):
120
+ raise ValueError(
121
+ "credential_token_expiration must be a datetime")
122
+ options = CGcsOptions.FromAccessToken(
123
+ tobytes(access_token),
124
+ PyDateTime_to_TimePoint(<PyDateTime_DateTime*>credential_token_expiration))
125
+ else:
126
+ options = CGcsOptions.Defaults()
127
+
128
+ # Target service account requires base credentials so
129
+ # it is not part of the if/else chain above which only
130
+ # handles base credentials.
131
+ if target_service_account:
132
+ options = CGcsOptions.FromImpersonatedServiceAccount(
133
+ options.credentials, tobytes(target_service_account))
134
+
135
+ options.default_bucket_location = tobytes(default_bucket_location)
136
+
137
+ if scheme is not None:
138
+ options.scheme = tobytes(scheme)
139
+ if endpoint_override is not None:
140
+ options.endpoint_override = tobytes(endpoint_override)
141
+ if default_metadata is not None:
142
+ options.default_metadata = pyarrow_unwrap_metadata(
143
+ ensure_metadata(default_metadata))
144
+ if retry_time_limit is not None:
145
+ time_limit_seconds = retry_time_limit.total_seconds()
146
+ options.retry_limit_seconds = time_limit_seconds
147
+ if project_id is not None:
148
+ options.project_id = <c_string>tobytes(project_id)
149
+
150
+ with nogil:
151
+ wrapped = GetResultValue(CGcsFileSystem.Make(options))
152
+
153
+ self.init(<shared_ptr[CFileSystem]> wrapped)
154
+
155
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
156
+ FileSystem.init(self, wrapped)
157
+ self.gcsfs = <CGcsFileSystem*> wrapped.get()
158
+
159
+ def _expiration_datetime_from_options(self):
160
+ expiration_ns = TimePoint_to_ns(
161
+ self.gcsfs.options().credentials.expiration())
162
+ if expiration_ns == 0:
163
+ return None
164
+ return datetime.fromtimestamp(expiration_ns / 1.0e9, timezone.utc)
165
+
166
+ @staticmethod
167
+ @binding(True) # Required for cython < 3
168
+ def _reconstruct(kwargs):
169
+ # __reduce__ doesn't allow passing named arguments directly to the
170
+ # reconstructor, hence this wrapper.
171
+ return GcsFileSystem(**kwargs)
172
+
173
+ def __reduce__(self):
174
+ cdef CGcsOptions opts = self.gcsfs.options()
175
+ service_account = frombytes(opts.credentials.target_service_account())
176
+ expiration_dt = self._expiration_datetime_from_options()
177
+ retry_time_limit = None
178
+ if opts.retry_limit_seconds.has_value():
179
+ retry_time_limit = timedelta(
180
+ seconds=opts.retry_limit_seconds.value())
181
+ project_id = None
182
+ if opts.project_id.has_value():
183
+ project_id = frombytes(opts.project_id.value())
184
+ return (
185
+ GcsFileSystem._reconstruct, (dict(
186
+ access_token=frombytes(opts.credentials.access_token()),
187
+ anonymous=opts.credentials.anonymous(),
188
+ credential_token_expiration=expiration_dt,
189
+ target_service_account=service_account,
190
+ scheme=frombytes(opts.scheme),
191
+ endpoint_override=frombytes(opts.endpoint_override),
192
+ default_bucket_location=frombytes(
193
+ opts.default_bucket_location),
194
+ default_metadata=pyarrow_wrap_metadata(opts.default_metadata),
195
+ retry_time_limit=retry_time_limit,
196
+ project_id=project_id
197
+ ),))
198
+
199
+ @property
200
+ def default_bucket_location(self):
201
+ """
202
+ The GCP location this filesystem will write to.
203
+ """
204
+ return frombytes(self.gcsfs.options().default_bucket_location)
205
+
206
+ @property
207
+ def project_id(self):
208
+ """
209
+ The GCP project id this filesystem will use.
210
+ """
211
+ if self.gcsfs.options().project_id.has_value():
212
+ return frombytes(self.gcsfs.options().project_id.value())
env-llmeval/lib/python3.10/site-packages/pyarrow/_generated_version.py ADDED
@@ -0,0 +1,4 @@
1
+ # file generated by setuptools_scm
2
+ # don't change, don't track in version control
3
+ __version__ = version = '15.0.2'
4
+ __version_tuple__ = version_tuple = (15, 0, 2)
env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.pyx ADDED
@@ -0,0 +1,160 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.includes.common cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.includes.libarrow_fs cimport *
25
+ from pyarrow._fs cimport FileSystem
26
+
27
+ from pyarrow.lib import frombytes, tobytes
28
+ from pyarrow.util import _stringify_path
29
+
30
+
31
+ cdef class HadoopFileSystem(FileSystem):
32
+ """
33
+ HDFS backed FileSystem implementation
34
+
35
+ Parameters
36
+ ----------
37
+ host : str
38
+ HDFS host to connect to. Set to "default" for fs.defaultFS from
39
+ core-site.xml.
40
+ port : int, default 8020
41
+ HDFS port to connect to. Set to 0 for default or logical (HA) nodes.
42
+ user : str, default None
43
+ Username when connecting to HDFS; None implies login user.
44
+ replication : int, default 3
45
+ Number of copies each block will have.
46
+ buffer_size : int, default 0
47
+ If 0, no buffering will happen otherwise the size of the temporary read
48
+ and write buffer.
49
+ default_block_size : int, default None
50
+ None means the default configuration for HDFS, a typical block size is
51
+ 128 MB.
52
+ kerb_ticket : string or path, default None
53
+ If not None, the path to the Kerberos ticket cache.
54
+ extra_conf : dict, default None
55
+ Extra key/value pairs for configuration; will override any
56
+ hdfs-site.xml properties.
57
+
58
+ Examples
59
+ --------
60
+ >>> from pyarrow import fs
61
+ >>> hdfs = fs.HadoopFileSystem(host, port, user=user, kerb_ticket=ticket_cache_path) # doctest: +SKIP
62
+
63
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
64
+ """
65
+
66
+ cdef:
67
+ CHadoopFileSystem* hdfs
68
+
69
+ def __init__(self, str host, int port=8020, *, str user=None,
70
+ int replication=3, int buffer_size=0,
71
+ default_block_size=None, kerb_ticket=None,
72
+ extra_conf=None):
73
+ cdef:
74
+ CHdfsOptions options
75
+ shared_ptr[CHadoopFileSystem] wrapped
76
+
77
+ if not host.startswith(('hdfs://', 'viewfs://')) and host != "default":
78
+ # TODO(kszucs): do more sanitization
79
+ host = 'hdfs://{}'.format(host)
80
+
81
+ options.ConfigureEndPoint(tobytes(host), int(port))
82
+ options.ConfigureReplication(replication)
83
+ options.ConfigureBufferSize(buffer_size)
84
+
85
+ if user is not None:
86
+ options.ConfigureUser(tobytes(user))
87
+ if default_block_size is not None:
88
+ options.ConfigureBlockSize(default_block_size)
89
+ if kerb_ticket is not None:
90
+ options.ConfigureKerberosTicketCachePath(
91
+ tobytes(_stringify_path(kerb_ticket)))
92
+ if extra_conf is not None:
93
+ for k, v in extra_conf.items():
94
+ options.ConfigureExtraConf(tobytes(k), tobytes(v))
95
+
96
+ with nogil:
97
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
98
+ self.init(<shared_ptr[CFileSystem]> wrapped)
99
+
100
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
101
+ FileSystem.init(self, wrapped)
102
+ self.hdfs = <CHadoopFileSystem*> wrapped.get()
103
+
104
+ @staticmethod
105
+ def from_uri(uri):
106
+ """
107
+ Instantiate HadoopFileSystem object from an URI string.
108
+
109
+ The following two calls are equivalent
110
+
111
+ * ``HadoopFileSystem.from_uri('hdfs://localhost:8020/?user=test\
112
+ &replication=1')``
113
+ * ``HadoopFileSystem('localhost', port=8020, user='test', \
114
+ replication=1)``
115
+
116
+ Parameters
117
+ ----------
118
+ uri : str
119
+ A string URI describing the connection to HDFS.
120
+ In order to change the user, replication, buffer_size or
121
+ default_block_size pass the values as query parts.
122
+
123
+ Returns
124
+ -------
125
+ HadoopFileSystem
126
+ """
127
+ cdef:
128
+ HadoopFileSystem self = HadoopFileSystem.__new__(HadoopFileSystem)
129
+ shared_ptr[CHadoopFileSystem] wrapped
130
+ CHdfsOptions options
131
+
132
+ options = GetResultValue(CHdfsOptions.FromUriString(tobytes(uri)))
133
+ with nogil:
134
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
135
+
136
+ self.init(<shared_ptr[CFileSystem]> wrapped)
137
+ return self
138
+
139
+ @staticmethod
140
+ @binding(True) # Required for cython < 3
141
+ def _reconstruct(kwargs):
142
+ # __reduce__ doesn't allow passing named arguments directly to the
143
+ # reconstructor, hence this wrapper.
144
+ return HadoopFileSystem(**kwargs)
145
+
146
+ def __reduce__(self):
147
+ cdef CHdfsOptions opts = self.hdfs.options()
148
+ return (
149
+ HadoopFileSystem._reconstruct, (dict(
150
+ host=frombytes(opts.connection_config.host),
151
+ port=opts.connection_config.port,
152
+ user=frombytes(opts.connection_config.user),
153
+ replication=opts.replication,
154
+ buffer_size=opts.buffer_size,
155
+ default_block_size=opts.default_block_size,
156
+ kerb_ticket=frombytes(opts.connection_config.kerb_ticket),
157
+ extra_conf={frombytes(k): frombytes(v)
158
+ for k, v in opts.connection_config.extra_conf},
159
+ ),)
160
+ )
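Tying the pieces of _hdfs.pyx together: from_uri and the keyword constructor are interchangeable, and _reconstruct/__reduce__ make the filesystem picklable. A sketch only, assuming libhdfs is available and using placeholder host/user values:

    import pickle
    from pyarrow import fs

    # Equivalent ways to build the same connection (see the from_uri docstring).
    hdfs = fs.HadoopFileSystem.from_uri(
        "hdfs://namenode:8020/?user=alice&replication=1")
    # hdfs = fs.HadoopFileSystem("namenode", port=8020, user="alice", replication=1)

    # __reduce__ serializes the connection options, so the object round-trips.
    restored = pickle.loads(pickle.dumps(hdfs))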
env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (245 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (112 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (209 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pxd ADDED
@@ -0,0 +1,134 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # distutils: language = c++
+ # cython: language_level = 3
+
+ from libcpp cimport bool as c_bool
+ from libc.string cimport const_char
+ from libcpp.vector cimport vector as std_vector
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus,
+ CResult, CTable, CMemoryPool,
+ CKeyValueMetadata,
+ CRecordBatch,
+ CTable, CCompressionType,
+ CRandomAccessFile, COutputStream,
+ TimeUnit)
+
+ cdef extern from "arrow/adapters/orc/options.h" \
+ namespace "arrow::adapters::orc" nogil:
+ cdef enum CompressionStrategy \
+ " arrow::adapters::orc::CompressionStrategy":
+ _CompressionStrategy_SPEED \
+ " arrow::adapters::orc::CompressionStrategy::kSpeed"
+ _CompressionStrategy_COMPRESSION \
+ " arrow::adapters::orc::CompressionStrategy::kCompression"
+
+ cdef enum WriterId" arrow::adapters::orc::WriterId":
+ _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava"
+ _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp"
+ _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto"
+ _WriterId_SCRITCHLEY_GO \
+ " arrow::adapters::orc::WriterId::kScritchleyGo"
+ _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino"
+ _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown"
+
+ cdef enum WriterVersion" arrow::adapters::orc::WriterVersion":
+ _WriterVersion_ORIGINAL \
+ " arrow::adapters::orc::WriterVersion::kOriginal"
+ _WriterVersion_HIVE_8732 \
+ " arrow::adapters::orc::WriterVersion::kHive8732"
+ _WriterVersion_HIVE_4243 \
+ " arrow::adapters::orc::WriterVersion::kHive4243"
+ _WriterVersion_HIVE_12055 \
+ " arrow::adapters::orc::WriterVersion::kHive12055"
+ _WriterVersion_HIVE_13083 \
+ " arrow::adapters::orc::WriterVersion::kHive13083"
+ _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101"
+ _WriterVersion_ORC_135" arrow::adapters::orc::WriterVersion::kOrc135"
+ _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517"
+ _WriterVersion_ORC_203" arrow::adapters::orc::WriterVersion::kOrc203"
+ _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14"
+ _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax"
+
+ cdef cppclass FileVersion" arrow::adapters::orc::FileVersion":
+ FileVersion(uint32_t major_version, uint32_t minor_version)
+ uint32_t major_version()
+ uint32_t minor_version()
+ c_string ToString()
+
+ cdef struct WriteOptions" arrow::adapters::orc::WriteOptions":
+ int64_t batch_size
+ FileVersion file_version
+ int64_t stripe_size
+ CCompressionType compression
+ int64_t compression_block_size
+ CompressionStrategy compression_strategy
+ int64_t row_index_stride
+ double padding_tolerance
+ double dictionary_key_size_threshold
+ std_vector[int64_t] bloom_filter_columns
+ double bloom_filter_fpp
+
+
+ cdef extern from "arrow/adapters/orc/adapter.h" \
+ namespace "arrow::adapters::orc" nogil:
+
+ cdef cppclass ORCFileReader:
+ @staticmethod
+ CResult[unique_ptr[ORCFileReader]] Open(
+ const shared_ptr[CRandomAccessFile]& file,
+ CMemoryPool* pool)
+
+ CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata()
+
+ CResult[shared_ptr[CSchema]] ReadSchema()
+
+ CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe)
+ CResult[shared_ptr[CRecordBatch]] ReadStripe(
+ int64_t stripe, std_vector[c_string])
+
+ CResult[shared_ptr[CTable]] Read()
+ CResult[shared_ptr[CTable]] Read(std_vector[c_string])
+
+ int64_t NumberOfStripes()
+ int64_t NumberOfRows()
+ FileVersion GetFileVersion()
+ c_string GetSoftwareVersion()
+ CResult[CCompressionType] GetCompression()
+ int64_t GetCompressionSize()
+ int64_t GetRowIndexStride()
+ WriterId GetWriterId()
+ int32_t GetWriterIdValue()
+ WriterVersion GetWriterVersion()
+ int64_t GetNumberOfStripeStatistics()
+ int64_t GetContentLength()
+ int64_t GetStripeStatisticsLength()
+ int64_t GetFileFooterLength()
+ int64_t GetFilePostscriptLength()
+ int64_t GetFileLength()
+ c_string GetSerializedFileTail()
+
+ cdef cppclass ORCFileWriter:
+ @staticmethod
+ CResult[unique_ptr[ORCFileWriter]] Open(
+ COutputStream* output_stream, const WriteOptions& writer_options)
+
+ CStatus Write(const CTable& table)
+
+ CStatus Close()
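These declarations back the public pyarrow.orc module. A rough usage sketch of that Python API (file name and settings are made up; requires an ORC-enabled build):

    import pyarrow as pa
    import pyarrow.orc as orc

    orc.write_table(pa.table({"x": [1, 2, 3]}), "example.orc", compression="zstd")

    f = orc.ORCFile("example.orc")          # wraps ORCFileReader::Open above
    print(f.nrows, f.nstripes, f.compression)
    print(f.read(columns=["x"]))            # the Read(std_vector[c_string]) overload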
env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pxd ADDED
@@ -0,0 +1,674 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport (CChunkedArray, CScalar, CSchema, CStatus,
23
+ CTable, CMemoryPool, CBuffer,
24
+ CKeyValueMetadata, CRandomAccessFile,
25
+ COutputStream, CCacheOptions,
26
+ TimeUnit, CRecordBatchReader)
27
+ from pyarrow.lib cimport _Weakrefable
28
+
29
+
30
+ cdef extern from "parquet/api/schema.h" namespace "parquet::schema" nogil:
31
+ cdef cppclass Node:
32
+ pass
33
+
34
+ cdef cppclass GroupNode(Node):
35
+ pass
36
+
37
+ cdef cppclass PrimitiveNode(Node):
38
+ pass
39
+
40
+ cdef cppclass ColumnPath:
41
+ c_string ToDotString()
42
+ vector[c_string] ToDotVector()
43
+
44
+
45
+ cdef extern from "parquet/api/schema.h" namespace "parquet" nogil:
46
+ enum ParquetType" parquet::Type::type":
47
+ ParquetType_BOOLEAN" parquet::Type::BOOLEAN"
48
+ ParquetType_INT32" parquet::Type::INT32"
49
+ ParquetType_INT64" parquet::Type::INT64"
50
+ ParquetType_INT96" parquet::Type::INT96"
51
+ ParquetType_FLOAT" parquet::Type::FLOAT"
52
+ ParquetType_DOUBLE" parquet::Type::DOUBLE"
53
+ ParquetType_BYTE_ARRAY" parquet::Type::BYTE_ARRAY"
54
+ ParquetType_FIXED_LEN_BYTE_ARRAY" parquet::Type::FIXED_LEN_BYTE_ARRAY"
55
+
56
+ enum ParquetLogicalTypeId" parquet::LogicalType::Type::type":
57
+ ParquetLogicalType_UNDEFINED" parquet::LogicalType::Type::UNDEFINED"
58
+ ParquetLogicalType_STRING" parquet::LogicalType::Type::STRING"
59
+ ParquetLogicalType_MAP" parquet::LogicalType::Type::MAP"
60
+ ParquetLogicalType_LIST" parquet::LogicalType::Type::LIST"
61
+ ParquetLogicalType_ENUM" parquet::LogicalType::Type::ENUM"
62
+ ParquetLogicalType_DECIMAL" parquet::LogicalType::Type::DECIMAL"
63
+ ParquetLogicalType_DATE" parquet::LogicalType::Type::DATE"
64
+ ParquetLogicalType_TIME" parquet::LogicalType::Type::TIME"
65
+ ParquetLogicalType_TIMESTAMP" parquet::LogicalType::Type::TIMESTAMP"
66
+ ParquetLogicalType_INT" parquet::LogicalType::Type::INT"
67
+ ParquetLogicalType_JSON" parquet::LogicalType::Type::JSON"
68
+ ParquetLogicalType_BSON" parquet::LogicalType::Type::BSON"
69
+ ParquetLogicalType_UUID" parquet::LogicalType::Type::UUID"
70
+ ParquetLogicalType_NONE" parquet::LogicalType::Type::NONE"
71
+
72
+ enum ParquetTimeUnit" parquet::LogicalType::TimeUnit::unit":
73
+ ParquetTimeUnit_UNKNOWN" parquet::LogicalType::TimeUnit::UNKNOWN"
74
+ ParquetTimeUnit_MILLIS" parquet::LogicalType::TimeUnit::MILLIS"
75
+ ParquetTimeUnit_MICROS" parquet::LogicalType::TimeUnit::MICROS"
76
+ ParquetTimeUnit_NANOS" parquet::LogicalType::TimeUnit::NANOS"
77
+
78
+ enum ParquetConvertedType" parquet::ConvertedType::type":
79
+ ParquetConvertedType_NONE" parquet::ConvertedType::NONE"
80
+ ParquetConvertedType_UTF8" parquet::ConvertedType::UTF8"
81
+ ParquetConvertedType_MAP" parquet::ConvertedType::MAP"
82
+ ParquetConvertedType_MAP_KEY_VALUE \
83
+ " parquet::ConvertedType::MAP_KEY_VALUE"
84
+ ParquetConvertedType_LIST" parquet::ConvertedType::LIST"
85
+ ParquetConvertedType_ENUM" parquet::ConvertedType::ENUM"
86
+ ParquetConvertedType_DECIMAL" parquet::ConvertedType::DECIMAL"
87
+ ParquetConvertedType_DATE" parquet::ConvertedType::DATE"
88
+ ParquetConvertedType_TIME_MILLIS" parquet::ConvertedType::TIME_MILLIS"
89
+ ParquetConvertedType_TIME_MICROS" parquet::ConvertedType::TIME_MICROS"
90
+ ParquetConvertedType_TIMESTAMP_MILLIS \
91
+ " parquet::ConvertedType::TIMESTAMP_MILLIS"
92
+ ParquetConvertedType_TIMESTAMP_MICROS \
93
+ " parquet::ConvertedType::TIMESTAMP_MICROS"
94
+ ParquetConvertedType_UINT_8" parquet::ConvertedType::UINT_8"
95
+ ParquetConvertedType_UINT_16" parquet::ConvertedType::UINT_16"
96
+ ParquetConvertedType_UINT_32" parquet::ConvertedType::UINT_32"
97
+ ParquetConvertedType_UINT_64" parquet::ConvertedType::UINT_64"
98
+ ParquetConvertedType_INT_8" parquet::ConvertedType::INT_8"
99
+ ParquetConvertedType_INT_16" parquet::ConvertedType::INT_16"
100
+ ParquetConvertedType_INT_32" parquet::ConvertedType::INT_32"
101
+ ParquetConvertedType_INT_64" parquet::ConvertedType::INT_64"
102
+ ParquetConvertedType_JSON" parquet::ConvertedType::JSON"
103
+ ParquetConvertedType_BSON" parquet::ConvertedType::BSON"
104
+ ParquetConvertedType_INTERVAL" parquet::ConvertedType::INTERVAL"
105
+
106
+ enum ParquetRepetition" parquet::Repetition::type":
107
+ ParquetRepetition_REQUIRED" parquet::REPETITION::REQUIRED"
108
+ ParquetRepetition_OPTIONAL" parquet::REPETITION::OPTIONAL"
109
+ ParquetRepetition_REPEATED" parquet::REPETITION::REPEATED"
110
+
111
+ enum ParquetEncoding" parquet::Encoding::type":
112
+ ParquetEncoding_PLAIN" parquet::Encoding::PLAIN"
113
+ ParquetEncoding_PLAIN_DICTIONARY" parquet::Encoding::PLAIN_DICTIONARY"
114
+ ParquetEncoding_RLE" parquet::Encoding::RLE"
115
+ ParquetEncoding_BIT_PACKED" parquet::Encoding::BIT_PACKED"
116
+ ParquetEncoding_DELTA_BINARY_PACKED \
117
+ " parquet::Encoding::DELTA_BINARY_PACKED"
118
+ ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY \
119
+ " parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY"
120
+ ParquetEncoding_DELTA_BYTE_ARRAY" parquet::Encoding::DELTA_BYTE_ARRAY"
121
+ ParquetEncoding_RLE_DICTIONARY" parquet::Encoding::RLE_DICTIONARY"
122
+ ParquetEncoding_BYTE_STREAM_SPLIT \
123
+ " parquet::Encoding::BYTE_STREAM_SPLIT"
124
+
125
+ enum ParquetCompression" parquet::Compression::type":
126
+ ParquetCompression_UNCOMPRESSED" parquet::Compression::UNCOMPRESSED"
127
+ ParquetCompression_SNAPPY" parquet::Compression::SNAPPY"
128
+ ParquetCompression_GZIP" parquet::Compression::GZIP"
129
+ ParquetCompression_LZO" parquet::Compression::LZO"
130
+ ParquetCompression_BROTLI" parquet::Compression::BROTLI"
131
+ ParquetCompression_LZ4" parquet::Compression::LZ4"
132
+ ParquetCompression_ZSTD" parquet::Compression::ZSTD"
133
+
134
+ enum ParquetVersion" parquet::ParquetVersion::type":
135
+ ParquetVersion_V1" parquet::ParquetVersion::PARQUET_1_0"
136
+ ParquetVersion_V2_0" parquet::ParquetVersion::PARQUET_2_0"
137
+ ParquetVersion_V2_4" parquet::ParquetVersion::PARQUET_2_4"
138
+ ParquetVersion_V2_6" parquet::ParquetVersion::PARQUET_2_6"
139
+
140
+ enum ParquetSortOrder" parquet::SortOrder::type":
141
+ ParquetSortOrder_SIGNED" parquet::SortOrder::SIGNED"
142
+ ParquetSortOrder_UNSIGNED" parquet::SortOrder::UNSIGNED"
143
+ ParquetSortOrder_UNKNOWN" parquet::SortOrder::UNKNOWN"
144
+
145
+ cdef cppclass CParquetLogicalType" parquet::LogicalType":
146
+ c_string ToString() const
147
+ c_string ToJSON() const
148
+ ParquetLogicalTypeId type() const
149
+
150
+ cdef cppclass CParquetDecimalType \
151
+ " parquet::DecimalLogicalType"(CParquetLogicalType):
152
+ int32_t precision() const
153
+ int32_t scale() const
154
+
155
+ cdef cppclass CParquetIntType \
156
+ " parquet::IntLogicalType"(CParquetLogicalType):
157
+ int bit_width() const
158
+ c_bool is_signed() const
159
+
160
+ cdef cppclass CParquetTimeType \
161
+ " parquet::TimeLogicalType"(CParquetLogicalType):
162
+ c_bool is_adjusted_to_utc() const
163
+ ParquetTimeUnit time_unit() const
164
+
165
+ cdef cppclass CParquetTimestampType \
166
+ " parquet::TimestampLogicalType"(CParquetLogicalType):
167
+ c_bool is_adjusted_to_utc() const
168
+ ParquetTimeUnit time_unit() const
169
+
170
+ cdef cppclass ColumnDescriptor" parquet::ColumnDescriptor":
171
+ c_bool Equals(const ColumnDescriptor& other)
172
+
173
+ shared_ptr[ColumnPath] path()
174
+ int16_t max_definition_level()
175
+ int16_t max_repetition_level()
176
+
177
+ ParquetType physical_type()
178
+ const shared_ptr[const CParquetLogicalType]& logical_type()
179
+ ParquetConvertedType converted_type()
180
+ const c_string& name()
181
+ int type_length()
182
+ int type_precision()
183
+ int type_scale()
184
+
185
+ cdef cppclass SchemaDescriptor:
186
+ const ColumnDescriptor* Column(int i)
187
+ shared_ptr[Node] schema()
188
+ GroupNode* group()
189
+ c_bool Equals(const SchemaDescriptor& other)
190
+ c_string ToString()
191
+ int num_columns()
192
+
193
+ cdef c_string FormatStatValue(ParquetType parquet_type, c_string val)
194
+
195
+ enum ParquetCipher" parquet::ParquetCipher::type":
196
+ ParquetCipher_AES_GCM_V1" parquet::ParquetCipher::AES_GCM_V1"
197
+ ParquetCipher_AES_GCM_CTR_V1" parquet::ParquetCipher::AES_GCM_CTR_V1"
198
+
199
+ struct AadMetadata:
200
+ c_string aad_prefix
201
+ c_string aad_file_unique
202
+ c_bool supply_aad_prefix
203
+
204
+ struct EncryptionAlgorithm:
205
+ ParquetCipher algorithm
206
+ AadMetadata aad
207
+
208
+ cdef extern from "parquet/api/reader.h" namespace "parquet" nogil:
209
+ cdef cppclass ColumnReader:
210
+ pass
211
+
212
+ cdef cppclass BoolReader(ColumnReader):
213
+ pass
214
+
215
+ cdef cppclass Int32Reader(ColumnReader):
216
+ pass
217
+
218
+ cdef cppclass Int64Reader(ColumnReader):
219
+ pass
220
+
221
+ cdef cppclass Int96Reader(ColumnReader):
222
+ pass
223
+
224
+ cdef cppclass FloatReader(ColumnReader):
225
+ pass
226
+
227
+ cdef cppclass DoubleReader(ColumnReader):
228
+ pass
229
+
230
+ cdef cppclass ByteArrayReader(ColumnReader):
231
+ pass
232
+
233
+ cdef cppclass RowGroupReader:
234
+ pass
235
+
236
+ cdef cppclass CEncodedStatistics" parquet::EncodedStatistics":
237
+ const c_string& max() const
238
+ const c_string& min() const
239
+ int64_t null_count
240
+ int64_t distinct_count
241
+ bint has_min
242
+ bint has_max
243
+ bint has_null_count
244
+ bint has_distinct_count
245
+
246
+ cdef cppclass ParquetByteArray" parquet::ByteArray":
247
+ uint32_t len
248
+ const uint8_t* ptr
249
+
250
+ cdef cppclass ParquetFLBA" parquet::FLBA":
251
+ const uint8_t* ptr
252
+
253
+ cdef cppclass CStatistics" parquet::Statistics":
254
+ int64_t null_count() const
255
+ int64_t distinct_count() const
256
+ int64_t num_values() const
257
+ bint HasMinMax()
258
+ bint HasNullCount()
259
+ bint HasDistinctCount()
260
+ c_bool Equals(const CStatistics&) const
261
+ void Reset()
262
+ c_string EncodeMin()
263
+ c_string EncodeMax()
264
+ CEncodedStatistics Encode()
265
+ void SetComparator()
266
+ ParquetType physical_type() const
267
+ const ColumnDescriptor* descr() const
268
+
269
+ cdef cppclass CBoolStatistics" parquet::BoolStatistics"(CStatistics):
270
+ c_bool min()
271
+ c_bool max()
272
+
273
+ cdef cppclass CInt32Statistics" parquet::Int32Statistics"(CStatistics):
274
+ int32_t min()
275
+ int32_t max()
276
+
277
+ cdef cppclass CInt64Statistics" parquet::Int64Statistics"(CStatistics):
278
+ int64_t min()
279
+ int64_t max()
280
+
281
+ cdef cppclass CFloatStatistics" parquet::FloatStatistics"(CStatistics):
282
+ float min()
283
+ float max()
284
+
285
+ cdef cppclass CDoubleStatistics" parquet::DoubleStatistics"(CStatistics):
286
+ double min()
287
+ double max()
288
+
289
+ cdef cppclass CByteArrayStatistics \
290
+ " parquet::ByteArrayStatistics"(CStatistics):
291
+ ParquetByteArray min()
292
+ ParquetByteArray max()
293
+
294
+ cdef cppclass CFLBAStatistics" parquet::FLBAStatistics"(CStatistics):
295
+ ParquetFLBA min()
296
+ ParquetFLBA max()
297
+
298
+ cdef cppclass CColumnCryptoMetaData" parquet::ColumnCryptoMetaData":
299
+ shared_ptr[ColumnPath] path_in_schema() const
300
+ c_bool encrypted_with_footer_key() const
301
+ const c_string& key_metadata() const
302
+
303
+ cdef cppclass ParquetIndexLocation" parquet::IndexLocation":
304
+ int64_t offset
305
+ int32_t length
306
+
307
+ cdef cppclass CColumnChunkMetaData" parquet::ColumnChunkMetaData":
308
+ int64_t file_offset() const
309
+ const c_string& file_path() const
310
+
311
+ c_bool is_metadata_set() const
312
+ ParquetType type() const
313
+ int64_t num_values() const
314
+ shared_ptr[ColumnPath] path_in_schema() const
315
+ bint is_stats_set() const
316
+ shared_ptr[CStatistics] statistics() const
317
+ ParquetCompression compression() const
318
+ const vector[ParquetEncoding]& encodings() const
319
+ c_bool Equals(const CColumnChunkMetaData&) const
320
+
321
+ int64_t has_dictionary_page() const
322
+ int64_t dictionary_page_offset() const
323
+ int64_t data_page_offset() const
324
+ int64_t index_page_offset() const
325
+ int64_t total_compressed_size() const
326
+ int64_t total_uncompressed_size() const
327
+ unique_ptr[CColumnCryptoMetaData] crypto_metadata() const
328
+ optional[ParquetIndexLocation] GetColumnIndexLocation() const
329
+ optional[ParquetIndexLocation] GetOffsetIndexLocation() const
330
+
331
+ struct CSortingColumn" parquet::SortingColumn":
332
+ int column_idx
333
+ c_bool descending
334
+ c_bool nulls_first
335
+
336
+ cdef cppclass CRowGroupMetaData" parquet::RowGroupMetaData":
337
+ c_bool Equals(const CRowGroupMetaData&) const
338
+ int num_columns() const
339
+ int64_t num_rows() const
340
+ int64_t total_byte_size() const
341
+ vector[CSortingColumn] sorting_columns() const
342
+ unique_ptr[CColumnChunkMetaData] ColumnChunk(int i) const
343
+
344
+ cdef cppclass CFileMetaData" parquet::FileMetaData":
345
+ c_bool Equals(const CFileMetaData&) const
346
+ uint32_t size()
347
+ int num_columns()
348
+ int64_t num_rows()
349
+ int num_row_groups()
350
+ ParquetVersion version()
351
+ const c_string created_by()
352
+ int num_schema_elements()
353
+
354
+ void set_file_path(const c_string& path)
355
+ void AppendRowGroups(const CFileMetaData& other) except +
356
+
357
+ unique_ptr[CRowGroupMetaData] RowGroup(int i)
358
+ const SchemaDescriptor* schema()
359
+ shared_ptr[const CKeyValueMetadata] key_value_metadata() const
360
+ void WriteTo(COutputStream* dst) const
361
+
362
+ inline c_bool is_encryption_algorithm_set() const
363
+ inline EncryptionAlgorithm encryption_algorithm() const
364
+ inline const c_string& footer_signing_key_metadata() const
365
+
366
+ cdef shared_ptr[CFileMetaData] CFileMetaData_Make \
367
+ " parquet::FileMetaData::Make"(const void* serialized_metadata,
368
+ uint32_t* metadata_len)
369
+
370
+ cdef cppclass CReaderProperties" parquet::ReaderProperties":
371
+ c_bool is_buffered_stream_enabled() const
372
+ void enable_buffered_stream()
373
+ void disable_buffered_stream()
374
+
375
+ void set_buffer_size(int64_t buf_size)
376
+ int64_t buffer_size() const
377
+
378
+ void set_thrift_string_size_limit(int32_t size)
379
+ int32_t thrift_string_size_limit() const
380
+
381
+ void set_thrift_container_size_limit(int32_t size)
382
+ int32_t thrift_container_size_limit() const
383
+
384
+ void file_decryption_properties(shared_ptr[CFileDecryptionProperties]
385
+ decryption)
386
+ shared_ptr[CFileDecryptionProperties] file_decryption_properties() \
387
+ const
388
+
389
+ c_bool page_checksum_verification() const
390
+ void set_page_checksum_verification(c_bool check_crc)
391
+
392
+ CReaderProperties default_reader_properties()
393
+
394
+ cdef cppclass ArrowReaderProperties:
395
+ ArrowReaderProperties()
396
+ void set_read_dictionary(int column_index, c_bool read_dict)
397
+ c_bool read_dictionary()
398
+ void set_batch_size(int64_t batch_size)
399
+ int64_t batch_size()
400
+ void set_pre_buffer(c_bool pre_buffer)
401
+ c_bool pre_buffer() const
402
+ void set_cache_options(CCacheOptions options)
403
+ CCacheOptions cache_options() const
404
+ void set_coerce_int96_timestamp_unit(TimeUnit unit)
405
+ TimeUnit coerce_int96_timestamp_unit() const
406
+
407
+ ArrowReaderProperties default_arrow_reader_properties()
408
+
409
+ cdef cppclass ParquetFileReader:
410
+ shared_ptr[CFileMetaData] metadata()
411
+
412
+
413
+ cdef extern from "parquet/api/writer.h" namespace "parquet" nogil:
414
+ cdef cppclass WriterProperties:
415
+ cppclass Builder:
416
+ Builder* data_page_version(ParquetDataPageVersion version)
417
+ Builder* version(ParquetVersion version)
418
+ Builder* compression(ParquetCompression codec)
419
+ Builder* compression(const c_string& path,
420
+ ParquetCompression codec)
421
+ Builder* compression_level(int compression_level)
422
+ Builder* compression_level(const c_string& path,
423
+ int compression_level)
424
+ Builder* encryption(
425
+ shared_ptr[CFileEncryptionProperties]
426
+ file_encryption_properties)
427
+ Builder* disable_dictionary()
428
+ Builder* enable_dictionary()
429
+ Builder* enable_dictionary(const c_string& path)
430
+ Builder* set_sorting_columns(vector[CSortingColumn] sorting_columns)
431
+ Builder* disable_statistics()
432
+ Builder* enable_statistics()
433
+ Builder* enable_statistics(const c_string& path)
434
+ Builder* data_pagesize(int64_t size)
435
+ Builder* encoding(ParquetEncoding encoding)
436
+ Builder* encoding(const c_string& path,
437
+ ParquetEncoding encoding)
438
+ Builder* max_row_group_length(int64_t size)
439
+ Builder* write_batch_size(int64_t batch_size)
440
+ Builder* dictionary_pagesize_limit(int64_t dictionary_pagesize_limit)
441
+ Builder* enable_write_page_index()
442
+ Builder* disable_write_page_index()
443
+ Builder* enable_page_checksum()
444
+ Builder* disable_page_checksum()
445
+ shared_ptr[WriterProperties] build()
446
+
447
+ cdef cppclass ArrowWriterProperties:
448
+ cppclass Builder:
449
+ Builder()
450
+ Builder* disable_deprecated_int96_timestamps()
451
+ Builder* enable_deprecated_int96_timestamps()
452
+ Builder* coerce_timestamps(TimeUnit unit)
453
+ Builder* allow_truncated_timestamps()
454
+ Builder* disallow_truncated_timestamps()
455
+ Builder* store_schema()
456
+ Builder* enable_compliant_nested_types()
457
+ Builder* disable_compliant_nested_types()
458
+ Builder* set_engine_version(ArrowWriterEngineVersion version)
459
+ shared_ptr[ArrowWriterProperties] build()
460
+ c_bool support_deprecated_int96_timestamps()
461
+
462
+
463
+ cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil:
464
+ cdef cppclass FileReader:
465
+ FileReader(CMemoryPool* pool, unique_ptr[ParquetFileReader] reader)
466
+
467
+ CStatus GetSchema(shared_ptr[CSchema]* out)
468
+
469
+ CStatus ReadColumn(int i, shared_ptr[CChunkedArray]* out)
470
+ CStatus ReadSchemaField(int i, shared_ptr[CChunkedArray]* out)
471
+
472
+ int num_row_groups()
473
+ CStatus ReadRowGroup(int i, shared_ptr[CTable]* out)
474
+ CStatus ReadRowGroup(int i, const vector[int]& column_indices,
475
+ shared_ptr[CTable]* out)
476
+
477
+ CStatus ReadRowGroups(const vector[int]& row_groups,
478
+ shared_ptr[CTable]* out)
479
+ CStatus ReadRowGroups(const vector[int]& row_groups,
480
+ const vector[int]& column_indices,
481
+ shared_ptr[CTable]* out)
482
+
483
+ CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
484
+ const vector[int]& column_indices,
485
+ unique_ptr[CRecordBatchReader]* out)
486
+ CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
487
+ unique_ptr[CRecordBatchReader]* out)
488
+
489
+ CStatus ReadTable(shared_ptr[CTable]* out)
490
+ CStatus ReadTable(const vector[int]& column_indices,
491
+ shared_ptr[CTable]* out)
492
+
493
+ CStatus ScanContents(vector[int] columns, int32_t column_batch_size,
494
+ int64_t* num_rows)
495
+
496
+ const ParquetFileReader* parquet_reader()
497
+
498
+ void set_use_threads(c_bool use_threads)
499
+
500
+ void set_batch_size(int64_t batch_size)
501
+
502
+ cdef cppclass FileReaderBuilder:
503
+ FileReaderBuilder()
504
+ CStatus Open(const shared_ptr[CRandomAccessFile]& file,
505
+ const CReaderProperties& properties,
506
+ const shared_ptr[CFileMetaData]& metadata)
507
+
508
+ ParquetFileReader* raw_reader()
509
+ FileReaderBuilder* memory_pool(CMemoryPool*)
510
+ FileReaderBuilder* properties(const ArrowReaderProperties&)
511
+ CStatus Build(unique_ptr[FileReader]* out)
512
+
513
+ CStatus FromParquetSchema(
514
+ const SchemaDescriptor* parquet_schema,
515
+ const ArrowReaderProperties& properties,
516
+ const shared_ptr[const CKeyValueMetadata]& key_value_metadata,
517
+ shared_ptr[CSchema]* out)
518
+
519
+ CStatus StatisticsAsScalars(const CStatistics& Statistics,
520
+ shared_ptr[CScalar]* min,
521
+ shared_ptr[CScalar]* max)
522
+
523
+ cdef extern from "parquet/arrow/schema.h" namespace "parquet::arrow" nogil:
524
+
525
+ CStatus ToParquetSchema(
526
+ const CSchema* arrow_schema,
527
+ const WriterProperties& properties,
528
+ const ArrowWriterProperties& arrow_properties,
529
+ shared_ptr[SchemaDescriptor]* out)
530
+
531
+
532
+ cdef extern from "parquet/properties.h" namespace "parquet" nogil:
533
+ cdef enum ArrowWriterEngineVersion:
534
+ V1 "parquet::ArrowWriterProperties::V1",
535
+ V2 "parquet::ArrowWriterProperties::V2"
536
+
537
+ cdef cppclass ParquetDataPageVersion:
538
+ pass
539
+
540
+ cdef ParquetDataPageVersion ParquetDataPageVersion_V1 \
541
+ " parquet::ParquetDataPageVersion::V1"
542
+ cdef ParquetDataPageVersion ParquetDataPageVersion_V2 \
543
+ " parquet::ParquetDataPageVersion::V2"
544
+
545
+ cdef extern from "parquet/arrow/writer.h" namespace "parquet::arrow" nogil:
546
+ cdef cppclass FileWriter:
547
+
548
+ @staticmethod
549
+ CResult[unique_ptr[FileWriter]] Open(const CSchema& schema, CMemoryPool* pool,
550
+ const shared_ptr[COutputStream]& sink,
551
+ const shared_ptr[WriterProperties]& properties,
552
+ const shared_ptr[ArrowWriterProperties]& arrow_properties)
553
+
554
+ CStatus WriteTable(const CTable& table, int64_t chunk_size)
555
+ CStatus NewRowGroup(int64_t chunk_size)
556
+ CStatus Close()
557
+
558
+ const shared_ptr[CFileMetaData] metadata() const
559
+
560
+ CStatus WriteMetaDataFile(
561
+ const CFileMetaData& file_metadata,
562
+ const COutputStream* sink)
563
+
564
+ cdef class FileEncryptionProperties:
565
+ """File-level encryption properties for the low-level API"""
566
+ cdef:
567
+ shared_ptr[CFileEncryptionProperties] properties
568
+
569
+ @staticmethod
570
+ cdef inline FileEncryptionProperties wrap(
571
+ shared_ptr[CFileEncryptionProperties] properties):
572
+
573
+ result = FileEncryptionProperties()
574
+ result.properties = properties
575
+ return result
576
+
577
+ cdef inline shared_ptr[CFileEncryptionProperties] unwrap(self):
578
+ return self.properties
579
+
580
+ cdef shared_ptr[WriterProperties] _create_writer_properties(
581
+ use_dictionary=*,
582
+ compression=*,
583
+ version=*,
584
+ write_statistics=*,
585
+ data_page_size=*,
586
+ compression_level=*,
587
+ use_byte_stream_split=*,
588
+ column_encoding=*,
589
+ data_page_version=*,
590
+ FileEncryptionProperties encryption_properties=*,
591
+ write_batch_size=*,
592
+ dictionary_pagesize_limit=*,
593
+ write_page_index=*,
594
+ write_page_checksum=*,
595
+ sorting_columns=*,
596
+ ) except *
597
+
598
+
599
+ cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
600
+ use_deprecated_int96_timestamps=*,
601
+ coerce_timestamps=*,
602
+ allow_truncated_timestamps=*,
603
+ writer_engine_version=*,
604
+ use_compliant_nested_type=*,
605
+ store_schema=*,
606
+ ) except *
607
+
608
+ cdef class ParquetSchema(_Weakrefable):
609
+ cdef:
610
+ FileMetaData parent # the FileMetaData owning the SchemaDescriptor
611
+ const SchemaDescriptor* schema
612
+
613
+ cdef class FileMetaData(_Weakrefable):
614
+ cdef:
615
+ shared_ptr[CFileMetaData] sp_metadata
616
+ CFileMetaData* _metadata
617
+ ParquetSchema _schema
618
+
619
+ cdef inline init(self, const shared_ptr[CFileMetaData]& metadata):
620
+ self.sp_metadata = metadata
621
+ self._metadata = metadata.get()
622
+
623
+ cdef class RowGroupMetaData(_Weakrefable):
624
+ cdef:
625
+ int index # for pickling support
626
+ unique_ptr[CRowGroupMetaData] up_metadata
627
+ CRowGroupMetaData* metadata
628
+ FileMetaData parent
629
+
630
+ cdef class ColumnChunkMetaData(_Weakrefable):
631
+ cdef:
632
+ unique_ptr[CColumnChunkMetaData] up_metadata
633
+ CColumnChunkMetaData* metadata
634
+ RowGroupMetaData parent
635
+
636
+ cdef inline init(self, RowGroupMetaData parent, int i):
637
+ self.up_metadata = parent.metadata.ColumnChunk(i)
638
+ self.metadata = self.up_metadata.get()
639
+ self.parent = parent
640
+
641
+ cdef class Statistics(_Weakrefable):
642
+ cdef:
643
+ shared_ptr[CStatistics] statistics
644
+ ColumnChunkMetaData parent
645
+
646
+ cdef inline init(self, const shared_ptr[CStatistics]& statistics,
647
+ ColumnChunkMetaData parent):
648
+ self.statistics = statistics
649
+ self.parent = parent
650
+
651
+ cdef extern from "parquet/encryption/encryption.h" namespace "parquet" nogil:
652
+ cdef cppclass CFileDecryptionProperties\
653
+ " parquet::FileDecryptionProperties":
654
+ pass
655
+
656
+ cdef cppclass CFileEncryptionProperties\
657
+ " parquet::FileEncryptionProperties":
658
+ pass
659
+
660
+ cdef class FileDecryptionProperties:
661
+ """File-level decryption properties for the low-level API"""
662
+ cdef:
663
+ shared_ptr[CFileDecryptionProperties] properties
664
+
665
+ @staticmethod
666
+ cdef inline FileDecryptionProperties wrap(
667
+ shared_ptr[CFileDecryptionProperties] properties):
668
+
669
+ result = FileDecryptionProperties()
670
+ result.properties = properties
671
+ return result
672
+
673
+ cdef inline shared_ptr[CFileDecryptionProperties] unwrap(self):
674
+ return self.properties
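The metadata and statistics classes declared in this .pxd are what pyarrow.parquet surfaces as FileMetaData, RowGroupMetaData, ColumnChunkMetaData and Statistics. A small inspection sketch (file name is a placeholder):

    import pyarrow as pa
    import pyarrow.parquet as pq

    pq.write_table(pa.table({"x": [1, 2, 3]}), "example.parquet")

    meta = pq.read_metadata("example.parquet")     # FileMetaData
    col = meta.row_group(0).column(0)              # ColumnChunkMetaData
    print(col.statistics.min, col.statistics.max)  # Statistics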
env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (284 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx ADDED
@@ -0,0 +1,484 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+
21
+ from datetime import timedelta
22
+
23
+ from cython.operator cimport dereference as deref
24
+ from libcpp.memory cimport shared_ptr
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.lib cimport _Weakrefable
28
+ from pyarrow.lib import tobytes, frombytes
29
+
30
+
31
+ cdef ParquetCipher cipher_from_name(name):
32
+ name = name.upper()
33
+ if name == 'AES_GCM_V1':
34
+ return ParquetCipher_AES_GCM_V1
35
+ elif name == 'AES_GCM_CTR_V1':
36
+ return ParquetCipher_AES_GCM_CTR_V1
37
+ else:
38
+ raise ValueError(f'Invalid cipher name: {name!r}')
39
+
40
+
41
+ cdef cipher_to_name(ParquetCipher cipher):
42
+ if ParquetCipher_AES_GCM_V1 == cipher:
43
+ return 'AES_GCM_V1'
44
+ elif ParquetCipher_AES_GCM_CTR_V1 == cipher:
45
+ return 'AES_GCM_CTR_V1'
46
+ else:
47
+ raise ValueError('Invalid cipher value: {0}'.format(cipher))
48
+
49
+ cdef class EncryptionConfiguration(_Weakrefable):
50
+ """Configuration of the encryption, such as which columns to encrypt"""
51
+ # Avoid mistakingly creating attributes
52
+ __slots__ = ()
53
+
54
+ def __init__(self, footer_key, *, column_keys=None,
55
+ encryption_algorithm=None,
56
+ plaintext_footer=None, double_wrapping=None,
57
+ cache_lifetime=None, internal_key_material=None,
58
+ data_key_length_bits=None):
59
+ self.configuration.reset(
60
+ new CEncryptionConfiguration(tobytes(footer_key)))
61
+ if column_keys is not None:
62
+ self.column_keys = column_keys
63
+ if encryption_algorithm is not None:
64
+ self.encryption_algorithm = encryption_algorithm
65
+ if plaintext_footer is not None:
66
+ self.plaintext_footer = plaintext_footer
67
+ if double_wrapping is not None:
68
+ self.double_wrapping = double_wrapping
69
+ if cache_lifetime is not None:
70
+ self.cache_lifetime = cache_lifetime
71
+ if internal_key_material is not None:
72
+ self.internal_key_material = internal_key_material
73
+ if data_key_length_bits is not None:
74
+ self.data_key_length_bits = data_key_length_bits
75
+
76
+ @property
77
+ def footer_key(self):
78
+ """ID of the master key for footer encryption/signing"""
79
+ return frombytes(self.configuration.get().footer_key)
80
+
81
+ @property
82
+ def column_keys(self):
83
+ """
84
+ List of columns to encrypt, with master key IDs.
85
+ """
86
+ column_keys_str = frombytes(self.configuration.get().column_keys)
87
+ # Convert from "masterKeyID:colName,colName;masterKeyID:colName..."
88
+ # (see HIVE-21848) to dictionary of master key ID to column name lists
89
+ column_keys_to_key_list_str = dict(subString.replace(" ", "").split(
90
+ ":") for subString in column_keys_str.split(";"))
91
+ column_keys_dict = {k: v.split(
92
+ ",") for k, v in column_keys_to_key_list_str.items()}
93
+ return column_keys_dict
94
+
95
+ @column_keys.setter
96
+ def column_keys(self, dict value):
97
+ if value is not None:
98
+ # convert a dictionary such as
99
+ # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}''
100
+ # to the string defined by the spec
101
+ # 'key1: col1 , col2; key2: col3 , col4'
102
+ column_keys = "; ".join(
103
+ ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()])
104
+ self.configuration.get().column_keys = tobytes(column_keys)
105
+
106
+ @property
107
+ def encryption_algorithm(self):
108
+ """Parquet encryption algorithm.
109
+ Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1"."""
110
+ return cipher_to_name(self.configuration.get().encryption_algorithm)
111
+
112
+ @encryption_algorithm.setter
113
+ def encryption_algorithm(self, value):
114
+ cipher = cipher_from_name(value)
115
+ self.configuration.get().encryption_algorithm = cipher
116
+
117
+ @property
118
+ def plaintext_footer(self):
119
+ """Write files with plaintext footer."""
120
+ return self.configuration.get().plaintext_footer
121
+
122
+ @plaintext_footer.setter
123
+ def plaintext_footer(self, value):
124
+ self.configuration.get().plaintext_footer = value
125
+
126
+ @property
127
+ def double_wrapping(self):
128
+ """Use double wrapping - where data encryption keys (DEKs) are
129
+ encrypted with key encryption keys (KEKs), which in turn are
130
+ encrypted with master keys.
131
+ If set to false, use single wrapping - where DEKs are
132
+ encrypted directly with master keys."""
133
+ return self.configuration.get().double_wrapping
134
+
135
+ @double_wrapping.setter
136
+ def double_wrapping(self, value):
137
+ self.configuration.get().double_wrapping = value
138
+
139
+ @property
140
+ def cache_lifetime(self):
141
+ """Lifetime of cached entities (key encryption keys,
142
+ local wrapping keys, KMS client objects)."""
143
+ return timedelta(
144
+ seconds=self.configuration.get().cache_lifetime_seconds)
145
+
146
+ @cache_lifetime.setter
147
+ def cache_lifetime(self, value):
148
+ if not isinstance(value, timedelta):
149
+ raise TypeError("cache_lifetime should be a timedelta")
150
+ self.configuration.get().cache_lifetime_seconds = value.total_seconds()
151
+
152
+ @property
153
+ def internal_key_material(self):
154
+ """Store key material inside Parquet file footers; this mode doesn’t
155
+ produce additional files. If set to false, key material is stored in
156
+ separate files in the same folder, which enables key rotation for
157
+ immutable Parquet files."""
158
+ return self.configuration.get().internal_key_material
159
+
160
+ @internal_key_material.setter
161
+ def internal_key_material(self, value):
162
+ self.configuration.get().internal_key_material = value
163
+
164
+ @property
165
+ def data_key_length_bits(self):
166
+ """Length of data encryption keys (DEKs), randomly generated by parquet key
167
+ management tools. Can be 128, 192 or 256 bits."""
168
+ return self.configuration.get().data_key_length_bits
169
+
170
+ @data_key_length_bits.setter
171
+ def data_key_length_bits(self, value):
172
+ self.configuration.get().data_key_length_bits = value
173
+
174
+ cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil:
175
+ return self.configuration
176
+
177
+
178
+ cdef class DecryptionConfiguration(_Weakrefable):
179
+ """Configuration of the decryption, such as cache timeout."""
180
+ # Avoid mistakingly creating attributes
181
+ __slots__ = ()
182
+
183
+ def __init__(self, *, cache_lifetime=None):
184
+ self.configuration.reset(new CDecryptionConfiguration())
185
+
186
+ @property
187
+ def cache_lifetime(self):
188
+ """Lifetime of cached entities (key encryption keys,
189
+ local wrapping keys, KMS client objects)."""
190
+ return timedelta(
191
+ seconds=self.configuration.get().cache_lifetime_seconds)
192
+
193
+ @cache_lifetime.setter
194
+ def cache_lifetime(self, value):
195
+ self.configuration.get().cache_lifetime_seconds = value.total_seconds()
196
+
197
+ cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil:
198
+ return self.configuration
199
+
200
+
201
+ cdef class KmsConnectionConfig(_Weakrefable):
202
+ """Configuration of the connection to the Key Management Service (KMS)"""
203
+ # Avoid mistakingly creating attributes
204
+ __slots__ = ()
205
+
206
+ def __init__(self, *, kms_instance_id=None, kms_instance_url=None,
207
+ key_access_token=None, custom_kms_conf=None):
208
+ self.configuration.reset(new CKmsConnectionConfig())
209
+ if kms_instance_id is not None:
210
+ self.kms_instance_id = kms_instance_id
211
+ if kms_instance_url is not None:
212
+ self.kms_instance_url = kms_instance_url
213
+ if key_access_token is None:
214
+ self.key_access_token = b'DEFAULT'
215
+ else:
216
+ self.key_access_token = key_access_token
217
+ if custom_kms_conf is not None:
218
+ self.custom_kms_conf = custom_kms_conf
219
+
220
+ @property
221
+ def kms_instance_id(self):
222
+ """ID of the KMS instance that will be used for encryption
223
+ (if multiple KMS instances are available)."""
224
+ return frombytes(self.configuration.get().kms_instance_id)
225
+
226
+ @kms_instance_id.setter
227
+ def kms_instance_id(self, value):
228
+ self.configuration.get().kms_instance_id = tobytes(value)
229
+
230
+ @property
231
+ def kms_instance_url(self):
232
+ """URL of the KMS instance."""
233
+ return frombytes(self.configuration.get().kms_instance_url)
234
+
235
+ @kms_instance_url.setter
236
+ def kms_instance_url(self, value):
237
+ self.configuration.get().kms_instance_url = tobytes(value)
238
+
239
+ @property
240
+ def key_access_token(self):
241
+ """Authorization token that will be passed to KMS."""
242
+ return frombytes(self.configuration.get()
243
+ .refreshable_key_access_token.get().value())
244
+
245
+ @key_access_token.setter
246
+ def key_access_token(self, value):
247
+ self.refresh_key_access_token(value)
248
+
249
+ @property
250
+ def custom_kms_conf(self):
251
+ """A dictionary with KMS-type-specific configuration"""
252
+ custom_kms_conf = {
253
+ frombytes(k): frombytes(v)
254
+ for k, v in self.configuration.get().custom_kms_conf
255
+ }
256
+ return custom_kms_conf
257
+
258
+ @custom_kms_conf.setter
259
+ def custom_kms_conf(self, dict value):
260
+ if value is not None:
261
+ for k, v in value.items():
262
+ if isinstance(k, str) and isinstance(v, str):
263
+ self.configuration.get().custom_kms_conf[tobytes(k)] = \
264
+ tobytes(v)
265
+ else:
266
+ raise TypeError("Expected custom_kms_conf to be " +
267
+ "a dictionary of strings")
268
+
269
+ def refresh_key_access_token(self, value):
270
+ cdef:
271
+ shared_ptr[CKeyAccessToken] c_key_access_token = \
272
+ self.configuration.get().refreshable_key_access_token
273
+
274
+ c_key_access_token.get().Refresh(tobytes(value))
275
+
276
+ cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil:
277
+ return self.configuration
278
+
279
+ @staticmethod
280
+ cdef wrap(const CKmsConnectionConfig& config):
281
+ result = KmsConnectionConfig()
282
+ result.configuration = make_shared[CKmsConnectionConfig](move(config))
283
+ return result
284
+
285
+
286
+ # Callback definitions for CPyKmsClientVtable
287
+ cdef void _cb_wrap_key(
288
+ handler, const c_string& key_bytes,
289
+ const c_string& master_key_identifier, c_string* out) except *:
290
+ mkid_str = frombytes(master_key_identifier)
291
+ wrapped_key = handler.wrap_key(key_bytes, mkid_str)
292
+ out[0] = tobytes(wrapped_key)
293
+
294
+
295
+ cdef void _cb_unwrap_key(
296
+ handler, const c_string& wrapped_key,
297
+ const c_string& master_key_identifier, c_string* out) except *:
298
+ mkid_str = frombytes(master_key_identifier)
299
+ wk_str = frombytes(wrapped_key)
300
+ key = handler.unwrap_key(wk_str, mkid_str)
301
+ out[0] = tobytes(key)
302
+
303
+
304
+ cdef class KmsClient(_Weakrefable):
305
+ """The abstract base class for KmsClient implementations."""
306
+ cdef:
307
+ shared_ptr[CKmsClient] client
308
+
309
+ def __init__(self):
310
+ self.init()
311
+
312
+ cdef init(self):
313
+ cdef:
314
+ CPyKmsClientVtable vtable = CPyKmsClientVtable()
315
+
316
+ vtable.wrap_key = _cb_wrap_key
317
+ vtable.unwrap_key = _cb_unwrap_key
318
+
319
+ self.client.reset(new CPyKmsClient(self, vtable))
320
+
321
+ def wrap_key(self, key_bytes, master_key_identifier):
322
+ """Wrap a key - encrypt it with the master key."""
323
+ raise NotImplementedError()
324
+
325
+ def unwrap_key(self, wrapped_key, master_key_identifier):
326
+ """Unwrap a key - decrypt it with the master key."""
327
+ raise NotImplementedError()
328
+
329
+ cdef inline shared_ptr[CKmsClient] unwrap(self) nogil:
330
+ return self.client
331
+
332
+
333
+ # Callback definition for CPyKmsClientFactoryVtable
334
+ cdef void _cb_create_kms_client(
335
+ handler,
336
+ const CKmsConnectionConfig& kms_connection_config,
337
+ shared_ptr[CKmsClient]* out) except *:
338
+ connection_config = KmsConnectionConfig.wrap(kms_connection_config)
339
+
340
+ result = handler(connection_config)
341
+ if not isinstance(result, KmsClient):
342
+ raise TypeError(
343
+ "callable must return KmsClient instances, but got {}".format(
344
+ type(result)))
345
+
346
+ out[0] = (<KmsClient> result).unwrap()
347
+
348
+
349
+ cdef class CryptoFactory(_Weakrefable):
350
+ """ A factory that produces the low-level FileEncryptionProperties and
351
+ FileDecryptionProperties objects, from the high-level parameters."""
352
+ # Avoid mistakingly creating attributes
353
+ __slots__ = ()
354
+
355
+ def __init__(self, kms_client_factory):
356
+ """Create CryptoFactory.
357
+
358
+ Parameters
359
+ ----------
360
+ kms_client_factory : a callable that accepts KmsConnectionConfig
361
+ and returns a KmsClient
362
+ """
363
+ self.factory.reset(new CPyCryptoFactory())
364
+
365
+ if callable(kms_client_factory):
366
+ self.init(kms_client_factory)
367
+ else:
368
+ raise TypeError("Parameter kms_client_factory must be a callable")
369
+
370
+ cdef init(self, callable_client_factory):
371
+ cdef:
372
+ CPyKmsClientFactoryVtable vtable
373
+ shared_ptr[CPyKmsClientFactory] kms_client_factory
374
+
375
+ vtable.create_kms_client = _cb_create_kms_client
376
+ kms_client_factory.reset(
377
+ new CPyKmsClientFactory(callable_client_factory, vtable))
378
+ # A KmsClientFactory object must be registered
379
+ # via this method before calling any of
380
+ # file_encryption_properties()/file_decryption_properties() methods.
381
+ self.factory.get().RegisterKmsClientFactory(
382
+ static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory](
383
+ kms_client_factory))
384
+
385
+ def file_encryption_properties(self,
386
+ KmsConnectionConfig kms_connection_config,
387
+ EncryptionConfiguration encryption_config):
388
+ """Create file encryption properties.
389
+
390
+ Parameters
391
+ ----------
392
+ kms_connection_config : KmsConnectionConfig
393
+ Configuration of connection to KMS
394
+
395
+ encryption_config : EncryptionConfiguration
396
+ Configuration of the encryption, such as which columns to encrypt
397
+
398
+ Returns
399
+ -------
400
+ file_encryption_properties : FileEncryptionProperties
401
+ File encryption properties.
402
+ """
403
+ cdef:
404
+ CResult[shared_ptr[CFileEncryptionProperties]] \
405
+ file_encryption_properties_result
406
+ with nogil:
407
+ file_encryption_properties_result = \
408
+ self.factory.get().SafeGetFileEncryptionProperties(
409
+ deref(kms_connection_config.unwrap().get()),
410
+ deref(encryption_config.unwrap().get()))
411
+ file_encryption_properties = GetResultValue(
412
+ file_encryption_properties_result)
413
+ return FileEncryptionProperties.wrap(file_encryption_properties)
414
+
415
+ def file_decryption_properties(
416
+ self,
417
+ KmsConnectionConfig kms_connection_config,
418
+ DecryptionConfiguration decryption_config=None):
419
+ """Create file decryption properties.
420
+
421
+ Parameters
422
+ ----------
423
+ kms_connection_config : KmsConnectionConfig
424
+ Configuration of connection to KMS
425
+
426
+ decryption_config : DecryptionConfiguration, default None
427
+ Configuration of the decryption, such as cache timeout.
428
+ Can be None.
429
+
430
+ Returns
431
+ -------
432
+ file_decryption_properties : FileDecryptionProperties
433
+ File decryption properties.
434
+ """
435
+ cdef:
436
+ CDecryptionConfiguration c_decryption_config
437
+ CResult[shared_ptr[CFileDecryptionProperties]] \
438
+ c_file_decryption_properties
439
+ if decryption_config is None:
440
+ c_decryption_config = CDecryptionConfiguration()
441
+ else:
442
+ c_decryption_config = deref(decryption_config.unwrap().get())
443
+ with nogil:
444
+ c_file_decryption_properties = \
445
+ self.factory.get().SafeGetFileDecryptionProperties(
446
+ deref(kms_connection_config.unwrap().get()),
447
+ c_decryption_config)
448
+ file_decryption_properties = GetResultValue(
449
+ c_file_decryption_properties)
450
+ return FileDecryptionProperties.wrap(file_decryption_properties)
451
+
452
+ def remove_cache_entries_for_token(self, access_token):
453
+ self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token))
454
+
455
+ def remove_cache_entries_for_all_tokens(self):
456
+ self.factory.get().RemoveCacheEntriesForAllTokens()
457
+
458
+ cdef inline shared_ptr[CPyCryptoFactory] unwrap(self):
459
+ return self.factory
460
+
461
+
462
+ cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *:
463
+ if isinstance(crypto_factory, CryptoFactory):
464
+ pycf = (<CryptoFactory> crypto_factory).unwrap()
465
+ return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf)
466
+ raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory))
467
+
468
+
469
+ cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *:
470
+ if isinstance(kmsconnectionconfig, KmsConnectionConfig):
471
+ return (<KmsConnectionConfig> kmsconnectionconfig).unwrap()
472
+ raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig))
473
+
474
+
475
+ cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *:
476
+ if isinstance(encryptionconfig, EncryptionConfiguration):
477
+ return (<EncryptionConfiguration> encryptionconfig).unwrap()
478
+ raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig))
479
+
480
+
481
+ cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *:
482
+ if isinstance(decryptionconfig, DecryptionConfiguration):
483
+ return (<DecryptionConfiguration> decryptionconfig).unwrap()
484
+ raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig))
env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (88.6 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False, binding=True
19
+ # distutils: language = c++
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+ from pyarrow.lib cimport check_status
24
+
25
+ from pyarrow.lib import frombytes
26
+
27
+
28
+ cdef class CppTestCase:
29
+ """
30
+ A simple wrapper for a C++ test case.
31
+ """
32
+ cdef:
33
+ CTestCase c_case
34
+
35
+ @staticmethod
36
+ cdef wrap(CTestCase c_case):
37
+ cdef:
38
+ CppTestCase obj
39
+ obj = CppTestCase.__new__(CppTestCase)
40
+ obj.c_case = c_case
41
+ return obj
42
+
43
+ @property
44
+ def name(self):
45
+ return frombytes(self.c_case.name)
46
+
47
+ def __repr__(self):
48
+ return f"<{self.__class__.__name__} {self.name!r}>"
49
+
50
+ def __call__(self):
51
+ check_status(self.c_case.func())
52
+
53
+
54
+ def get_cpp_tests():
55
+ """
56
+ Get a list of C++ test cases.
57
+ """
58
+ cases = []
59
+ c_cases = GetCppTestCases()
60
+ for c_case in c_cases:
61
+ cases.append(CppTestCase.wrap(c_case))
62
+ return cases
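A short sketch of exercising these wrappers directly; each CppTestCase is callable and reports failures through check_status, so calling it raises a pyarrow error if the embedded C++ test fails.

    from pyarrow._pyarrow_cpp_tests import get_cpp_tests

    for case in get_cpp_tests():
        print("running", case.name)
        case()  # runs the underlying C++ test; raises a pyarrow error on failure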
env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (235 kB).
 
env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.pyx ADDED
@@ -0,0 +1,349 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+ from cython.operator cimport dereference as deref
20
+ from libcpp.vector cimport vector as std_vector
21
+
22
+ from pyarrow import Buffer, py_buffer
23
+ from pyarrow._compute cimport Expression
24
+ from pyarrow.lib import frombytes, tobytes
25
+ from pyarrow.lib cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.includes.libarrow_substrait cimport *
28
+
29
+
30
+ # TODO GH-37235: Fix exception handling
31
+ cdef CDeclaration _create_named_table_provider(
32
+ dict named_args, const std_vector[c_string]& names, const CSchema& schema
33
+ ) noexcept:
34
+ cdef:
35
+ c_string c_name
36
+ shared_ptr[CTable] c_in_table
37
+ shared_ptr[CTableSourceNodeOptions] c_tablesourceopts
38
+ shared_ptr[CExecNodeOptions] c_input_node_opts
39
+ vector[CDeclaration.Input] no_c_inputs
40
+
41
+ py_names = []
42
+ for i in range(names.size()):
43
+ c_name = names[i]
44
+ py_names.append(frombytes(c_name))
45
+ py_schema = pyarrow_wrap_schema(make_shared[CSchema](schema))
46
+
47
+ py_table = named_args["provider"](py_names, py_schema)
48
+ c_in_table = pyarrow_unwrap_table(py_table)
49
+ c_tablesourceopts = make_shared[CTableSourceNodeOptions](c_in_table)
50
+ c_input_node_opts = static_pointer_cast[CExecNodeOptions, CTableSourceNodeOptions](
51
+ c_tablesourceopts)
52
+ return CDeclaration(tobytes("table_source"),
53
+ no_c_inputs, c_input_node_opts)
54
+
55
+
56
+ def run_query(plan, *, table_provider=None, use_threads=True):
57
+ """
58
+ Execute a Substrait plan and read the results as a RecordBatchReader.
59
+
60
+ Parameters
61
+ ----------
62
+ plan : Union[Buffer, bytes]
63
+ The serialized Substrait plan to execute.
64
+ table_provider : object (optional)
65
+ A function to resolve any NamedTable relation to a table.
66
+ The function will receive two arguments which will be a list
67
+ of strings representing the table name and a pyarrow.Schema representing
68
+ the expected schema and should return a pyarrow.Table.
69
+ use_threads : bool, default True
70
+ If True then multiple threads will be used to run the query. If False then
71
+ all CPU intensive work will be done on the calling thread.
72
+
73
+ Returns
74
+ -------
75
+ RecordBatchReader
76
+ A reader containing the result of the executed query
77
+
78
+ Examples
79
+ --------
80
+ >>> import pyarrow as pa
81
+ >>> from pyarrow.lib import tobytes
82
+ >>> import pyarrow.substrait as substrait
83
+ >>> test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]})
84
+ >>> test_table_2 = pa.Table.from_pydict({"x": [4, 5, 6]})
85
+ >>> def table_provider(names, schema):
86
+ ... if not names:
87
+ ... raise Exception("No names provided")
88
+ ... elif names[0] == "t1":
89
+ ... return test_table_1
90
+ ... elif names[1] == "t2":
91
+ ... return test_table_2
92
+ ... else:
93
+ ... raise Exception("Unrecognized table name")
94
+ ...
95
+ >>> substrait_query = '''
96
+ ... {
97
+ ... "relations": [
98
+ ... {"rel": {
99
+ ... "read": {
100
+ ... "base_schema": {
101
+ ... "struct": {
102
+ ... "types": [
103
+ ... {"i64": {}}
104
+ ... ]
105
+ ... },
106
+ ... "names": [
107
+ ... "x"
108
+ ... ]
109
+ ... },
110
+ ... "namedTable": {
111
+ ... "names": ["t1"]
112
+ ... }
113
+ ... }
114
+ ... }}
115
+ ... ]
116
+ ... }
117
+ ... '''
118
+ >>> buf = pa._substrait._parse_json_plan(tobytes(substrait_query))
119
+ >>> reader = pa.substrait.run_query(buf, table_provider=table_provider)
120
+ >>> reader.read_all()
121
+ pyarrow.Table
122
+ x: int64
123
+ ----
124
+ x: [[1,2,3]]
125
+ """
126
+
127
+ cdef:
128
+ CResult[shared_ptr[CRecordBatchReader]] c_res_reader
129
+ shared_ptr[CRecordBatchReader] c_reader
130
+ RecordBatchReader reader
131
+ shared_ptr[CBuffer] c_buf_plan
132
+ CConversionOptions c_conversion_options
133
+ c_bool c_use_threads
134
+
135
+ c_use_threads = use_threads
136
+ if isinstance(plan, bytes):
137
+ c_buf_plan = pyarrow_unwrap_buffer(py_buffer(plan))
138
+ elif isinstance(plan, Buffer):
139
+ c_buf_plan = pyarrow_unwrap_buffer(plan)
140
+ else:
141
+ raise TypeError(
142
+ f"Expected 'pyarrow.Buffer' or bytes, got '{type(plan)}'")
143
+
144
+ if table_provider is not None:
145
+ named_table_args = {
146
+ "provider": table_provider
147
+ }
148
+ c_conversion_options.named_table_provider = BindFunction[CNamedTableProvider](
149
+ &_create_named_table_provider, named_table_args)
150
+
151
+ with nogil:
152
+ c_res_reader = ExecuteSerializedPlan(
153
+ deref(c_buf_plan), default_extension_id_registry(),
154
+ GetFunctionRegistry(), c_conversion_options, c_use_threads)
155
+
156
+ c_reader = GetResultValue(c_res_reader)
157
+
158
+ reader = RecordBatchReader.__new__(RecordBatchReader)
159
+ reader.reader = c_reader
160
+ return reader
161
+
162
+
163
+ def _parse_json_plan(plan):
164
+ """
165
+ Parse a JSON plan into equivalent serialized Protobuf.
166
+
167
+ Parameters
168
+ ----------
169
+ plan : bytes
170
+ Substrait plan in JSON.
171
+
172
+ Returns
173
+ -------
174
+ Buffer
175
+ A buffer containing the serialized Protobuf plan.
176
+ """
177
+
178
+ cdef:
179
+ CResult[shared_ptr[CBuffer]] c_res_buffer
180
+ c_string c_str_plan
181
+ shared_ptr[CBuffer] c_buf_plan
182
+
183
+ c_str_plan = plan
184
+ c_res_buffer = SerializeJsonPlan(c_str_plan)
185
+ with nogil:
186
+ c_buf_plan = GetResultValue(c_res_buffer)
187
+ return pyarrow_wrap_buffer(c_buf_plan)
188
+
189
+
190
+ def serialize_expressions(exprs, names, schema, *, allow_arrow_extensions=False):
191
+ """
192
+ Serialize a collection of expressions into Substrait
193
+
194
+ Substrait expressions must be bound to a schema. For example,
195
+ the Substrait expression ``a:i32 + b:i32`` is different from the
196
+ Substrait expression ``a:i64 + b:i64``. Pyarrow expressions are
197
+ typically unbound. For example, both of the above expressions
198
+ would be represented as ``a + b`` in pyarrow.
199
+
200
+ This means a schema must be provided when serializing an expression.
201
+ It also means that the serialization may fail if a matching function
202
+ call cannot be found for the expression.
203
+
204
+ Parameters
205
+ ----------
206
+ exprs : list of Expression
207
+ The expressions to serialize
208
+ names : list of str
209
+ Names for the expressions
210
+ schema : Schema
211
+ The schema the expressions will be bound to
212
+ allow_arrow_extensions : bool, default False
213
+ If False then only functions that are part of the core Substrait function
214
+ definitions will be allowed. Set this to True to allow pyarrow-specific functions
215
+ and user defined functions but the result may not be accepted by other
216
+ compute libraries.
217
+
218
+ Returns
219
+ -------
220
+ Buffer
221
+ An ExtendedExpression message containing the serialized expressions
222
+ """
223
+ cdef:
224
+ CResult[shared_ptr[CBuffer]] c_res_buffer
225
+ shared_ptr[CBuffer] c_buffer
226
+ CNamedExpression c_named_expr
227
+ CBoundExpressions c_bound_exprs
228
+ CConversionOptions c_conversion_options
229
+
230
+ if len(exprs) != len(names):
231
+ raise ValueError("exprs and names need to have the same length")
232
+ for expr, name in zip(exprs, names):
233
+ if not isinstance(expr, Expression):
234
+ raise TypeError(f"Expected Expression, got '{type(expr)}' in exprs")
235
+ if not isinstance(name, str):
236
+ raise TypeError(f"Expected str, got '{type(name)}' in names")
237
+ c_named_expr.expression = (<Expression> expr).unwrap()
238
+ c_named_expr.name = tobytes(<str> name)
239
+ c_bound_exprs.named_expressions.push_back(c_named_expr)
240
+
241
+ c_bound_exprs.schema = (<Schema> schema).sp_schema
242
+
243
+ c_conversion_options.allow_arrow_extensions = allow_arrow_extensions
244
+
245
+ with nogil:
246
+ c_res_buffer = SerializeExpressions(c_bound_exprs, c_conversion_options)
247
+ c_buffer = GetResultValue(c_res_buffer)
248
+ return pyarrow_wrap_buffer(c_buffer)
249
+
250
+
251
+ cdef class BoundExpressions(_Weakrefable):
252
+ """
253
+ A collection of named expressions and the schema they are bound to
254
+
255
+ This is equivalent to the Substrait ExtendedExpression message
256
+ """
257
+
258
+ cdef:
259
+ CBoundExpressions c_bound_exprs
260
+
261
+ def __init__(self):
262
+ msg = 'BoundExpressions is an abstract class thus cannot be initialized.'
263
+ raise TypeError(msg)
264
+
265
+ cdef void init(self, CBoundExpressions bound_expressions):
266
+ self.c_bound_exprs = bound_expressions
267
+
268
+ @property
269
+ def schema(self):
270
+ """
271
+ The common schema that all expressions are bound to
272
+ """
273
+ return pyarrow_wrap_schema(self.c_bound_exprs.schema)
274
+
275
+ @property
276
+ def expressions(self):
277
+ """
278
+ A dict from expression name to expression
279
+ """
280
+ expr_dict = {}
281
+ for named_expr in self.c_bound_exprs.named_expressions:
282
+ name = frombytes(named_expr.name)
283
+ expr = Expression.wrap(named_expr.expression)
284
+ expr_dict[name] = expr
285
+ return expr_dict
286
+
287
+ @staticmethod
288
+ cdef wrap(const CBoundExpressions& bound_expressions):
289
+ cdef BoundExpressions self = BoundExpressions.__new__(BoundExpressions)
290
+ self.init(bound_expressions)
291
+ return self
292
+
293
+
294
+ def deserialize_expressions(buf):
295
+ """
296
+ Deserialize an ExtendedExpression Substrait message into a BoundExpressions object
297
+
298
+ Parameters
299
+ ----------
300
+ buf : Buffer or bytes
301
+ The message to deserialize
302
+
303
+ Returns
304
+ -------
305
+ BoundExpressions
306
+ The deserialized expressions, their names, and the bound schema
307
+ """
308
+ cdef:
309
+ shared_ptr[CBuffer] c_buffer
310
+ CResult[CBoundExpressions] c_res_bound_exprs
311
+ CBoundExpressions c_bound_exprs
312
+
313
+ if isinstance(buf, bytes):
314
+ c_buffer = pyarrow_unwrap_buffer(py_buffer(buf))
315
+ elif isinstance(buf, Buffer):
316
+ c_buffer = pyarrow_unwrap_buffer(buf)
317
+ else:
318
+ raise TypeError(
319
+ f"Expected 'pyarrow.Buffer' or bytes, got '{type(buf)}'")
320
+
321
+ with nogil:
322
+ c_res_bound_exprs = DeserializeExpressions(deref(c_buffer))
323
+ c_bound_exprs = GetResultValue(c_res_bound_exprs)
324
+
325
+ return BoundExpressions.wrap(c_bound_exprs)
326
+
327
+
328
+ def get_supported_functions():
329
+ """
330
+ Get a list of Substrait functions that the underlying
331
+ engine currently supports.
332
+
333
+ Returns
334
+ -------
335
+ list[str]
336
+ A list of function ids encoded as '{uri}#{name}'
337
+ """
338
+
339
+ cdef:
340
+ ExtensionIdRegistry* c_id_registry
341
+ std_vector[c_string] c_ids
342
+
343
+ c_id_registry = default_extension_id_registry()
344
+ c_ids = c_id_registry.GetSupportedSubstraitFunctions()
345
+
346
+ functions_list = []
347
+ for c_id in c_ids:
348
+ functions_list.append(frombytes(c_id))
349
+ return functions_list
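A round-trip sketch for the expression serialization helpers above, assuming they are re-exported as pyarrow.substrait and that the pyarrow build includes Substrait support.

    import pyarrow as pa
    import pyarrow.compute as pc
    import pyarrow.substrait as ps

    schema = pa.schema([("a", pa.int64()), ("b", pa.int64())])

    # Serialize a bound expression to a Substrait ExtendedExpression message ...
    buf = ps.serialize_expressions([pc.field("a") + pc.field("b")], ["a_plus_b"], schema)

    # ... and read it back.
    bound = ps.deserialize_expressions(buf)
    print(bound.schema)
    print(bound.expressions["a_plus_b"])

    # Function ids the engine can consume, encoded as '{uri}#{name}'.
    print(len(ps.get_supported_functions()))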
env-llmeval/lib/python3.10/site-packages/pyarrow/array.pxi ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.pxi ADDED
@@ -0,0 +1,20 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ def benchmark_PandasObjectIsNull(list obj):
20
+ Benchmark_PandasObjectIsNull(obj)
env-llmeval/lib/python3.10/site-packages/pyarrow/builder.pxi ADDED
@@ -0,0 +1,82 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ cdef class StringBuilder(_Weakrefable):
20
+ """
21
+ Builder class for UTF8 strings.
22
+
23
+ This class exposes facilities for incrementally adding string values and
24
+ building the null bitmap for a pyarrow.Array (type='string').
25
+ """
26
+ cdef:
27
+ unique_ptr[CStringBuilder] builder
28
+
29
+ def __cinit__(self, MemoryPool memory_pool=None):
30
+ cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
31
+ self.builder.reset(new CStringBuilder(pool))
32
+
33
+ def append(self, value):
34
+ """
35
+ Append a single value to the builder.
36
+
37
+ The value can either be a string/bytes object or a null value
38
+ (np.nan or None).
39
+
40
+ Parameters
41
+ ----------
42
+ value : string/bytes or np.nan/None
43
+ The value to append to the string array builder.
44
+ """
45
+ if value is None or value is np.nan:
46
+ self.builder.get().AppendNull()
47
+ elif isinstance(value, (bytes, str)):
48
+ self.builder.get().Append(tobytes(value))
49
+ else:
50
+ raise TypeError('StringBuilder only accepts string objects')
51
+
52
+ def append_values(self, values):
53
+ """
54
+ Append all the values from an iterable.
55
+
56
+ Parameters
57
+ ----------
58
+ values : iterable of string/bytes or np.nan/None values
59
+ The values to append to the string array builder.
60
+ """
61
+ for value in values:
62
+ self.append(value)
63
+
64
+ def finish(self):
65
+ """
66
+ Return result of builder as an Array object; also resets the builder.
67
+
68
+ Returns
69
+ -------
70
+ array : pyarrow.Array
71
+ """
72
+ cdef shared_ptr[CArray] out
73
+ with nogil:
74
+ self.builder.get().Finish(&out)
75
+ return pyarrow_wrap_array(out)
76
+
77
+ @property
78
+ def null_count(self):
79
+ return self.builder.get().null_count()
80
+
81
+ def __len__(self):
82
+ return self.builder.get().length()
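A minimal usage sketch of the builder, assuming StringBuilder is importable from pyarrow.lib (this .pxi is compiled into that module).

    import numpy as np
    from pyarrow.lib import StringBuilder

    builder = StringBuilder()
    builder.append("hello")
    builder.append(None)                      # None (or np.nan) appends a null
    builder.append_values(["a", np.nan, "b"])
    print(len(builder), builder.null_count)   # 5 2
    arr = builder.finish()                    # pyarrow string Array; builder is reset
    print(arr)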
env-llmeval/lib/python3.10/site-packages/pyarrow/cffi.py ADDED
@@ -0,0 +1,71 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import absolute_import
19
+
20
+ import cffi
21
+
22
+ c_source = """
23
+ struct ArrowSchema {
24
+ // Array type description
25
+ const char* format;
26
+ const char* name;
27
+ const char* metadata;
28
+ int64_t flags;
29
+ int64_t n_children;
30
+ struct ArrowSchema** children;
31
+ struct ArrowSchema* dictionary;
32
+
33
+ // Release callback
34
+ void (*release)(struct ArrowSchema*);
35
+ // Opaque producer-specific data
36
+ void* private_data;
37
+ };
38
+
39
+ struct ArrowArray {
40
+ // Array data description
41
+ int64_t length;
42
+ int64_t null_count;
43
+ int64_t offset;
44
+ int64_t n_buffers;
45
+ int64_t n_children;
46
+ const void** buffers;
47
+ struct ArrowArray** children;
48
+ struct ArrowArray* dictionary;
49
+
50
+ // Release callback
51
+ void (*release)(struct ArrowArray*);
52
+ // Opaque producer-specific data
53
+ void* private_data;
54
+ };
55
+
56
+ struct ArrowArrayStream {
57
+ int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);
58
+ int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);
59
+
60
+ const char* (*get_last_error)(struct ArrowArrayStream*);
61
+
62
+ // Release callback
63
+ void (*release)(struct ArrowArrayStream*);
64
+ // Opaque producer-specific data
65
+ void* private_data;
66
+ };
67
+ """
68
+
69
+ # TODO use out-of-line mode for faster import and avoid C parsing
70
+ ffi = cffi.FFI()
71
+ ffi.cdef(c_source)
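A sketch of the usual C Data Interface round trip with these struct definitions, assuming pyarrow's underscore-prefixed _export_to_c/_import_from_c helpers, which take raw pointer addresses.

    import pyarrow as pa
    from pyarrow.cffi import ffi

    c_schema = ffi.new("struct ArrowSchema*")
    c_array = ffi.new("struct ArrowArray*")
    schema_ptr = int(ffi.cast("uintptr_t", c_schema))
    array_ptr = int(ffi.cast("uintptr_t", c_array))

    arr = pa.array([1, 2, 3])
    arr._export_to_c(array_ptr, schema_ptr)                 # producer fills both structs
    copy = pa.Array._import_from_c(array_ptr, schema_ptr)   # consumer takes ownership
    assert copy.equals(arr)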
env-llmeval/lib/python3.10/site-packages/pyarrow/compat.pxi ADDED
@@ -0,0 +1,71 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ def encode_file_path(path):
20
+ if isinstance(path, str):
21
+ # POSIX systems can handle utf-8. UTF8 is converted to utf16-le in
22
+ # libarrow
23
+ encoded_path = path.encode('utf-8')
24
+ else:
25
+ encoded_path = path
26
+
27
+ # Windows file system requires utf-16le for file names; Arrow C++ libraries
28
+ # will convert utf8 to utf16
29
+ return encoded_path
30
+
31
+
32
+ # Starting with Python 3.7, dicts are guaranteed to be insertion-ordered.
33
+ ordered_dict = dict
34
+
35
+
36
+ try:
37
+ import cloudpickle as pickle
38
+ except ImportError:
39
+ import pickle
40
+
41
+
42
+ def tobytes(o):
43
+ """
44
+ Encode a unicode or bytes string to bytes.
45
+
46
+ Parameters
47
+ ----------
48
+ o : str or bytes
49
+ Input string.
50
+ """
51
+ if isinstance(o, str):
52
+ return o.encode('utf8')
53
+ else:
54
+ return o
55
+
56
+
57
+ def frombytes(o, *, safe=False):
58
+ """
59
+ Decode the given bytestring to unicode.
60
+
61
+ Parameters
62
+ ----------
63
+ o : bytes-like
64
+ Input object.
65
+ safe : bool, default False
66
+ If true, raise on encoding errors.
67
+ """
68
+ if safe:
69
+ return o.decode('utf8', errors='replace')
70
+ else:
71
+ return o.decode('utf8')
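A small sketch of the two string helpers, which other modules in this commit import from pyarrow.lib.

    from pyarrow.lib import frombytes, tobytes

    assert tobytes("héllo") == "héllo".encode("utf8")
    assert tobytes(b"raw") == b"raw"              # bytes pass through unchanged
    assert frombytes(b"h\xc3\xa9llo") == "héllo"
    # With safe=True, undecodable bytes are replaced rather than raising
    assert frombytes(b"\xff", safe=True) == "\ufffd"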
env-llmeval/lib/python3.10/site-packages/pyarrow/compute.py ADDED
@@ -0,0 +1,731 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from pyarrow._compute import ( # noqa
19
+ Function,
20
+ FunctionOptions,
21
+ FunctionRegistry,
22
+ HashAggregateFunction,
23
+ HashAggregateKernel,
24
+ Kernel,
25
+ ScalarAggregateFunction,
26
+ ScalarAggregateKernel,
27
+ ScalarFunction,
28
+ ScalarKernel,
29
+ VectorFunction,
30
+ VectorKernel,
31
+ # Option classes
32
+ ArraySortOptions,
33
+ AssumeTimezoneOptions,
34
+ CastOptions,
35
+ CountOptions,
36
+ CumulativeOptions,
37
+ CumulativeSumOptions,
38
+ DayOfWeekOptions,
39
+ DictionaryEncodeOptions,
40
+ RunEndEncodeOptions,
41
+ ElementWiseAggregateOptions,
42
+ ExtractRegexOptions,
43
+ FilterOptions,
44
+ IndexOptions,
45
+ JoinOptions,
46
+ ListSliceOptions,
47
+ MakeStructOptions,
48
+ MapLookupOptions,
49
+ MatchSubstringOptions,
50
+ ModeOptions,
51
+ NullOptions,
52
+ PadOptions,
53
+ PairwiseOptions,
54
+ PartitionNthOptions,
55
+ QuantileOptions,
56
+ RandomOptions,
57
+ RankOptions,
58
+ ReplaceSliceOptions,
59
+ ReplaceSubstringOptions,
60
+ RoundBinaryOptions,
61
+ RoundOptions,
62
+ RoundTemporalOptions,
63
+ RoundToMultipleOptions,
64
+ ScalarAggregateOptions,
65
+ SelectKOptions,
66
+ SetLookupOptions,
67
+ SliceOptions,
68
+ SortOptions,
69
+ SplitOptions,
70
+ SplitPatternOptions,
71
+ StrftimeOptions,
72
+ StrptimeOptions,
73
+ StructFieldOptions,
74
+ TakeOptions,
75
+ TDigestOptions,
76
+ TrimOptions,
77
+ Utf8NormalizeOptions,
78
+ VarianceOptions,
79
+ WeekOptions,
80
+ # Functions
81
+ call_function,
82
+ function_registry,
83
+ get_function,
84
+ list_functions,
85
+ # Udf
86
+ call_tabular_function,
87
+ register_scalar_function,
88
+ register_tabular_function,
89
+ register_aggregate_function,
90
+ register_vector_function,
91
+ UdfContext,
92
+ # Expressions
93
+ Expression,
94
+ )
95
+
96
+ from collections import namedtuple
97
+ import inspect
98
+ from textwrap import dedent
99
+ import warnings
100
+
101
+ import pyarrow as pa
102
+ from pyarrow import _compute_docstrings
103
+ from pyarrow.vendored import docscrape
104
+
105
+
106
+ def _get_arg_names(func):
107
+ return func._doc.arg_names
108
+
109
+
110
+ _OptionsClassDoc = namedtuple('_OptionsClassDoc', ('params',))
111
+
112
+
113
+ def _scrape_options_class_doc(options_class):
114
+ if not options_class.__doc__:
115
+ return None
116
+ doc = docscrape.NumpyDocString(options_class.__doc__)
117
+ return _OptionsClassDoc(doc['Parameters'])
118
+
119
+
120
+ def _decorate_compute_function(wrapper, exposed_name, func, options_class):
121
+ # Decorate the given compute function wrapper with useful metadata
122
+ # and documentation.
123
+ cpp_doc = func._doc
124
+
125
+ wrapper.__arrow_compute_function__ = dict(
126
+ name=func.name,
127
+ arity=func.arity,
128
+ options_class=cpp_doc.options_class,
129
+ options_required=cpp_doc.options_required)
130
+ wrapper.__name__ = exposed_name
131
+ wrapper.__qualname__ = exposed_name
132
+
133
+ doc_pieces = []
134
+
135
+ # 1. One-line summary
136
+ summary = cpp_doc.summary
137
+ if not summary:
138
+ arg_str = "arguments" if func.arity > 1 else "argument"
139
+ summary = ("Call compute function {!r} with the given {}"
140
+ .format(func.name, arg_str))
141
+
142
+ doc_pieces.append(f"{summary}.\n\n")
143
+
144
+ # 2. Multi-line description
145
+ description = cpp_doc.description
146
+ if description:
147
+ doc_pieces.append(f"{description}\n\n")
148
+
149
+ doc_addition = _compute_docstrings.function_doc_additions.get(func.name)
150
+
151
+ # 3. Parameter description
152
+ doc_pieces.append(dedent("""\
153
+ Parameters
154
+ ----------
155
+ """))
156
+
157
+ # 3a. Compute function parameters
158
+ arg_names = _get_arg_names(func)
159
+ for arg_name in arg_names:
160
+ if func.kind in ('vector', 'scalar_aggregate'):
161
+ arg_type = 'Array-like'
162
+ else:
163
+ arg_type = 'Array-like or scalar-like'
164
+ doc_pieces.append(f"{arg_name} : {arg_type}\n")
165
+ doc_pieces.append(" Argument to compute function.\n")
166
+
167
+ # 3b. Compute function option values
168
+ if options_class is not None:
169
+ options_class_doc = _scrape_options_class_doc(options_class)
170
+ if options_class_doc:
171
+ for p in options_class_doc.params:
172
+ doc_pieces.append(f"{p.name} : {p.type}\n")
173
+ for s in p.desc:
174
+ doc_pieces.append(f" {s}\n")
175
+ else:
176
+ warnings.warn(f"Options class {options_class.__name__} "
177
+ f"does not have a docstring", RuntimeWarning)
178
+ options_sig = inspect.signature(options_class)
179
+ for p in options_sig.parameters.values():
180
+ doc_pieces.append(dedent("""\
181
+ {0} : optional
182
+ Parameter for {1} constructor. Either `options`
183
+ or `{0}` can be passed, but not both at the same time.
184
+ """.format(p.name, options_class.__name__)))
185
+ doc_pieces.append(dedent(f"""\
186
+ options : pyarrow.compute.{options_class.__name__}, optional
187
+ Alternative way of passing options.
188
+ """))
189
+
190
+ doc_pieces.append(dedent("""\
191
+ memory_pool : pyarrow.MemoryPool, optional
192
+ If not passed, will allocate memory from the default memory pool.
193
+ """))
194
+
195
+ # 4. Custom addition (e.g. examples)
196
+ if doc_addition is not None:
197
+ doc_pieces.append("\n{}\n".format(dedent(doc_addition).strip("\n")))
198
+
199
+ wrapper.__doc__ = "".join(doc_pieces)
200
+ return wrapper
201
+
202
+
203
+ def _get_options_class(func):
204
+ class_name = func._doc.options_class
205
+ if not class_name:
206
+ return None
207
+ try:
208
+ return globals()[class_name]
209
+ except KeyError:
210
+ warnings.warn("Python binding for {} not exposed"
211
+ .format(class_name), RuntimeWarning)
212
+ return None
213
+
214
+
215
+ def _handle_options(name, options_class, options, args, kwargs):
216
+ if args or kwargs:
217
+ if options is not None:
218
+ raise TypeError(
219
+ "Function {!r} called with both an 'options' argument "
220
+ "and additional arguments"
221
+ .format(name))
222
+ return options_class(*args, **kwargs)
223
+
224
+ if options is not None:
225
+ if isinstance(options, dict):
226
+ return options_class(**options)
227
+ elif isinstance(options, options_class):
228
+ return options
229
+ raise TypeError(
230
+ "Function {!r} expected a {} parameter, got {}"
231
+ .format(name, options_class, type(options)))
232
+
233
+ return None
234
+
235
+
236
+ def _make_generic_wrapper(func_name, func, options_class, arity):
237
+ if options_class is None:
238
+ def wrapper(*args, memory_pool=None):
239
+ if arity is not Ellipsis and len(args) != arity:
240
+ raise TypeError(
241
+ f"{func_name} takes {arity} positional argument(s), "
242
+ f"but {len(args)} were given"
243
+ )
244
+ if args and isinstance(args[0], Expression):
245
+ return Expression._call(func_name, list(args))
246
+ return func.call(args, None, memory_pool)
247
+ else:
248
+ def wrapper(*args, memory_pool=None, options=None, **kwargs):
249
+ if arity is not Ellipsis:
250
+ if len(args) < arity:
251
+ raise TypeError(
252
+ f"{func_name} takes {arity} positional argument(s), "
253
+ f"but {len(args)} were given"
254
+ )
255
+ option_args = args[arity:]
256
+ args = args[:arity]
257
+ else:
258
+ option_args = ()
259
+ options = _handle_options(func_name, options_class, options,
260
+ option_args, kwargs)
261
+ if args and isinstance(args[0], Expression):
262
+ return Expression._call(func_name, list(args), options)
263
+ return func.call(args, options, memory_pool)
264
+ return wrapper
265
+
266
+
267
+ def _make_signature(arg_names, var_arg_names, options_class):
268
+ from inspect import Parameter
269
+ params = []
270
+ for name in arg_names:
271
+ params.append(Parameter(name, Parameter.POSITIONAL_ONLY))
272
+ for name in var_arg_names:
273
+ params.append(Parameter(name, Parameter.VAR_POSITIONAL))
274
+ if options_class is not None:
275
+ options_sig = inspect.signature(options_class)
276
+ for p in options_sig.parameters.values():
277
+ assert p.kind in (Parameter.POSITIONAL_OR_KEYWORD,
278
+ Parameter.KEYWORD_ONLY)
279
+ if var_arg_names:
280
+ # Cannot have a positional argument after a *args
281
+ p = p.replace(kind=Parameter.KEYWORD_ONLY)
282
+ params.append(p)
283
+ params.append(Parameter("options", Parameter.KEYWORD_ONLY,
284
+ default=None))
285
+ params.append(Parameter("memory_pool", Parameter.KEYWORD_ONLY,
286
+ default=None))
287
+ return inspect.Signature(params)
288
+
289
+
290
+ def _wrap_function(name, func):
291
+ options_class = _get_options_class(func)
292
+ arg_names = _get_arg_names(func)
293
+ has_vararg = arg_names and arg_names[-1].startswith('*')
294
+ if has_vararg:
295
+ var_arg_names = [arg_names.pop().lstrip('*')]
296
+ else:
297
+ var_arg_names = []
298
+
299
+ wrapper = _make_generic_wrapper(
300
+ name, func, options_class, arity=func.arity)
301
+ wrapper.__signature__ = _make_signature(arg_names, var_arg_names,
302
+ options_class)
303
+ return _decorate_compute_function(wrapper, name, func, options_class)
304
+
305
+
306
+ def _make_global_functions():
307
+ """
308
+ Make global functions wrapping each compute function.
309
+
310
+ Note that some of the automatically-generated wrappers may be overridden
311
+ by custom versions below.
312
+ """
313
+ g = globals()
314
+ reg = function_registry()
315
+
316
+ # Avoid clashes with Python keywords
317
+ rewrites = {'and': 'and_',
318
+ 'or': 'or_'}
319
+
320
+ for cpp_name in reg.list_functions():
321
+ name = rewrites.get(cpp_name, cpp_name)
322
+ func = reg.get_function(cpp_name)
323
+ if func.kind == "hash_aggregate":
324
+ # Hash aggregate functions are not callable,
325
+ # so let's not expose them at module level.
326
+ continue
327
+ if func.kind == "scalar_aggregate" and func.arity == 0:
328
+ # Nullary scalar aggregate functions are not callable
329
+ # directly so let's not expose them at module level.
330
+ continue
331
+ assert name not in g, name
332
+ g[cpp_name] = g[name] = _wrap_function(name, func)
333
+
334
+
335
+ _make_global_functions()
336
+
337
+
338
+ def cast(arr, target_type=None, safe=None, options=None, memory_pool=None):
339
+ """
340
+ Cast array values to another data type. Can also be invoked as an array
341
+ instance method.
342
+
343
+ Parameters
344
+ ----------
345
+ arr : Array-like
346
+ target_type : DataType or str
347
+ Type to cast to
348
+ safe : bool, default True
349
+ Check for overflows or other unsafe conversions
350
+ options : CastOptions, default None
351
+ Additional checks pass by CastOptions
352
+ memory_pool : MemoryPool, optional
353
+ memory pool to use for allocations during function execution.
354
+
355
+ Examples
356
+ --------
357
+ >>> from datetime import datetime
358
+ >>> import pyarrow as pa
359
+ >>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])
360
+ >>> arr.type
361
+ TimestampType(timestamp[us])
362
+
363
+ You can use ``pyarrow.DataType`` objects to specify the target type:
364
+
365
+ >>> cast(arr, pa.timestamp('ms'))
366
+ <pyarrow.lib.TimestampArray object at ...>
367
+ [
368
+ 2010-01-01 00:00:00.000,
369
+ 2015-01-01 00:00:00.000
370
+ ]
371
+
372
+ >>> cast(arr, pa.timestamp('ms')).type
373
+ TimestampType(timestamp[ms])
374
+
375
+ Alternatively, it is also supported to use the string aliases for these
376
+ types:
377
+
378
+ >>> arr.cast('timestamp[ms]')
379
+ <pyarrow.lib.TimestampArray object at ...>
380
+ [
381
+ 2010-01-01 00:00:00.000,
382
+ 2015-01-01 00:00:00.000
383
+ ]
384
+ >>> arr.cast('timestamp[ms]').type
385
+ TimestampType(timestamp[ms])
386
+
387
+ Returns
388
+ -------
389
+ casted : Array
390
+ The cast result as a new Array
391
+ """
392
+ safe_vars_passed = (safe is not None) or (target_type is not None)
393
+
394
+ if safe_vars_passed and (options is not None):
395
+ raise ValueError("Must either pass values for 'target_type' and 'safe'"
396
+ " or pass a value for 'options'")
397
+
398
+ if options is None:
399
+ target_type = pa.types.lib.ensure_type(target_type)
400
+ if safe is False:
401
+ options = CastOptions.unsafe(target_type)
402
+ else:
403
+ options = CastOptions.safe(target_type)
404
+ return call_function("cast", [arr], options, memory_pool)
405
+
406
+
407
+ def index(data, value, start=None, end=None, *, memory_pool=None):
408
+ """
409
+ Find the index of the first occurrence of a given value.
410
+
411
+ Parameters
412
+ ----------
413
+ data : Array-like
414
+ value : Scalar-like object
415
+ The value to search for.
416
+ start : int, optional
417
+ end : int, optional
418
+ memory_pool : MemoryPool, optional
419
+ If not passed, will allocate memory from the default memory pool.
420
+
421
+ Returns
422
+ -------
423
+ index : int
424
+ the index, or -1 if not found
425
+ """
426
+ if start is not None:
427
+ if end is not None:
428
+ data = data.slice(start, end - start)
429
+ else:
430
+ data = data.slice(start)
431
+ elif end is not None:
432
+ data = data.slice(0, end)
433
+
434
+ if not isinstance(value, pa.Scalar):
435
+ value = pa.scalar(value, type=data.type)
436
+ elif data.type != value.type:
437
+ value = pa.scalar(value.as_py(), type=data.type)
438
+ options = IndexOptions(value=value)
439
+ result = call_function('index', [data], options, memory_pool)
440
+ if start is not None and result.as_py() >= 0:
441
+ result = pa.scalar(result.as_py() + start, type=pa.int64())
442
+ return result
443
+
444
+
445
+ def take(data, indices, *, boundscheck=True, memory_pool=None):
446
+ """
447
+ Select values (or records) from array- or table-like data given integer
448
+ selection indices.
449
+
450
+ The result will be of the same type(s) as the input, with elements taken
451
+ from the input array (or record batch / table fields) at the given
452
+ indices. If an index is null then the corresponding value in the output
453
+ will be null.
454
+
455
+ Parameters
456
+ ----------
457
+ data : Array, ChunkedArray, RecordBatch, or Table
458
+ indices : Array, ChunkedArray
459
+ Must be of integer type
460
+ boundscheck : boolean, default True
461
+ Whether to boundscheck the indices. If False and there is an out of
462
+ bounds index, will likely cause the process to crash.
463
+ memory_pool : MemoryPool, optional
464
+ If not passed, will allocate memory from the default memory pool.
465
+
466
+ Returns
467
+ -------
468
+ result : depends on inputs
469
+ Selected values for the given indices
470
+
471
+ Examples
472
+ --------
473
+ >>> import pyarrow as pa
474
+ >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
475
+ >>> indices = pa.array([0, None, 4, 3])
476
+ >>> arr.take(indices)
477
+ <pyarrow.lib.StringArray object at ...>
478
+ [
479
+ "a",
480
+ null,
481
+ "e",
482
+ null
483
+ ]
484
+ """
485
+ options = TakeOptions(boundscheck=boundscheck)
486
+ return call_function('take', [data, indices], options, memory_pool)
487
+
488
+
489
+ def fill_null(values, fill_value):
490
+ """Replace each null element in values with a corresponding
491
+ element from fill_value.
492
+
493
+ If fill_value is scalar-like, then every null element in values
494
+ will be replaced with fill_value. If fill_value is array-like,
495
+ then the i-th element in values will be replaced with the i-th
496
+ element in fill_value.
497
+
498
+ The fill_value's type must be the same as that of values, or it
499
+ must be able to be implicitly casted to the array's type.
500
+
501
+ This is an alias for :func:`coalesce`.
502
+
503
+ Parameters
504
+ ----------
505
+ values : Array, ChunkedArray, or Scalar-like object
506
+ Each null element is replaced with the corresponding value
507
+ from fill_value.
508
+ fill_value : Array, ChunkedArray, or Scalar-like object
509
+ If not same type as values, will attempt to cast.
510
+
511
+ Returns
512
+ -------
513
+ result : depends on inputs
514
+ Values with all null elements replaced
515
+
516
+ Examples
517
+ --------
518
+ >>> import pyarrow as pa
519
+ >>> arr = pa.array([1, 2, None, 3], type=pa.int8())
520
+ >>> fill_value = pa.scalar(5, type=pa.int8())
521
+ >>> arr.fill_null(fill_value)
522
+ <pyarrow.lib.Int8Array object at ...>
523
+ [
524
+ 1,
525
+ 2,
526
+ 5,
527
+ 3
528
+ ]
529
+ >>> arr = pa.array([1, 2, None, 4, None])
530
+ >>> arr.fill_null(pa.array([10, 20, 30, 40, 50]))
531
+ <pyarrow.lib.Int64Array object at ...>
532
+ [
533
+ 1,
534
+ 2,
535
+ 30,
536
+ 4,
537
+ 50
538
+ ]
539
+ """
540
+ if not isinstance(fill_value, (pa.Array, pa.ChunkedArray, pa.Scalar)):
541
+ fill_value = pa.scalar(fill_value, type=values.type)
542
+ elif values.type != fill_value.type:
543
+ fill_value = pa.scalar(fill_value.as_py(), type=values.type)
544
+
545
+ return call_function("coalesce", [values, fill_value])
546
+
547
+
548
+ def top_k_unstable(values, k, sort_keys=None, *, memory_pool=None):
549
+ """
550
+ Select the indices of the top-k ordered elements from array- or table-like
551
+ data.
552
+
553
+ This is a specialization for :func:`select_k_unstable`. Output is not
554
+ guaranteed to be stable.
555
+
556
+ Parameters
557
+ ----------
558
+ values : Array, ChunkedArray, RecordBatch, or Table
559
+ Data to sort and get top indices from.
560
+ k : int
561
+ The number of `k` elements to keep.
562
+ sort_keys : List-like
563
+ Column key names to order by when input is table-like data.
564
+ memory_pool : MemoryPool, optional
565
+ If not passed, will allocate memory from the default memory pool.
566
+
567
+ Returns
568
+ -------
569
+ result : Array
570
+ Indices of the top-k ordered elements
571
+
572
+ Examples
573
+ --------
574
+ >>> import pyarrow as pa
575
+ >>> import pyarrow.compute as pc
576
+ >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
577
+ >>> pc.top_k_unstable(arr, k=3)
578
+ <pyarrow.lib.UInt64Array object at ...>
579
+ [
580
+ 5,
581
+ 4,
582
+ 2
583
+ ]
584
+ """
585
+ if sort_keys is None:
586
+ sort_keys = []
587
+ if isinstance(values, (pa.Array, pa.ChunkedArray)):
588
+ sort_keys.append(("dummy", "descending"))
589
+ else:
590
+ sort_keys = map(lambda key_name: (key_name, "descending"), sort_keys)
591
+ options = SelectKOptions(k, sort_keys)
592
+ return call_function("select_k_unstable", [values], options, memory_pool)
593
+
594
+
595
+ def bottom_k_unstable(values, k, sort_keys=None, *, memory_pool=None):
596
+ """
597
+ Select the indices of the bottom-k ordered elements from
598
+ array- or table-like data.
599
+
600
+ This is a specialization for :func:`select_k_unstable`. Output is not
601
+ guaranteed to be stable.
602
+
603
+ Parameters
604
+ ----------
605
+ values : Array, ChunkedArray, RecordBatch, or Table
606
+ Data to sort and get bottom indices from.
607
+ k : int
608
+ The number of `k` elements to keep.
609
+ sort_keys : List-like
610
+ Column key names to order by when input is table-like data.
611
+ memory_pool : MemoryPool, optional
612
+ If not passed, will allocate memory from the default memory pool.
613
+
614
+ Returns
615
+ -------
616
+ result : Array of indices
617
+ Indices of the bottom-k ordered elements
618
+
619
+ Examples
620
+ --------
621
+ >>> import pyarrow as pa
622
+ >>> import pyarrow.compute as pc
623
+ >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
624
+ >>> pc.bottom_k_unstable(arr, k=3)
625
+ <pyarrow.lib.UInt64Array object at ...>
626
+ [
627
+ 0,
628
+ 1,
629
+ 2
630
+ ]
631
+ """
632
+ if sort_keys is None:
633
+ sort_keys = []
634
+ if isinstance(values, (pa.Array, pa.ChunkedArray)):
635
+ sort_keys.append(("dummy", "ascending"))
636
+ else:
637
+ sort_keys = map(lambda key_name: (key_name, "ascending"), sort_keys)
638
+ options = SelectKOptions(k, sort_keys)
639
+ return call_function("select_k_unstable", [values], options, memory_pool)
640
+
641
+
642
+ def random(n, *, initializer='system', options=None, memory_pool=None):
643
+ """
644
+ Generate numbers in the range [0, 1).
645
+
646
+ Generated values are uniformly-distributed, double-precision
647
+ in range [0, 1). Algorithm and seed can be changed via RandomOptions.
648
+
649
+ Parameters
650
+ ----------
651
+ n : int
652
+ Number of values to generate, must be greater than or equal to 0
653
+ initializer : int or str
654
+ How to initialize the underlying random generator.
655
+ If an integer is given, it is used as a seed.
656
+ If "system" is given, the random generator is initialized with
657
+ a system-specific source of (hopefully true) randomness.
658
+ Other values are invalid.
659
+ options : pyarrow.compute.RandomOptions, optional
660
+ Alternative way of passing options.
661
+ memory_pool : pyarrow.MemoryPool, optional
662
+ If not passed, will allocate memory from the default memory pool.
663
+ """
664
+ options = RandomOptions(initializer=initializer)
665
+ return call_function("random", [], options, memory_pool, length=n)
666
+
667
+
668
+ def field(*name_or_index):
669
+ """Reference a column of the dataset.
670
+
671
+ Stores only the field's name. Type and other information is known only when
672
+ the expression is bound to a dataset having an explicit scheme.
673
+
674
+ Nested references are allowed by passing multiple names or a tuple of
675
+ names. For example ``('foo', 'bar')`` references the field named "bar"
676
+ inside the field named "foo".
677
+
678
+ Parameters
679
+ ----------
680
+ *name_or_index : string, multiple strings, tuple or int
681
+ The name or index of the (possibly nested) field the expression
682
+ references to.
683
+
684
+ Returns
685
+ -------
686
+ field_expr : Expression
687
+ Reference to the given field
688
+
689
+ Examples
690
+ --------
691
+ >>> import pyarrow.compute as pc
692
+ >>> pc.field("a")
693
+ <pyarrow.compute.Expression a>
694
+ >>> pc.field(1)
695
+ <pyarrow.compute.Expression FieldPath(1)>
696
+ >>> pc.field(("a", "b"))
697
+ <pyarrow.compute.Expression FieldRef.Nested(FieldRef.Name(a) ...
698
+ >>> pc.field("a", "b")
699
+ <pyarrow.compute.Expression FieldRef.Nested(FieldRef.Name(a) ...
700
+ """
701
+ n = len(name_or_index)
702
+ if n == 1:
703
+ if isinstance(name_or_index[0], (str, int)):
704
+ return Expression._field(name_or_index[0])
705
+ elif isinstance(name_or_index[0], tuple):
706
+ return Expression._nested_field(name_or_index[0])
707
+ else:
708
+ raise TypeError(
709
+ "field reference should be str, multiple str, tuple or "
710
+ f"integer, got {type(name_or_index[0])}"
711
+ )
712
+ # In case of multiple strings not supplied in a tuple
713
+ else:
714
+ return Expression._nested_field(name_or_index)
715
+
716
+
717
+ def scalar(value):
718
+ """Expression representing a scalar value.
719
+
720
+ Parameters
721
+ ----------
722
+ value : bool, int, float or string
723
+ Python value of the scalar. Note that only a subset of types are
724
+ currently supported.
725
+
726
+ Returns
727
+ -------
728
+ scalar_expr : Expression
729
+ An Expression representing the scalar value
730
+ """
731
+ return Expression._scalar(value)
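A short sketch of the generated wrappers: option values can be given as keyword arguments or as an explicit options object, and passing an Expression defers the call instead of executing it.

    import pyarrow as pa
    import pyarrow.compute as pc

    arr = pa.array([1.234, 5.678, None])

    # Keyword form and explicit options object go through the same wrapper machinery
    print(pc.round(arr, ndigits=1))
    print(pc.round(arr, options=pc.RoundOptions(ndigits=1)))

    # With an Expression argument the wrapper returns a deferred expression instead
    expr = pc.round(pc.field("x"), ndigits=1)
    print(expr)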
env-llmeval/lib/python3.10/site-packages/pyarrow/config.pxi ADDED
@@ -0,0 +1,95 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from pyarrow.includes.libarrow cimport GetBuildInfo
19
+
20
+ from collections import namedtuple
21
+ import os
22
+
23
+
24
+ VersionInfo = namedtuple('VersionInfo', ('major', 'minor', 'patch'))
25
+
26
+ BuildInfo = namedtuple(
27
+ 'BuildInfo',
28
+ ('version', 'version_info', 'so_version', 'full_so_version',
29
+ 'compiler_id', 'compiler_version', 'compiler_flags',
30
+ 'git_id', 'git_description', 'package_kind', 'build_type'))
31
+
32
+ RuntimeInfo = namedtuple('RuntimeInfo',
33
+ ('simd_level', 'detected_simd_level'))
34
+
35
+ cdef _build_info():
36
+ cdef:
37
+ const CBuildInfo* c_info
38
+
39
+ c_info = &GetBuildInfo()
40
+
41
+ return BuildInfo(version=frombytes(c_info.version_string),
42
+ version_info=VersionInfo(c_info.version_major,
43
+ c_info.version_minor,
44
+ c_info.version_patch),
45
+ so_version=frombytes(c_info.so_version),
46
+ full_so_version=frombytes(c_info.full_so_version),
47
+ compiler_id=frombytes(c_info.compiler_id),
48
+ compiler_version=frombytes(c_info.compiler_version),
49
+ compiler_flags=frombytes(c_info.compiler_flags),
50
+ git_id=frombytes(c_info.git_id),
51
+ git_description=frombytes(c_info.git_description),
52
+ package_kind=frombytes(c_info.package_kind),
53
+ build_type=frombytes(c_info.build_type).lower(),
54
+ )
55
+
56
+
57
+ cpp_build_info = _build_info()
58
+ cpp_version = cpp_build_info.version
59
+ cpp_version_info = cpp_build_info.version_info
60
+
61
+
62
+ def runtime_info():
63
+ """
64
+ Get runtime information.
65
+
66
+ Returns
67
+ -------
68
+ info : pyarrow.RuntimeInfo
69
+ """
70
+ cdef:
71
+ CRuntimeInfo c_info
72
+
73
+ c_info = GetRuntimeInfo()
74
+
75
+ return RuntimeInfo(
76
+ simd_level=frombytes(c_info.simd_level),
77
+ detected_simd_level=frombytes(c_info.detected_simd_level))
78
+
79
+
80
+ def set_timezone_db_path(path):
81
+ """
82
+ Configure the path to text timezone database on Windows.
83
+
84
+ Parameters
85
+ ----------
86
+ path : str
87
+ Path to text timezone database.
88
+ """
89
+ cdef:
90
+ CGlobalOptions options
91
+
92
+ if path is not None:
93
+ options.timezone_db_path = <c_string>tobytes(path)
94
+
95
+ check_status(Initialize(options))
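These build/runtime helpers are exposed at the top level of pyarrow; a quick sketch:

    import pyarrow as pa

    info = pa.cpp_build_info              # BuildInfo namedtuple defined above
    print(info.version, info.compiler_id, info.build_type)
    print(pa.cpp_version_info)            # VersionInfo(major=..., minor=..., patch=...)
    print(pa.runtime_info().simd_level)   # e.g. 'avx2', depending on the host CPU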
env-llmeval/lib/python3.10/site-packages/pyarrow/csv.py ADDED
@@ -0,0 +1,22 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ from pyarrow._csv import ( # noqa
20
+ ReadOptions, ParseOptions, ConvertOptions, ISO8601,
21
+ open_csv, read_csv, CSVStreamingReader, write_csv,
22
+ WriteOptions, CSVWriter, InvalidRow)
env-llmeval/lib/python3.10/site-packages/pyarrow/cuda.py ADDED
@@ -0,0 +1,25 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # flake8: noqa
19
+
20
+
21
+ from pyarrow._cuda import (Context, IpcMemHandle, CudaBuffer,
22
+ HostBuffer, BufferReader, BufferWriter,
23
+ new_host_buffer,
24
+ serialize_record_batch, read_message,
25
+ read_record_batch)
env-llmeval/lib/python3.10/site-packages/pyarrow/dataset.py ADDED
@@ -0,0 +1,1023 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ """Dataset is currently unstable. APIs subject to change without notice."""
19
+
20
+ import pyarrow as pa
21
+ from pyarrow.util import _is_iterable, _stringify_path, _is_path_like
22
+
23
+ try:
24
+ from pyarrow._dataset import ( # noqa
25
+ CsvFileFormat,
26
+ CsvFragmentScanOptions,
27
+ JsonFileFormat,
28
+ JsonFragmentScanOptions,
29
+ Dataset,
30
+ DatasetFactory,
31
+ DirectoryPartitioning,
32
+ FeatherFileFormat,
33
+ FilenamePartitioning,
34
+ FileFormat,
35
+ FileFragment,
36
+ FileSystemDataset,
37
+ FileSystemDatasetFactory,
38
+ FileSystemFactoryOptions,
39
+ FileWriteOptions,
40
+ Fragment,
41
+ FragmentScanOptions,
42
+ HivePartitioning,
43
+ IpcFileFormat,
44
+ IpcFileWriteOptions,
45
+ InMemoryDataset,
46
+ Partitioning,
47
+ PartitioningFactory,
48
+ Scanner,
49
+ TaggedRecordBatch,
50
+ UnionDataset,
51
+ UnionDatasetFactory,
52
+ WrittenFile,
53
+ get_partition_keys,
54
+ get_partition_keys as _get_partition_keys, # keep for backwards compatibility
55
+ _filesystemdataset_write,
56
+ )
57
+ except ImportError as exc:
58
+ raise ImportError(
59
+ f"The pyarrow installation is not built with support for 'dataset' ({str(exc)})"
60
+ ) from None
61
+
62
+ # keep Expression functionality exposed here for backwards compatibility
63
+ from pyarrow.compute import Expression, scalar, field # noqa
64
+
65
+
66
+ _orc_available = False
67
+ _orc_msg = (
68
+ "The pyarrow installation is not built with support for the ORC file "
69
+ "format."
70
+ )
71
+
72
+ try:
73
+ from pyarrow._dataset_orc import OrcFileFormat
74
+ _orc_available = True
75
+ except ImportError:
76
+ pass
77
+
78
+ _parquet_available = False
79
+ _parquet_msg = (
80
+ "The pyarrow installation is not built with support for the Parquet file "
81
+ "format."
82
+ )
83
+
84
+ try:
85
+ from pyarrow._dataset_parquet import ( # noqa
86
+ ParquetDatasetFactory,
87
+ ParquetFactoryOptions,
88
+ ParquetFileFormat,
89
+ ParquetFileFragment,
90
+ ParquetFileWriteOptions,
91
+ ParquetFragmentScanOptions,
92
+ ParquetReadOptions,
93
+ RowGroupInfo,
94
+ )
95
+ _parquet_available = True
96
+ except ImportError:
97
+ pass
98
+
99
+
100
+ try:
101
+ from pyarrow._dataset_parquet_encryption import ( # noqa
102
+ ParquetDecryptionConfig,
103
+ ParquetEncryptionConfig,
104
+ )
105
+ except ImportError:
106
+ pass
107
+
108
+
109
+ def __getattr__(name):
110
+ if name == "OrcFileFormat" and not _orc_available:
111
+ raise ImportError(_orc_msg)
112
+
113
+ if name == "ParquetFileFormat" and not _parquet_available:
114
+ raise ImportError(_parquet_msg)
115
+
116
+ raise AttributeError(
117
+ "module 'pyarrow.dataset' has no attribute '{0}'".format(name)
118
+ )
119
+
120
+
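The module-level __getattr__ above only fires for names that were not imported
eagerly; a brief, editorial sketch of its behavior (the ORC branch only raises
when pyarrow was built without ORC support):

    import pyarrow.dataset as ds

    try:
        fmt = ds.OrcFileFormat()   # ImportError if ORC support is missing
    except ImportError as err:
        print(err)

    try:
        ds.NoSuchFormat            # unknown names raise AttributeError
    except AttributeError as err:
        print(err)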
121
+ def partitioning(schema=None, field_names=None, flavor=None,
122
+ dictionaries=None):
123
+ """
124
+ Specify a partitioning scheme.
125
+
126
+ The supported schemes include:
127
+
128
+ - "DirectoryPartitioning": this scheme expects one segment in the file path
129
+ for each field in the specified schema (all fields are required to be
130
+ present). For example, given schema<year:int16, month:int8>, the path
131
+ "/2009/11" would be parsed to ("year"_ == 2009 and "month"_ == 11).
132
+ - "HivePartitioning": a scheme for "/$key=$value/" nested directories as
133
+ found in Apache Hive. This is a multi-level, directory based partitioning
134
+ scheme. Data is partitioned by static values of a particular column in
135
+ the schema. Partition keys are represented in the form $key=$value in
136
+ directory names. Field order is ignored, as are missing or unrecognized
137
+ field names.
138
+ For example, given schema<year:int16, month:int8, day:int8>, a possible
139
+ path would be "/year=2009/month=11/day=15" (but the field order does not
140
+ need to match).
141
+ - "FilenamePartitioning": this scheme expects the partitions will have
142
+ filenames containing the field values separated by "_".
143
+ For example, given schema<year:int16, month:int8, day:int8>, a possible
144
+ partition filename "2009_11_part-0.parquet" would be parsed
145
+ to ("year"_ == 2009 and "month"_ == 11).
146
+
147
+ Parameters
148
+ ----------
149
+ schema : pyarrow.Schema, default None
150
+ The schema that describes the partitions present in the file path.
151
+ If not specified, and `field_names` and/or `flavor` are specified,
152
+ the schema will be inferred from the file path (and a
153
+ PartitioningFactory is returned).
154
+ field_names : list of str, default None
155
+ A list of strings (field names). If specified, the schema's types are
156
+ inferred from the file paths (only valid for DirectoryPartitioning).
157
+ flavor : str, default None
158
+ The default is DirectoryPartitioning. Specify ``flavor="hive"`` for
159
+ a HivePartitioning, and ``flavor="filename"`` for a
160
+ FilenamePartitioning.
161
+ dictionaries : dict[str, Array]
162
+ If the type of any field of `schema` is a dictionary type, the
163
+ corresponding entry of `dictionaries` must be an array containing
164
+ every value which may be taken by the corresponding column or an
165
+ error will be raised in parsing. Alternatively, pass `infer` to have
166
+ Arrow discover the dictionary values, in which case a
167
+ PartitioningFactory is returned.
168
+
169
+ Returns
170
+ -------
171
+ Partitioning or PartitioningFactory
172
+ The partitioning scheme
173
+
174
+ Examples
175
+ --------
176
+
177
+ Specify the Schema for paths like "/2009/June":
178
+
179
+ >>> import pyarrow as pa
180
+ >>> import pyarrow.dataset as ds
181
+ >>> part = ds.partitioning(pa.schema([("year", pa.int16()),
182
+ ... ("month", pa.string())]))
183
+
184
+ or let the types be inferred by only specifying the field names:
185
+
186
+ >>> part = ds.partitioning(field_names=["year", "month"])
187
+
188
+ For paths like "/2009/June", the year will be inferred as int32 while month
189
+ will be inferred as string.
190
+
191
+ Specify a Schema with dictionary encoding, providing dictionary values:
192
+
193
+ >>> part = ds.partitioning(
194
+ ... pa.schema([
195
+ ... ("year", pa.int16()),
196
+ ... ("month", pa.dictionary(pa.int8(), pa.string()))
197
+ ... ]),
198
+ ... dictionaries={
199
+ ... "month": pa.array(["January", "February", "March"]),
200
+ ... })
201
+
202
+ Alternatively, specify a Schema with dictionary encoding, but have Arrow
203
+ infer the dictionary values:
204
+
205
+ >>> part = ds.partitioning(
206
+ ... pa.schema([
207
+ ... ("year", pa.int16()),
208
+ ... ("month", pa.dictionary(pa.int8(), pa.string()))
209
+ ... ]),
210
+ ... dictionaries="infer")
211
+
212
+ Create a Hive scheme for a path like "/year=2009/month=11":
213
+
214
+ >>> part = ds.partitioning(
215
+ ... pa.schema([("year", pa.int16()), ("month", pa.int8())]),
216
+ ... flavor="hive")
217
+
218
+ A Hive scheme can also be discovered from the directory structure (and
219
+ types will be inferred):
220
+
221
+ >>> part = ds.partitioning(flavor="hive")
222
+ """
223
+ if flavor is None:
224
+ # default flavor
225
+ if schema is not None:
226
+ if field_names is not None:
227
+ raise ValueError(
228
+ "Cannot specify both 'schema' and 'field_names'")
229
+ if dictionaries == 'infer':
230
+ return DirectoryPartitioning.discover(schema=schema)
231
+ return DirectoryPartitioning(schema, dictionaries)
232
+ elif field_names is not None:
233
+ if isinstance(field_names, list):
234
+ return DirectoryPartitioning.discover(field_names)
235
+ else:
236
+ raise ValueError(
237
+ "Expected list of field names, got {}".format(
238
+ type(field_names)))
239
+ else:
240
+ raise ValueError(
241
+ "For the default directory flavor, need to specify "
242
+ "a Schema or a list of field names")
243
+ if flavor == "filename":
244
+ if schema is not None:
245
+ if field_names is not None:
246
+ raise ValueError(
247
+ "Cannot specify both 'schema' and 'field_names'")
248
+ if dictionaries == 'infer':
249
+ return FilenamePartitioning.discover(schema=schema)
250
+ return FilenamePartitioning(schema, dictionaries)
251
+ elif field_names is not None:
252
+ if isinstance(field_names, list):
253
+ return FilenamePartitioning.discover(field_names)
254
+ else:
255
+ raise ValueError(
256
+ "Expected list of field names, got {}".format(
257
+ type(field_names)))
258
+ else:
259
+ raise ValueError(
260
+ "For the filename flavor, need to specify "
261
+ "a Schema or a list of field names")
262
+ elif flavor == 'hive':
263
+ if field_names is not None:
264
+ raise ValueError("Cannot specify 'field_names' for flavor 'hive'")
265
+ elif schema is not None:
266
+ if isinstance(schema, pa.Schema):
267
+ if dictionaries == 'infer':
268
+ return HivePartitioning.discover(schema=schema)
269
+ return HivePartitioning(schema, dictionaries)
270
+ else:
271
+ raise ValueError(
272
+ "Expected Schema for 'schema', got {}".format(
273
+ type(schema)))
274
+ else:
275
+ return HivePartitioning.discover()
276
+ else:
277
+ raise ValueError("Unsupported flavor")
278
+
279
+
280
+ def _ensure_partitioning(scheme):
281
+ """
282
+ Validate input and return a Partitioning(Factory).
283
+
284
+ It passes None through if no partitioning scheme is defined.
285
+ """
286
+ if scheme is None:
287
+ pass
288
+ elif isinstance(scheme, str):
289
+ scheme = partitioning(flavor=scheme)
290
+ elif isinstance(scheme, list):
291
+ scheme = partitioning(field_names=scheme)
292
+ elif isinstance(scheme, (Partitioning, PartitioningFactory)):
293
+ pass
294
+ else:
295
+ ValueError("Expected Partitioning or PartitioningFactory, got {}"
296
+ .format(type(scheme)))
297
+ return scheme
298
+
299
+
300
+ def _ensure_format(obj):
301
+ if isinstance(obj, FileFormat):
302
+ return obj
303
+ elif obj == "parquet":
304
+ if not _parquet_available:
305
+ raise ValueError(_parquet_msg)
306
+ return ParquetFileFormat()
307
+ elif obj in {"ipc", "arrow"}:
308
+ return IpcFileFormat()
309
+ elif obj == "feather":
310
+ return FeatherFileFormat()
311
+ elif obj == "csv":
312
+ return CsvFileFormat()
313
+ elif obj == "orc":
314
+ if not _orc_available:
315
+ raise ValueError(_orc_msg)
316
+ return OrcFileFormat()
317
+ elif obj == "json":
318
+ return JsonFileFormat()
319
+ else:
320
+ raise ValueError("format '{}' is not supported".format(obj))
321
+
322
+
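A small editorial sketch of the string-to-FileFormat mapping handled by
_ensure_format() above; "data.csv" is an assumed example file:

    import pyarrow.dataset as ds

    # These two calls are equivalent: the format string is resolved by
    # _ensure_format() into a CsvFileFormat instance.
    d1 = ds.dataset("data.csv", format="csv")
    d2 = ds.dataset("data.csv", format=ds.CsvFileFormat())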
323
+ def _ensure_multiple_sources(paths, filesystem=None):
324
+ """
325
+ Treat a list of paths as files belonging to a single file system
326
+
327
+ If the file system is local, this also validates that all paths
328
+ reference existing *files*; otherwise (for example on a remote
329
+ filesystem) any non-file paths will be silently skipped.
330
+
331
+ Parameters
332
+ ----------
333
+ paths : list of path-like
334
+ Note that URIs are not allowed.
335
+ filesystem : FileSystem or str, optional
336
+ If a URI is passed, then its path component will act as a prefix for
337
+ the file paths.
338
+
339
+ Returns
340
+ -------
341
+ (FileSystem, list of str)
342
+ File system object and a list of normalized paths.
343
+
344
+ Raises
345
+ ------
346
+ TypeError
347
+ If the passed filesystem has wrong type.
348
+ IOError
349
+ If the file system is local and a referenced path is not available or
350
+ not a file.
351
+ """
352
+ from pyarrow.fs import (
353
+ LocalFileSystem, SubTreeFileSystem, _MockFileSystem, FileType,
354
+ _ensure_filesystem
355
+ )
356
+
357
+ if filesystem is None:
358
+ # fall back to local file system as the default
359
+ filesystem = LocalFileSystem()
360
+ else:
361
+ # construct a filesystem if it is a valid URI
362
+ filesystem = _ensure_filesystem(filesystem)
363
+
364
+ is_local = (
365
+ isinstance(filesystem, (LocalFileSystem, _MockFileSystem)) or
366
+ (isinstance(filesystem, SubTreeFileSystem) and
367
+ isinstance(filesystem.base_fs, LocalFileSystem))
368
+ )
369
+
370
+ # allow normalizing irregular paths such as Windows local paths
371
+ paths = [filesystem.normalize_path(_stringify_path(p)) for p in paths]
372
+
373
+ # validate that all of the paths are pointing to existing *files*
374
+ # possible improvement is to group the file_infos by type and raise for
375
+ # multiple paths per error category
376
+ if is_local:
377
+ for info in filesystem.get_file_info(paths):
378
+ file_type = info.type
379
+ if file_type == FileType.File:
380
+ continue
381
+ elif file_type == FileType.NotFound:
382
+ raise FileNotFoundError(info.path)
383
+ elif file_type == FileType.Directory:
384
+ raise IsADirectoryError(
385
+ 'Path {} points to a directory, but only file paths are '
386
+ 'supported. To construct a nested or union dataset pass '
387
+ 'a list of dataset objects instead.'.format(info.path)
388
+ )
389
+ else:
390
+ raise IOError(
391
+ 'Path {} exists but its type is unknown (could be a '
392
+ 'special file such as a Unix socket or character device, '
393
+ 'or Windows NUL / CON / ...)'.format(info.path)
394
+ )
395
+
396
+ return filesystem, paths
397
+
398
+
399
+ def _ensure_single_source(path, filesystem=None):
400
+ """
401
+ Treat path as either a recursively traversable directory or a single file.
402
+
403
+ Parameters
404
+ ----------
405
+ path : path-like
406
+ filesystem : FileSystem or str, optional
407
+ If a URI is passed, then its path component will act as a prefix for
408
+ the file paths.
409
+
410
+ Returns
411
+ -------
412
+ (FileSystem, list of str or fs.Selector)
413
+ File system object and either a single item list pointing to a file or
414
+ an fs.Selector object pointing to a directory.
415
+
416
+ Raises
417
+ ------
418
+ TypeError
419
+ If the passed filesystem has wrong type.
420
+ FileNotFoundError
421
+ If the referenced file or directory doesn't exist.
422
+ """
423
+ from pyarrow.fs import FileType, FileSelector, _resolve_filesystem_and_path
424
+
425
+ # at this point we already checked that `path` is a path-like
426
+ filesystem, path = _resolve_filesystem_and_path(path, filesystem)
427
+
428
+ # ensure that the path is normalized before passing to dataset discovery
429
+ path = filesystem.normalize_path(path)
430
+
431
+ # retrieve the file descriptor
432
+ file_info = filesystem.get_file_info(path)
433
+
434
+ # depending on the path type either return with a recursive
435
+ # directory selector or as a list containing a single file
436
+ if file_info.type == FileType.Directory:
437
+ paths_or_selector = FileSelector(path, recursive=True)
438
+ elif file_info.type == FileType.File:
439
+ paths_or_selector = [path]
440
+ else:
441
+ raise FileNotFoundError(path)
442
+
443
+ return filesystem, paths_or_selector
444
+
445
+
446
+ def _filesystem_dataset(source, schema=None, filesystem=None,
447
+ partitioning=None, format=None,
448
+ partition_base_dir=None, exclude_invalid_files=None,
449
+ selector_ignore_prefixes=None):
450
+ """
451
+ Create a FileSystemDataset which can be used to build a Dataset.
452
+
453
+ Parameters are documented in the dataset function.
454
+
455
+ Returns
456
+ -------
457
+ FileSystemDataset
458
+ """
459
+ format = _ensure_format(format or 'parquet')
460
+ partitioning = _ensure_partitioning(partitioning)
461
+
462
+ if isinstance(source, (list, tuple)):
463
+ fs, paths_or_selector = _ensure_multiple_sources(source, filesystem)
464
+ else:
465
+ fs, paths_or_selector = _ensure_single_source(source, filesystem)
466
+
467
+ options = FileSystemFactoryOptions(
468
+ partitioning=partitioning,
469
+ partition_base_dir=partition_base_dir,
470
+ exclude_invalid_files=exclude_invalid_files,
471
+ selector_ignore_prefixes=selector_ignore_prefixes
472
+ )
473
+ factory = FileSystemDatasetFactory(fs, paths_or_selector, format, options)
474
+
475
+ return factory.finish(schema)
476
+
477
+
478
+ def _in_memory_dataset(source, schema=None, **kwargs):
479
+ if any(v is not None for v in kwargs.values()):
480
+ raise ValueError(
481
+ "For in-memory datasets, you cannot pass any additional arguments")
482
+ return InMemoryDataset(source, schema)
483
+
484
+
485
+ def _union_dataset(children, schema=None, **kwargs):
486
+ if any(v is not None for v in kwargs.values()):
487
+ raise ValueError(
488
+ "When passing a list of Datasets, you cannot pass any additional "
489
+ "arguments"
490
+ )
491
+
492
+ if schema is None:
493
+ # unify the children datasets' schemas
494
+ schema = pa.unify_schemas([child.schema for child in children])
495
+
496
+ for child in children:
497
+ if getattr(child, "_scan_options", None):
498
+ raise ValueError(
499
+ "Creating an UnionDataset from filtered or projected Datasets "
500
+ "is currently not supported. Union the unfiltered datasets "
501
+ "and apply the filter to the resulting union."
502
+ )
503
+
504
+ # create datasets with the requested schema
505
+ children = [child.replace_schema(schema) for child in children]
506
+
507
+ return UnionDataset(schema, children)
508
+
509
+
510
+ def parquet_dataset(metadata_path, schema=None, filesystem=None, format=None,
511
+ partitioning=None, partition_base_dir=None):
512
+ """
513
+ Create a FileSystemDataset from a `_metadata` file created via
514
+ `pyarrow.parquet.write_metadata`.
515
+
516
+ Parameters
517
+ ----------
518
+ metadata_path : path
519
+ Path pointing to a single Parquet metadata (`_metadata`) file
520
+ schema : Schema, optional
521
+ Optionally provide the Schema for the Dataset, in which case it will
522
+ not be inferred from the source.
523
+ filesystem : FileSystem or URI string, default None
524
+ If a single path is given as source and filesystem is None, then the
525
+ filesystem will be inferred from the path.
526
+ If a URI string is passed, then a filesystem object is constructed
527
+ using the URI's optional path component as a directory prefix. See the
528
+ examples below.
529
+ Note that the URIs on Windows must follow 'file:///C:...' or
530
+ 'file:/C:...' patterns.
531
+ format : ParquetFileFormat
532
+ An instance of a ParquetFileFormat if special options need to be
533
+ passed.
534
+ partitioning : Partitioning, PartitioningFactory, str, list of str
535
+ The partitioning scheme specified with the ``partitioning()``
536
+ function. A flavor string can be used as shortcut, and with a list of
537
+ field names a DirectoryPartitioning will be inferred.
538
+ partition_base_dir : str, optional
539
+ For the purposes of applying the partitioning, paths will be
540
+ stripped of the partition_base_dir. Files not matching the
541
+ partition_base_dir prefix will be skipped for partitioning discovery.
542
+ The ignored files will still be part of the Dataset, but will not
543
+ have partition information.
544
+
545
+ Returns
546
+ -------
547
+ FileSystemDataset
548
+ The dataset corresponding to the given metadata
549
+ """
550
+ from pyarrow.fs import LocalFileSystem, _ensure_filesystem
551
+
552
+ if format is None:
553
+ format = ParquetFileFormat()
554
+ elif not isinstance(format, ParquetFileFormat):
555
+ raise ValueError("format argument must be a ParquetFileFormat")
556
+
557
+ if filesystem is None:
558
+ filesystem = LocalFileSystem()
559
+ else:
560
+ filesystem = _ensure_filesystem(filesystem)
561
+
562
+ metadata_path = filesystem.normalize_path(_stringify_path(metadata_path))
563
+ options = ParquetFactoryOptions(
564
+ partition_base_dir=partition_base_dir,
565
+ partitioning=_ensure_partitioning(partitioning)
566
+ )
567
+
568
+ factory = ParquetDatasetFactory(
569
+ metadata_path, filesystem, format, options=options)
570
+ return factory.finish(schema)
571
+
572
+
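A hedged usage sketch for parquet_dataset() above, whose docstring has no
Examples section; the directory "dataset_root" and the table contents are
illustrative assumptions:

    import pyarrow as pa
    import pyarrow.dataset as ds
    import pyarrow.parquet as pq

    table = pa.table({"year": [2020, 2021], "n_legs": [2, 4]})

    # Write the data while collecting per-file metadata, then persist it
    # as a single _metadata file next to the data files.
    metadata_collector = []
    pq.write_to_dataset(table, "dataset_root",
                        metadata_collector=metadata_collector)
    pq.write_metadata(table.schema, "dataset_root/_metadata",
                      metadata_collector=metadata_collector)

    # Open the dataset directly from the _metadata file.
    dataset = ds.parquet_dataset("dataset_root/_metadata")
    print(dataset.to_table())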
573
+ def dataset(source, schema=None, format=None, filesystem=None,
574
+ partitioning=None, partition_base_dir=None,
575
+ exclude_invalid_files=None, ignore_prefixes=None):
576
+ """
577
+ Open a dataset.
578
+
579
+ Datasets provide functionality to efficiently work with tabular,
580
+ potentially larger-than-memory, multi-file datasets.
581
+
582
+ - A unified interface for different sources, like Parquet and Feather
583
+ - Discovery of sources (crawling directories, handle directory-based
584
+ partitioned datasets, basic schema normalization)
585
+ - Optimized reading with predicate pushdown (filtering rows), projection
586
+ (selecting columns), parallel reading or fine-grained managing of tasks.
587
+
588
+ Note that this is the high-level API; to have more control over the dataset
589
+ construction, use the low-level API classes (FileSystemDataset,
590
+ FileSystemDatasetFactory, etc.)
591
+
592
+ Parameters
593
+ ----------
594
+ source : path, list of paths, dataset, list of datasets, (list of) \
595
+ RecordBatch or Table, iterable of RecordBatch, RecordBatchReader, or URI
596
+ Path pointing to a single file:
597
+ Open a FileSystemDataset from a single file.
598
+ Path pointing to a directory:
599
+ The directory gets discovered recursively according to a
600
+ partitioning scheme if given.
601
+ List of file paths:
602
+ Create a FileSystemDataset from explicitly given files. The files
603
+ must be located on the same filesystem given by the filesystem
604
+ parameter.
605
+ Note that, in contrast to construction from a single file, passing
606
+ URIs as paths is not allowed.
607
+ List of datasets:
608
+ A nested UnionDataset gets constructed; it allows arbitrary
609
+ composition of other datasets.
610
+ Note that additional keyword arguments are not allowed.
611
+ (List of) batches or tables, iterable of batches, or RecordBatchReader:
612
+ Create an InMemoryDataset. If an iterable or empty list is given,
613
+ a schema must also be given. If an iterable or RecordBatchReader
614
+ is given, the resulting dataset can only be scanned once; further
615
+ attempts will raise an error.
616
+ schema : Schema, optional
617
+ Optionally provide the Schema for the Dataset, in which case it will
618
+ not be inferred from the source.
619
+ format : FileFormat or str
620
+ Currently "parquet", "ipc"/"arrow"/"feather", "csv", "json", and "orc" are
621
+ supported. For Feather, only version 2 files are supported.
622
+ filesystem : FileSystem or URI string, default None
623
+ If a single path is given as source and filesystem is None, then the
624
+ filesystem will be inferred from the path.
625
+ If a URI string is passed, then a filesystem object is constructed
626
+ using the URI's optional path component as a directory prefix. See the
627
+ examples below.
628
+ Note that the URIs on Windows must follow 'file:///C:...' or
629
+ 'file:/C:...' patterns.
630
+ partitioning : Partitioning, PartitioningFactory, str, list of str
631
+ The partitioning scheme specified with the ``partitioning()``
632
+ function. A flavor string can be used as shortcut, and with a list of
633
+ field names a DirectoryPartitioning will be inferred.
634
+ partition_base_dir : str, optional
635
+ For the purposes of applying the partitioning, paths will be
636
+ stripped of the partition_base_dir. Files not matching the
637
+ partition_base_dir prefix will be skipped for partitioning discovery.
638
+ The ignored files will still be part of the Dataset, but will not
639
+ have partition information.
640
+ exclude_invalid_files : bool, optional (default True)
641
+ If True, invalid files will be excluded (file format specific check).
642
+ This will incur IO for each file in a serial and single-threaded
643
+ fashion. Disabling this feature will skip the IO, but unsupported
644
+ files may be present in the Dataset (resulting in an error at scan
645
+ time).
646
+ ignore_prefixes : list, optional
647
+ Files matching any of these prefixes will be ignored by the
648
+ discovery process. This is matched to the basename of a path.
649
+ By default this is ['.', '_'].
650
+ Note that discovery happens only if a directory is passed as source.
651
+
652
+ Returns
653
+ -------
654
+ dataset : Dataset
655
+ Either a FileSystemDataset or a UnionDataset depending on the source
656
+ parameter.
657
+
658
+ Examples
659
+ --------
660
+ Creating an example Table:
661
+
662
+ >>> import pyarrow as pa
663
+ >>> import pyarrow.parquet as pq
664
+ >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
665
+ ... 'n_legs': [2, 2, 4, 4, 5, 100],
666
+ ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
667
+ ... "Brittle stars", "Centipede"]})
668
+ >>> pq.write_table(table, "file.parquet")
669
+
670
+ Opening a single file:
671
+
672
+ >>> import pyarrow.dataset as ds
673
+ >>> dataset = ds.dataset("file.parquet", format="parquet")
674
+ >>> dataset.to_table()
675
+ pyarrow.Table
676
+ year: int64
677
+ n_legs: int64
678
+ animal: string
679
+ ----
680
+ year: [[2020,2022,2021,2022,2019,2021]]
681
+ n_legs: [[2,2,4,4,5,100]]
682
+ animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]]
683
+
684
+ Opening a single file with an explicit schema:
685
+
686
+ >>> myschema = pa.schema([
687
+ ... ('n_legs', pa.int64()),
688
+ ... ('animal', pa.string())])
689
+ >>> dataset = ds.dataset("file.parquet", schema=myschema, format="parquet")
690
+ >>> dataset.to_table()
691
+ pyarrow.Table
692
+ n_legs: int64
693
+ animal: string
694
+ ----
695
+ n_legs: [[2,2,4,4,5,100]]
696
+ animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]]
697
+
698
+ Opening a dataset for a single directory:
699
+
700
+ >>> ds.write_dataset(table, "partitioned_dataset", format="parquet",
701
+ ... partitioning=['year'])
702
+ >>> dataset = ds.dataset("partitioned_dataset", format="parquet")
703
+ >>> dataset.to_table()
704
+ pyarrow.Table
705
+ n_legs: int64
706
+ animal: string
707
+ ----
708
+ n_legs: [[5],[2],[4,100],[2,4]]
709
+ animal: [["Brittle stars"],["Flamingo"],...["Parrot","Horse"]]
710
+
711
+ For a single directory from a S3 bucket:
712
+
713
+ >>> ds.dataset("s3://mybucket/nyc-taxi/",
714
+ ... format="parquet") # doctest: +SKIP
715
+
716
+ Opening a dataset from a list of relative local paths:
717
+
718
+ >>> dataset = ds.dataset([
719
+ ... "partitioned_dataset/2019/part-0.parquet",
720
+ ... "partitioned_dataset/2020/part-0.parquet",
721
+ ... "partitioned_dataset/2021/part-0.parquet",
722
+ ... ], format='parquet')
723
+ >>> dataset.to_table()
724
+ pyarrow.Table
725
+ n_legs: int64
726
+ animal: string
727
+ ----
728
+ n_legs: [[5],[2],[4,100]]
729
+ animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"]]
730
+
731
+ With filesystem provided:
732
+
733
+ >>> paths = [
734
+ ... 'part0/data.parquet',
735
+ ... 'part1/data.parquet',
736
+ ... 'part3/data.parquet',
737
+ ... ]
738
+ >>> ds.dataset(paths, filesystem='file:///directory/prefix',
739
+ ... format='parquet') # doctest: +SKIP
740
+
741
+ Which is equivalent to:
742
+
743
+ >>> fs = SubTreeFileSystem("/directory/prefix",
744
+ ... LocalFileSystem()) # doctest: +SKIP
745
+ >>> ds.dataset(paths, filesystem=fs, format='parquet') # doctest: +SKIP
746
+
747
+ With a remote filesystem URI:
748
+
749
+ >>> paths = [
750
+ ... 'nested/directory/part0/data.parquet',
751
+ ... 'nested/directory/part1/data.parquet',
752
+ ... 'nested/directory/part3/data.parquet',
753
+ ... ]
754
+ >>> ds.dataset(paths, filesystem='s3://bucket/',
755
+ ... format='parquet') # doctest: +SKIP
756
+
757
+ Similarly to the local example, the directory prefix may be included in the
758
+ filesystem URI:
759
+
760
+ >>> ds.dataset(paths, filesystem='s3://bucket/nested/directory',
761
+ ... format='parquet') # doctest: +SKIP
762
+
763
+ Construction of a nested dataset:
764
+
765
+ >>> ds.dataset([
766
+ ... dataset("s3://old-taxi-data", format="parquet"),
767
+ ... dataset("local/path/to/data", format="ipc")
768
+ ... ]) # doctest: +SKIP
769
+ """
770
+ # collect the keyword arguments for later reuse
771
+ kwargs = dict(
772
+ schema=schema,
773
+ filesystem=filesystem,
774
+ partitioning=partitioning,
775
+ format=format,
776
+ partition_base_dir=partition_base_dir,
777
+ exclude_invalid_files=exclude_invalid_files,
778
+ selector_ignore_prefixes=ignore_prefixes
779
+ )
780
+
781
+ if _is_path_like(source):
782
+ return _filesystem_dataset(source, **kwargs)
783
+ elif isinstance(source, (tuple, list)):
784
+ if all(_is_path_like(elem) for elem in source):
785
+ return _filesystem_dataset(source, **kwargs)
786
+ elif all(isinstance(elem, Dataset) for elem in source):
787
+ return _union_dataset(source, **kwargs)
788
+ elif all(isinstance(elem, (pa.RecordBatch, pa.Table))
789
+ for elem in source):
790
+ return _in_memory_dataset(source, **kwargs)
791
+ else:
792
+ unique_types = set(type(elem).__name__ for elem in source)
793
+ type_names = ', '.join('{}'.format(t) for t in unique_types)
794
+ raise TypeError(
795
+ 'Expected a list of path-like or dataset objects, or a list '
796
+ 'of batches or tables. The given list contains the following '
797
+ 'types: {}'.format(type_names)
798
+ )
799
+ elif isinstance(source, (pa.RecordBatch, pa.Table)):
800
+ return _in_memory_dataset(source, **kwargs)
801
+ else:
802
+ raise TypeError(
803
+ 'Expected a path-like, list of path-likes or a list of Datasets '
804
+ 'instead of the given type: {}'.format(type(source).__name__)
805
+ )
806
+
807
+
808
+ def _ensure_write_partitioning(part, schema, flavor):
809
+ if isinstance(part, PartitioningFactory):
810
+ raise ValueError("A PartitioningFactory cannot be used. "
811
+ "Did you call the partitioning function "
812
+ "without supplying a schema?")
813
+
814
+ if isinstance(part, Partitioning) and flavor:
815
+ raise ValueError(
816
+ "Providing a partitioning_flavor with "
817
+ "a Partitioning object is not supported"
818
+ )
819
+ elif isinstance(part, (tuple, list)):
820
+ # Name of fields were provided instead of a partitioning object.
821
+ # Create a partitioning factory with those field names.
822
+ part = partitioning(
823
+ schema=pa.schema([schema.field(f) for f in part]),
824
+ flavor=flavor
825
+ )
826
+ elif part is None:
827
+ part = partitioning(pa.schema([]), flavor=flavor)
828
+
829
+ if not isinstance(part, Partitioning):
830
+ raise ValueError(
831
+ "partitioning must be a Partitioning object or "
832
+ "a list of column names"
833
+ )
834
+
835
+ return part
836
+
837
+
838
+ def write_dataset(data, base_dir, *, basename_template=None, format=None,
839
+ partitioning=None, partitioning_flavor=None, schema=None,
840
+ filesystem=None, file_options=None, use_threads=True,
841
+ max_partitions=None, max_open_files=None,
842
+ max_rows_per_file=None, min_rows_per_group=None,
843
+ max_rows_per_group=None, file_visitor=None,
844
+ existing_data_behavior='error', create_dir=True):
845
+ """
846
+ Write a dataset to a given format and partitioning.
847
+
848
+ Parameters
849
+ ----------
850
+ data : Dataset, Table/RecordBatch, RecordBatchReader, list of \
851
+ Table/RecordBatch, or iterable of RecordBatch
852
+ The data to write. This can be a Dataset instance or
853
+ in-memory Arrow data. If an iterable is given, the schema must
854
+ also be given.
855
+ base_dir : str
856
+ The root directory where to write the dataset.
857
+ basename_template : str, optional
858
+ A template string used to generate basenames of written data files.
859
+ The token '{i}' will be replaced with an automatically incremented
860
+ integer. If not specified, it defaults to
861
+ "part-{i}." + format.default_extname
862
+ format : FileFormat or str
863
+ The format in which to write the dataset. Currently supported:
864
+ "parquet", "ipc"/"arrow"/"feather", and "csv". If a FileSystemDataset
865
+ is being written and `format` is not specified, it defaults to the
866
+ same format as the specified FileSystemDataset. When writing a
867
+ Table or RecordBatch, this keyword is required.
868
+ partitioning : Partitioning or list[str], optional
869
+ The partitioning scheme specified with the ``partitioning()``
870
+ function or a list of field names. When providing a list of
871
+ field names, you can use ``partitioning_flavor`` to drive which
872
+ partitioning type should be used.
873
+ partitioning_flavor : str, optional
874
+ One of the partitioning flavors supported by
875
+ ``pyarrow.dataset.partitioning``. If omitted will use the
876
+ default of ``partitioning()`` which is directory partitioning.
877
+ schema : Schema, optional
878
+ filesystem : FileSystem, optional
879
+ file_options : pyarrow.dataset.FileWriteOptions, optional
880
+ FileFormat specific write options, created using the
881
+ ``FileFormat.make_write_options()`` function.
882
+ use_threads : bool, default True
883
+ Write files in parallel. If enabled, then maximum parallelism will be
884
+ used, determined by the number of available CPU cores.
885
+ max_partitions : int, default 1024
886
+ Maximum number of partitions any batch may be written into.
887
+ max_open_files : int, default 1024
888
+ If greater than 0 then this will limit the maximum number of
889
+ files that can be left open. If an attempt is made to open
890
+ too many files then the least recently used file will be closed.
891
+ If this setting is set too low you may end up fragmenting your
892
+ data into many small files.
893
+ max_rows_per_file : int, default 0
894
+ Maximum number of rows per file. If greater than 0 then this will
895
+ limit how many rows are placed in any single file. Otherwise there
896
+ will be no limit and one file will be created in each output
897
+ directory unless files need to be closed to respect max_open_files
898
+ min_rows_per_group : int, default 0
899
+ Minimum number of rows per group. When the value is greater than 0,
900
+ the dataset writer will batch incoming data and only write the row
901
+ groups to the disk when sufficient rows have accumulated.
902
+ max_rows_per_group : int, default 1024 * 1024
903
+ Maximum number of rows per group. If the value is greater than 0,
904
+ then the dataset writer may split up large incoming batches into
905
+ multiple row groups. If this value is set, then min_rows_per_group
906
+ should also be set; otherwise the writer could end up with very small row
907
+ groups.
908
+ file_visitor : function
909
+ If set, this function will be called with a WrittenFile instance
910
+ for each file created during the call. This object will have both
911
+ a path attribute and a metadata attribute.
912
+
913
+ The path attribute will be a string containing the path to
914
+ the created file.
915
+
916
+ The metadata attribute will be the parquet metadata of the file.
917
+ This metadata will have the file path attribute set and can be used
918
+ to build a _metadata file. The metadata attribute will be None if
919
+ the format is not parquet.
920
+
921
+ Example visitor which simply collects the filenames created::
922
+
923
+ visited_paths = []
924
+
925
+ def file_visitor(written_file):
926
+ visited_paths.append(written_file.path)
927
+ existing_data_behavior : 'error' | 'overwrite_or_ignore' | \
928
+ 'delete_matching'
929
+ Controls how the dataset will handle data that already exists in
930
+ the destination. The default behavior ('error') is to raise an error
931
+ if any data exists in the destination.
932
+
933
+ 'overwrite_or_ignore' will ignore any existing data and will
934
+ overwrite files with the same name as an output file. Other
935
+ existing files will be ignored. This behavior, in combination
936
+ with a unique basename_template for each write, will allow for
937
+ an append workflow.
938
+
939
+ 'delete_matching' is useful when you are writing a partitioned
940
+ dataset. The first time each partition directory is encountered
941
+ the entire directory will be deleted. This allows you to overwrite
942
+ old partitions completely.
943
+ create_dir : bool, default True
944
+ If False, directories will not be created. This can be useful for
945
+ filesystems that do not require directories.
946
+ """
947
+ from pyarrow.fs import _resolve_filesystem_and_path
948
+
949
+ if isinstance(data, (list, tuple)):
950
+ schema = schema or data[0].schema
951
+ data = InMemoryDataset(data, schema=schema)
952
+ elif isinstance(data, (pa.RecordBatch, pa.Table)):
953
+ schema = schema or data.schema
954
+ data = InMemoryDataset(data, schema=schema)
955
+ elif isinstance(data, pa.ipc.RecordBatchReader) or _is_iterable(data):
956
+ data = Scanner.from_batches(data, schema=schema)
957
+ schema = None
958
+ elif not isinstance(data, (Dataset, Scanner)):
959
+ raise ValueError(
960
+ "Only Dataset, Scanner, Table/RecordBatch, RecordBatchReader, "
961
+ "a list of Tables/RecordBatches, or iterable of batches are "
962
+ "supported."
963
+ )
964
+
965
+ if format is None and isinstance(data, FileSystemDataset):
966
+ format = data.format
967
+ else:
968
+ format = _ensure_format(format)
969
+
970
+ if file_options is None:
971
+ file_options = format.make_write_options()
972
+
973
+ if format != file_options.format:
974
+ raise TypeError("Supplied FileWriteOptions have format {}, "
975
+ "which doesn't match supplied FileFormat {}".format(
976
+ file_options.format, format))
977
+
978
+ if basename_template is None:
979
+ basename_template = "part-{i}." + format.default_extname
980
+
981
+ if max_partitions is None:
982
+ max_partitions = 1024
983
+
984
+ if max_open_files is None:
985
+ max_open_files = 1024
986
+
987
+ if max_rows_per_file is None:
988
+ max_rows_per_file = 0
989
+
990
+ if max_rows_per_group is None:
991
+ max_rows_per_group = 1 << 20
992
+
993
+ if min_rows_per_group is None:
994
+ min_rows_per_group = 0
995
+
996
+ # at this point data is a Scanner or a Dataset, anything else
997
+ # was converted to one of those two. So we can grab the schema
998
+ # to build the partitioning object from Dataset.
999
+ if isinstance(data, Scanner):
1000
+ partitioning_schema = data.projected_schema
1001
+ else:
1002
+ partitioning_schema = data.schema
1003
+ partitioning = _ensure_write_partitioning(partitioning,
1004
+ schema=partitioning_schema,
1005
+ flavor=partitioning_flavor)
1006
+
1007
+ filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem)
1008
+
1009
+ if isinstance(data, Dataset):
1010
+ scanner = data.scanner(use_threads=use_threads)
1011
+ else:
1012
+ # scanner was passed directly by the user, in which case a schema
1013
+ # cannot be passed
1014
+ if schema is not None:
1015
+ raise ValueError("Cannot specify a schema when writing a Scanner")
1016
+ scanner = data
1017
+
1018
+ _filesystemdataset_write(
1019
+ scanner, base_dir, basename_template, filesystem, partitioning,
1020
+ file_options, max_partitions, file_visitor, existing_data_behavior,
1021
+ max_open_files, max_rows_per_file,
1022
+ min_rows_per_group, max_rows_per_group, create_dir
1023
+ )
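A hedged usage sketch for write_dataset() above, whose docstring has no
Examples section; the table contents and the output directory "out_root" are
assumed:

    import pyarrow as pa
    import pyarrow.dataset as ds

    table = pa.table({"year": [2020, 2021, 2021], "value": [1.0, 2.0, 3.0]})

    # Partition on "year" with the default directory flavor; files are named
    # using the default "part-{i}" basename template.
    ds.write_dataset(table, "out_root", format="parquet",
                     partitioning=["year"],
                     existing_data_behavior="overwrite_or_ignore")

    # Reading back with the same partitioning recovers the "year" column
    # from the directory names.
    result = ds.dataset("out_root", format="parquet",
                        partitioning=["year"]).to_table()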
env-llmeval/lib/python3.10/site-packages/pyarrow/feather.py ADDED
@@ -0,0 +1,277 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ import os
20
+
21
+ from pyarrow.pandas_compat import _pandas_api # noqa
22
+ from pyarrow.lib import (Codec, Table, # noqa
23
+ concat_tables, schema)
24
+ import pyarrow.lib as ext
25
+ from pyarrow import _feather
26
+ from pyarrow._feather import FeatherError # noqa: F401
27
+
28
+
29
+ class FeatherDataset:
30
+ """
31
+ Encapsulates details of reading a list of Feather files.
32
+
33
+ Parameters
34
+ ----------
35
+ path_or_paths : List[str]
36
+ A list of file names
37
+ validate_schema : bool, default True
38
+ Check that individual file schemas are all the same / compatible
39
+ """
40
+
41
+ def __init__(self, path_or_paths, validate_schema=True):
42
+ self.paths = path_or_paths
43
+ self.validate_schema = validate_schema
44
+
45
+ def read_table(self, columns=None):
46
+ """
47
+ Read multiple feather files as a single pyarrow.Table
48
+
49
+ Parameters
50
+ ----------
51
+ columns : List[str]
52
+ Names of columns to read from the file
53
+
54
+ Returns
55
+ -------
56
+ pyarrow.Table
57
+ Content of the file as a table (of columns)
58
+ """
59
+ _fil = read_table(self.paths[0], columns=columns)
60
+ self._tables = [_fil]
61
+ self.schema = _fil.schema
62
+
63
+ for path in self.paths[1:]:
64
+ table = read_table(path, columns=columns)
65
+ if self.validate_schema:
66
+ self.validate_schemas(path, table)
67
+ self._tables.append(table)
68
+ return concat_tables(self._tables)
69
+
70
+ def validate_schemas(self, piece, table):
71
+ if not self.schema.equals(table.schema):
72
+ raise ValueError('Schema in {!s} was different. \n'
73
+ '{!s}\n\nvs\n\n{!s}'
74
+ .format(piece, self.schema,
75
+ table.schema))
76
+
77
+ def read_pandas(self, columns=None, use_threads=True):
78
+ """
79
+ Read multiple Feather files as a single pandas DataFrame
80
+
81
+ Parameters
82
+ ----------
83
+ columns : List[str]
84
+ Names of columns to read from the file
85
+ use_threads : bool, default True
86
+ Use multiple threads when converting to pandas
87
+
88
+ Returns
89
+ -------
90
+ pandas.DataFrame
91
+ Content of the file as a pandas DataFrame (of columns)
92
+ """
93
+ return self.read_table(columns=columns).to_pandas(
94
+ use_threads=use_threads)
95
+
96
+
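A short editorial sketch of using FeatherDataset above to concatenate several
Feather files; the file names are assumed to exist and to share a schema:

    from pyarrow.feather import FeatherDataset

    dataset = FeatherDataset(["part-0.feather", "part-1.feather"])
    table = dataset.read_table(columns=["year", "n_legs"])  # pyarrow.Table
    df = dataset.read_pandas()                              # pandas.DataFrame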
97
+ def check_chunked_overflow(name, col):
98
+ if col.num_chunks == 1:
99
+ return
100
+
101
+ if col.type in (ext.binary(), ext.string()):
102
+ raise ValueError("Column '{}' exceeds 2GB maximum capacity of "
103
+ "a Feather binary column. This restriction may be "
104
+ "lifted in the future".format(name))
105
+ else:
106
+ # TODO(wesm): Not sure when else this might be reached
107
+ raise ValueError("Column '{}' of type {} was chunked on conversion "
108
+ "to Arrow and cannot be currently written to "
109
+ "Feather format".format(name, str(col.type)))
110
+
111
+
112
+ _FEATHER_SUPPORTED_CODECS = {'lz4', 'zstd', 'uncompressed'}
113
+
114
+
115
+ def write_feather(df, dest, compression=None, compression_level=None,
116
+ chunksize=None, version=2):
117
+ """
118
+ Write a pandas.DataFrame to Feather format.
119
+
120
+ Parameters
121
+ ----------
122
+ df : pandas.DataFrame or pyarrow.Table
123
+ Data to write out as Feather format.
124
+ dest : str
125
+ Local destination path.
126
+ compression : string, default None
127
+ Can be one of {"zstd", "lz4", "uncompressed"}. The default of None uses
128
+ LZ4 for V2 files if it is available, otherwise uncompressed.
129
+ compression_level : int, default None
130
+ Use a compression level particular to the chosen compressor. If None
131
+ use the default compression level
132
+ chunksize : int, default None
133
+ For V2 files, the internal maximum size of Arrow RecordBatch chunks
134
+ when writing the Arrow IPC file format. None means use the default,
135
+ which is currently 64K
136
+ version : int, default 2
137
+ Feather file version. Version 2 is the current. Version 1 is the more
138
+ limited legacy format
139
+ """
140
+ if _pandas_api.have_pandas:
141
+ if (_pandas_api.has_sparse and
142
+ isinstance(df, _pandas_api.pd.SparseDataFrame)):
143
+ df = df.to_dense()
144
+
145
+ if _pandas_api.is_data_frame(df):
146
+ # Feather v1 creates a new column in the resultant Table to
147
+ # store index information if index type is not RangeIndex
148
+
149
+ if version == 1:
150
+ preserve_index = False
151
+ elif version == 2:
152
+ preserve_index = None
153
+ else:
154
+ raise ValueError("Version value should either be 1 or 2")
155
+
156
+ table = Table.from_pandas(df, preserve_index=preserve_index)
157
+
158
+ if version == 1:
159
+ # Version 1 does not support chunking
160
+ for i, name in enumerate(table.schema.names):
161
+ col = table[i]
162
+ check_chunked_overflow(name, col)
163
+ else:
164
+ table = df
165
+
166
+ if version == 1:
167
+ if len(table.column_names) > len(set(table.column_names)):
168
+ raise ValueError("cannot serialize duplicate column names")
169
+
170
+ if compression is not None:
171
+ raise ValueError("Feather V1 files do not support compression "
172
+ "option")
173
+
174
+ if chunksize is not None:
175
+ raise ValueError("Feather V1 files do not support chunksize "
176
+ "option")
177
+ else:
178
+ if compression is None and Codec.is_available('lz4_frame'):
179
+ compression = 'lz4'
180
+ elif (compression is not None and
181
+ compression not in _FEATHER_SUPPORTED_CODECS):
182
+ raise ValueError('compression="{}" not supported, must be '
183
+ 'one of {}'.format(compression,
184
+ _FEATHER_SUPPORTED_CODECS))
185
+
186
+ try:
187
+ _feather.write_feather(table, dest, compression=compression,
188
+ compression_level=compression_level,
189
+ chunksize=chunksize, version=version)
190
+ except Exception:
191
+ if isinstance(dest, str):
192
+ try:
193
+ os.remove(dest)
194
+ except os.error:
195
+ pass
196
+ raise
197
+
198
+
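A hedged round-trip sketch for write_feather() above and read_feather() below;
"example.feather" is an assumed path, and the explicit compression assumes
zstd support is built into pyarrow:

    import pandas as pd
    from pyarrow import feather

    df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    feather.write_feather(df, "example.feather", compression="zstd")

    # Read back only a subset of the columns.
    roundtrip = feather.read_feather("example.feather", columns=["a"])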
199
+ def read_feather(source, columns=None, use_threads=True,
200
+ memory_map=False, **kwargs):
201
+ """
202
+ Read a pandas.DataFrame from Feather format. To read as pyarrow.Table use
203
+ feather.read_table.
204
+
205
+ Parameters
206
+ ----------
207
+ source : str file path, or file-like object
208
+ You can use MemoryMappedFile as source to explicitly use memory mapping.
209
+ columns : sequence, optional
210
+ Only read a specific set of columns. If not provided, all columns are
211
+ read.
212
+ use_threads : bool, default True
213
+ Whether to parallelize reading using multiple threads. If False,
214
+ reading is done single-threaded, both when reading from the Feather
215
+ file and when converting to pandas.
216
+ memory_map : boolean, default False
217
+ Use memory mapping when opening file on disk, when source is a str.
218
+ **kwargs
219
+ Additional keyword arguments passed on to `pyarrow.Table.to_pandas`.
220
+
221
+ Returns
222
+ -------
223
+ df : pandas.DataFrame
224
+ The contents of the Feather file as a pandas.DataFrame
225
+ """
226
+ return (read_table(
227
+ source, columns=columns, memory_map=memory_map,
228
+ use_threads=use_threads).to_pandas(use_threads=use_threads, **kwargs))
229
+
230
+
231
+ def read_table(source, columns=None, memory_map=False, use_threads=True):
232
+ """
233
+ Read a pyarrow.Table from Feather format
234
+
235
+ Parameters
236
+ ----------
237
+ source : str file path, or file-like object
238
+ You can use MemoryMappedFile as source to explicitly use memory mapping.
239
+ columns : sequence, optional
240
+ Only read a specific set of columns. If not provided, all columns are
241
+ read.
242
+ memory_map : boolean, default False
243
+ Use memory mapping when opening file on disk, when source is a str
244
+ use_threads : bool, default True
245
+ Whether to parallelize reading using multiple threads.
246
+
247
+ Returns
248
+ -------
249
+ table : pyarrow.Table
250
+ The contents of the Feather file as a pyarrow.Table
251
+ """
252
+ reader = _feather.FeatherReader(
253
+ source, use_memory_map=memory_map, use_threads=use_threads)
254
+
255
+ if columns is None:
256
+ return reader.read()
257
+
258
+ column_types = [type(column) for column in columns]
259
+ if all(map(lambda t: t == int, column_types)):
260
+ table = reader.read_indices(columns)
261
+ elif all(map(lambda t: t == str, column_types)):
262
+ table = reader.read_names(columns)
263
+ else:
264
+ column_type_names = [t.__name__ for t in column_types]
265
+ raise TypeError("Columns must be indices or names. "
266
+ "Got columns {} of types {}"
267
+ .format(columns, column_type_names))
268
+
269
+ # Feather v1 already respects the column selection
270
+ if reader.version < 3:
271
+ return table
272
+ # Feather v2 reads with sorted / deduplicated selection
273
+ elif sorted(set(columns)) == columns:
274
+ return table
275
+ else:
276
+ # follow exact order / selection of names
277
+ return table.select(columns)
env-llmeval/lib/python3.10/site-packages/pyarrow/fs.py ADDED
@@ -0,0 +1,444 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ """
19
+ FileSystem abstraction to interact with various local and remote filesystems.
20
+ """
21
+
22
+ from pyarrow.util import _is_path_like, _stringify_path
23
+
24
+ from pyarrow._fs import ( # noqa
25
+ FileSelector,
26
+ FileType,
27
+ FileInfo,
28
+ FileSystem,
29
+ LocalFileSystem,
30
+ SubTreeFileSystem,
31
+ _MockFileSystem,
32
+ FileSystemHandler,
33
+ PyFileSystem,
34
+ _copy_files,
35
+ _copy_files_selector,
36
+ )
37
+
38
+ # For backward compatibility.
39
+ FileStats = FileInfo
40
+
41
+ _not_imported = []
42
+
43
+ try:
44
+ from pyarrow._hdfs import HadoopFileSystem # noqa
45
+ except ImportError:
46
+ _not_imported.append("HadoopFileSystem")
47
+
48
+ try:
49
+ from pyarrow._gcsfs import GcsFileSystem # noqa
50
+ except ImportError:
51
+ _not_imported.append("GcsFileSystem")
52
+
53
+ try:
54
+ from pyarrow._s3fs import ( # noqa
55
+ AwsDefaultS3RetryStrategy, AwsStandardS3RetryStrategy,
56
+ S3FileSystem, S3LogLevel, S3RetryStrategy, ensure_s3_initialized,
57
+ finalize_s3, ensure_s3_finalized, initialize_s3, resolve_s3_region)
58
+ except ImportError:
59
+ _not_imported.append("S3FileSystem")
60
+ else:
61
+ # GH-38364: we don't initialize S3 eagerly as that could lead
62
+ # to crashes at shutdown even when S3 isn't used.
63
+ # Instead, S3 is initialized lazily using `ensure_s3_initialized`
64
+ # in assorted places.
65
+ import atexit
66
+ atexit.register(ensure_s3_finalized)
67
+
68
+
69
+ def __getattr__(name):
70
+ if name in _not_imported:
71
+ raise ImportError(
72
+ "The pyarrow installation is not built with support for "
73
+ "'{0}'".format(name)
74
+ )
75
+
76
+ raise AttributeError(
77
+ "module 'pyarrow.fs' has no attribute '{0}'".format(name)
78
+ )
79
+
80
+
81
+ def _filesystem_from_str(uri):
82
+ # instantiate the file system from a URI; if the URI has a path
83
+ # component then it will be treated as a path prefix
84
+ filesystem, prefix = FileSystem.from_uri(uri)
85
+ prefix = filesystem.normalize_path(prefix)
86
+ if prefix:
87
+ # validate that the prefix is pointing to a directory
88
+ prefix_info = filesystem.get_file_info([prefix])[0]
89
+ if prefix_info.type != FileType.Directory:
90
+ raise ValueError(
91
+ "The path component of the filesystem URI must point to a "
92
+ "directory but it has a type: `{}`. The path component "
93
+ "is `{}` and the given filesystem URI is `{}`".format(
94
+ prefix_info.type.name, prefix_info.path, uri
95
+ )
96
+ )
97
+ filesystem = SubTreeFileSystem(prefix, filesystem)
98
+ return filesystem
99
+
100
+
101
+ def _ensure_filesystem(
102
+ filesystem, use_mmap=False, allow_legacy_filesystem=False
103
+ ):
104
+ if isinstance(filesystem, FileSystem):
105
+ return filesystem
106
+ elif isinstance(filesystem, str):
107
+ if use_mmap:
108
+ raise ValueError(
109
+ "Specifying to use memory mapping not supported for "
110
+ "filesystem specified as an URI string"
111
+ )
112
+ return _filesystem_from_str(filesystem)
113
+
114
+ # handle fsspec-compatible filesystems
115
+ try:
116
+ import fsspec
117
+ except ImportError:
118
+ pass
119
+ else:
120
+ if isinstance(filesystem, fsspec.AbstractFileSystem):
121
+ if type(filesystem).__name__ == 'LocalFileSystem':
122
+ # If it is a simple LocalFileSystem, use the native Arrow one
123
+ return LocalFileSystem(use_mmap=use_mmap)
124
+ return PyFileSystem(FSSpecHandler(filesystem))
125
+
126
+ # map old filesystems to new ones
127
+ import pyarrow.filesystem as legacyfs
128
+
129
+ if isinstance(filesystem, legacyfs.LocalFileSystem):
130
+ return LocalFileSystem(use_mmap=use_mmap)
131
+ # TODO handle HDFS?
132
+ if allow_legacy_filesystem and isinstance(filesystem, legacyfs.FileSystem):
133
+ return filesystem
134
+
135
+ raise TypeError(
136
+ "Unrecognized filesystem: {}. `filesystem` argument must be a "
137
+ "FileSystem instance or a valid file system URI'".format(
138
+ type(filesystem))
139
+ )
140
+
141
+
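An editorial sketch of the three kinds of input accepted by the private
_ensure_filesystem() helper above; the fsspec lines are commented out because
fsspec is an optional dependency and "memory" is just an assumed example:

    from pyarrow.fs import LocalFileSystem, _ensure_filesystem

    fs1 = _ensure_filesystem(LocalFileSystem())  # FileSystem passed through
    fs2 = _ensure_filesystem("file:///tmp")      # URI -> SubTreeFileSystem

    # fsspec filesystems are wrapped in PyFileSystem(FSSpecHandler(...)):
    # import fsspec
    # fs3 = _ensure_filesystem(fsspec.filesystem("memory"))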
142
+ def _resolve_filesystem_and_path(
143
+ path, filesystem=None, allow_legacy_filesystem=False, memory_map=False
144
+ ):
145
+ """
146
+ Return filesystem/path from path which could be an URI or a plain
147
+ filesystem path.
148
+ """
149
+ if not _is_path_like(path):
150
+ if filesystem is not None:
151
+ raise ValueError(
152
+ "'filesystem' passed but the specified path is file-like, so"
153
+ " there is nothing to open with 'filesystem'."
154
+ )
155
+ return filesystem, path
156
+
157
+ if filesystem is not None:
158
+ filesystem = _ensure_filesystem(
159
+ filesystem, use_mmap=memory_map,
160
+ allow_legacy_filesystem=allow_legacy_filesystem
161
+ )
162
+ if isinstance(filesystem, LocalFileSystem):
163
+ path = _stringify_path(path)
164
+ elif not isinstance(path, str):
165
+ raise TypeError(
166
+ "Expected string path; path-like objects are only allowed "
167
+ "with a local filesystem"
168
+ )
169
+ if not allow_legacy_filesystem:
170
+ path = filesystem.normalize_path(path)
171
+ return filesystem, path
172
+
173
+ path = _stringify_path(path)
174
+
175
+ # if filesystem is not given, try to automatically determine one
176
+ # first check if the file exists as a local (relative) file path
177
+ # if not then try to parse the path as an URI
178
+ filesystem = LocalFileSystem(use_mmap=memory_map)
179
+
180
+ try:
181
+ file_info = filesystem.get_file_info(path)
182
+ except ValueError: # ValueError means path is likely an URI
183
+ file_info = None
184
+ exists_locally = False
185
+ else:
186
+ exists_locally = (file_info.type != FileType.NotFound)
187
+
188
+ # if the file or directory doesn't exist locally, then assume that
189
+ # the path is a URI describing the file system as well
190
+ if not exists_locally:
191
+ try:
192
+ filesystem, path = FileSystem.from_uri(path)
193
+ except ValueError as e:
194
+ # neither an URI nor a locally existing path, so assume that
195
+ # local path was given and propagate a nicer file not found error
196
+ # instead of a more confusing scheme parsing error
197
+ if "empty scheme" not in str(e) \
198
+ and "Cannot parse URI" not in str(e):
199
+ raise
200
+ else:
201
+ path = filesystem.normalize_path(path)
202
+
203
+ return filesystem, path
204
+
205
+
206
+ def copy_files(source, destination,
207
+ source_filesystem=None, destination_filesystem=None,
208
+ *, chunk_size=1024*1024, use_threads=True):
209
+ """
210
+ Copy files between FileSystems.
211
+
212
+ This function allows you to recursively copy directories of files from
213
+ one file system to another, such as from S3 to your local machine.
214
+
215
+ Parameters
216
+ ----------
217
+ source : string
218
+ Source file path or URI to a single file or directory.
219
+ If a directory, files will be copied recursively from this path.
220
+ destination : string
221
+ Destination file path or URI. If `source` is a file, `destination`
222
+ is also interpreted as the destination file (not directory).
223
+ Directories will be created as necessary.
224
+ source_filesystem : FileSystem, optional
225
+ Source filesystem, needs to be specified if `source` is not a URI,
226
+ otherwise inferred.
227
+ destination_filesystem : FileSystem, optional
228
+ Destination filesystem, needs to be specified if `destination` is not
229
+ a URI, otherwise inferred.
230
+ chunk_size : int, default 1MB
231
+ The maximum size of block to read before flushing to the
232
+ destination file. A larger chunk_size will use more memory while
233
+ copying but may help accommodate high latency FileSystems.
234
+ use_threads : bool, default True
235
+ Whether to use multiple threads to accelerate copying.
236
+
237
+ Examples
238
+ --------
239
+ Inspect an S3 bucket's files:
240
+
241
+ >>> s3, path = fs.FileSystem.from_uri(
242
+ ... "s3://registry.opendata.aws/roda/ndjson/")
243
+ >>> selector = fs.FileSelector(path)
244
+ >>> s3.get_file_info(selector)
245
+ [<FileInfo for 'registry.opendata.aws/roda/ndjson/index.ndjson':...]
246
+
247
+ Copy one file from S3 bucket to a local directory:
248
+
249
+ >>> fs.copy_files("s3://registry.opendata.aws/roda/ndjson/index.ndjson",
250
+ ... "file:///{}/index_copy.ndjson".format(local_path))
251
+
252
+ >>> fs.LocalFileSystem().get_file_info(str(local_path)+
253
+ ... '/index_copy.ndjson')
254
+ <FileInfo for '.../index_copy.ndjson': type=FileType.File, size=...>
255
+
256
+ Copy file using a FileSystem object:
257
+
258
+ >>> fs.copy_files("registry.opendata.aws/roda/ndjson/index.ndjson",
259
+ ... "file:///{}/index_copy.ndjson".format(local_path),
260
+ ... source_filesystem=fs.S3FileSystem())
261
+ """
262
+ source_fs, source_path = _resolve_filesystem_and_path(
263
+ source, source_filesystem
264
+ )
265
+ destination_fs, destination_path = _resolve_filesystem_and_path(
266
+ destination, destination_filesystem
267
+ )
268
+
269
+ file_info = source_fs.get_file_info(source_path)
270
+ if file_info.type == FileType.Directory:
271
+ source_sel = FileSelector(source_path, recursive=True)
272
+ _copy_files_selector(source_fs, source_sel,
273
+ destination_fs, destination_path,
274
+ chunk_size, use_threads)
275
+ else:
276
+ _copy_files(source_fs, source_path,
277
+ destination_fs, destination_path,
278
+ chunk_size, use_threads)
279
+
280
+
281
+ class FSSpecHandler(FileSystemHandler):
282
+ """
283
+ Handler for fsspec-based Python filesystems.
284
+
285
+ https://filesystem-spec.readthedocs.io/en/latest/index.html
286
+
287
+ Parameters
288
+ ----------
289
+ fs : FSSpec-compliant filesystem instance
290
+
291
+ Examples
292
+ --------
293
+ >>> PyFileSystem(FSSpecHandler(fsspec_fs)) # doctest: +SKIP
294
+ """
295
+
296
+ def __init__(self, fs):
297
+ self.fs = fs
298
+
299
+ def __eq__(self, other):
300
+ if isinstance(other, FSSpecHandler):
301
+ return self.fs == other.fs
302
+ return NotImplemented
303
+
304
+ def __ne__(self, other):
305
+ if isinstance(other, FSSpecHandler):
306
+ return self.fs != other.fs
307
+ return NotImplemented
308
+
309
+ def get_type_name(self):
310
+ protocol = self.fs.protocol
311
+ if isinstance(protocol, list):
312
+ protocol = protocol[0]
313
+ return "fsspec+{0}".format(protocol)
314
+
315
+ def normalize_path(self, path):
316
+ return path
317
+
318
+ @staticmethod
319
+ def _create_file_info(path, info):
320
+ size = info["size"]
321
+ if info["type"] == "file":
322
+ ftype = FileType.File
323
+ elif info["type"] == "directory":
324
+ ftype = FileType.Directory
325
+ # some fsspec filesystems include a file size for directories
326
+ size = None
327
+ else:
328
+ ftype = FileType.Unknown
329
+ return FileInfo(path, ftype, size=size, mtime=info.get("mtime", None))
330
+
331
+ def get_file_info(self, paths):
332
+ infos = []
333
+ for path in paths:
334
+ try:
335
+ info = self.fs.info(path)
336
+ except FileNotFoundError:
337
+ infos.append(FileInfo(path, FileType.NotFound))
338
+ else:
339
+ infos.append(self._create_file_info(path, info))
340
+ return infos
341
+
342
+ def get_file_info_selector(self, selector):
343
+ if not self.fs.isdir(selector.base_dir):
344
+ if self.fs.exists(selector.base_dir):
345
+ raise NotADirectoryError(selector.base_dir)
346
+ else:
347
+ if selector.allow_not_found:
348
+ return []
349
+ else:
350
+ raise FileNotFoundError(selector.base_dir)
351
+
352
+ if selector.recursive:
353
+ maxdepth = None
354
+ else:
355
+ maxdepth = 1
356
+
357
+ infos = []
358
+ selected_files = self.fs.find(
359
+ selector.base_dir, maxdepth=maxdepth, withdirs=True, detail=True
360
+ )
361
+ for path, info in selected_files.items():
362
+ _path = path.strip("/")
363
+ base_dir = selector.base_dir.strip("/")
364
+ # Need to exclude base directory from selected files if present
365
+ # (fsspec filesystems, see GH-37555)
366
+ if _path != base_dir:
367
+ infos.append(self._create_file_info(path, info))
368
+
369
+ return infos
370
+
371
+ def create_dir(self, path, recursive):
372
+ # mkdir also raises FileNotFoundError when base directory is not found
373
+ try:
374
+ self.fs.mkdir(path, create_parents=recursive)
375
+ except FileExistsError:
376
+ pass
377
+
378
+ def delete_dir(self, path):
379
+ self.fs.rm(path, recursive=True)
380
+
381
+ def _delete_dir_contents(self, path, missing_dir_ok=False):
382
+ try:
383
+ subpaths = self.fs.listdir(path, detail=False)
384
+ except FileNotFoundError:
385
+ if missing_dir_ok:
386
+ return
387
+ raise
388
+ for subpath in subpaths:
389
+ if self.fs.isdir(subpath):
390
+ self.fs.rm(subpath, recursive=True)
391
+ elif self.fs.isfile(subpath):
392
+ self.fs.rm(subpath)
393
+
394
+ def delete_dir_contents(self, path, missing_dir_ok):
395
+ if path.strip("/") == "":
396
+ raise ValueError(
397
+ "delete_dir_contents called on path '", path, "'")
398
+ self._delete_dir_contents(path, missing_dir_ok)
399
+
400
+ def delete_root_dir_contents(self):
401
+ self._delete_dir_contents("/")
402
+
403
+ def delete_file(self, path):
404
+ # fs.rm correctly raises IsADirectoryError when `path` is a directory
405
+ # instead of a file and `recursive` is not set to True
406
+ if not self.fs.exists(path):
407
+ raise FileNotFoundError(path)
408
+ self.fs.rm(path)
409
+
410
+ def move(self, src, dest):
411
+ self.fs.mv(src, dest, recursive=True)
412
+
413
+ def copy_file(self, src, dest):
414
+ # fs.copy correctly raises IsADirectoryError when `src` is a directory
415
+ # instead of a file
416
+ self.fs.copy(src, dest)
417
+
418
+ # TODO can we read/pass metadata (e.g. Content-Type) in the methods below?
419
+
420
+ def open_input_stream(self, path):
421
+ from pyarrow import PythonFile
422
+
423
+ if not self.fs.isfile(path):
424
+ raise FileNotFoundError(path)
425
+
426
+ return PythonFile(self.fs.open(path, mode="rb"), mode="r")
427
+
428
+ def open_input_file(self, path):
429
+ from pyarrow import PythonFile
430
+
431
+ if not self.fs.isfile(path):
432
+ raise FileNotFoundError(path)
433
+
434
+ return PythonFile(self.fs.open(path, mode="rb"), mode="r")
435
+
436
+ def open_output_stream(self, path, metadata):
437
+ from pyarrow import PythonFile
438
+
439
+ return PythonFile(self.fs.open(path, mode="wb"), mode="w")
440
+
441
+ def open_append_stream(self, path, metadata):
442
+ from pyarrow import PythonFile
443
+
444
+ return PythonFile(self.fs.open(path, mode="ab"), mode="w")
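The FSSpecHandler above lets any fsspec-compliant filesystem be used wherever a pyarrow FileSystem is expected. A minimal sketch of that wiring follows; it assumes the optional fsspec package is installed, and the in-memory filesystem plus the data/example.txt path are purely illustrative, not part of this diff.

import fsspec
from pyarrow.fs import PyFileSystem, FSSpecHandler, FileSelector

# Build an fsspec in-memory filesystem and put a small file in it
# (illustrative content only).
mem_fs = fsspec.filesystem("memory")
with mem_fs.open("data/example.txt", "wb") as f:
    f.write(b"hello arrow")

# Wrap it so pyarrow code can treat it like any other FileSystem.
arrow_fs = PyFileSystem(FSSpecHandler(mem_fs))

# These calls are routed through FSSpecHandler.get_file_info() and
# FSSpecHandler.get_file_info_selector() defined above.
print(arrow_fs.get_file_info("data/example.txt"))
print(arrow_fs.get_file_info(FileSelector("data", recursive=True)))

# open_input_stream() returns a PythonFile over the fsspec file object.
with arrow_fs.open_input_stream("data/example.txt") as stream:
    print(stream.read())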
env-llmeval/lib/python3.10/site-packages/pyarrow/gandiva.pyx ADDED
@@ -0,0 +1,760 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+ # cython: language_level = 3
21
+
22
+ from libcpp.memory cimport shared_ptr
23
+ from libcpp.string cimport string as c_string
24
+ from libcpp.vector cimport vector as c_vector
25
+ from libcpp.unordered_set cimport unordered_set as c_unordered_set
26
+ from libc.stdint cimport int64_t, int32_t
27
+
28
+ from pyarrow.includes.libarrow cimport *
29
+ from pyarrow.lib cimport (DataType, Field, MemoryPool, RecordBatch,
30
+ Schema, check_status, pyarrow_wrap_array,
31
+ pyarrow_wrap_data_type, ensure_type, _Weakrefable,
32
+ pyarrow_wrap_field)
33
+
34
+ from pyarrow.includes.libgandiva cimport (
35
+ CCondition, CGandivaExpression,
36
+ CNode, CProjector, CFilter,
37
+ CSelectionVector,
38
+ _ensure_selection_mode,
39
+ CConfiguration,
40
+ CConfigurationBuilder,
41
+ TreeExprBuilder_MakeExpression,
42
+ TreeExprBuilder_MakeFunction,
43
+ TreeExprBuilder_MakeBoolLiteral,
44
+ TreeExprBuilder_MakeUInt8Literal,
45
+ TreeExprBuilder_MakeUInt16Literal,
46
+ TreeExprBuilder_MakeUInt32Literal,
47
+ TreeExprBuilder_MakeUInt64Literal,
48
+ TreeExprBuilder_MakeInt8Literal,
49
+ TreeExprBuilder_MakeInt16Literal,
50
+ TreeExprBuilder_MakeInt32Literal,
51
+ TreeExprBuilder_MakeInt64Literal,
52
+ TreeExprBuilder_MakeFloatLiteral,
53
+ TreeExprBuilder_MakeDoubleLiteral,
54
+ TreeExprBuilder_MakeStringLiteral,
55
+ TreeExprBuilder_MakeBinaryLiteral,
56
+ TreeExprBuilder_MakeField,
57
+ TreeExprBuilder_MakeIf,
58
+ TreeExprBuilder_MakeAnd,
59
+ TreeExprBuilder_MakeOr,
60
+ TreeExprBuilder_MakeCondition,
61
+ TreeExprBuilder_MakeInExpressionInt32,
62
+ TreeExprBuilder_MakeInExpressionInt64,
63
+ TreeExprBuilder_MakeInExpressionTime32,
64
+ TreeExprBuilder_MakeInExpressionTime64,
65
+ TreeExprBuilder_MakeInExpressionDate32,
66
+ TreeExprBuilder_MakeInExpressionDate64,
67
+ TreeExprBuilder_MakeInExpressionTimeStamp,
68
+ TreeExprBuilder_MakeInExpressionString,
69
+ SelectionVector_MakeInt16,
70
+ SelectionVector_MakeInt32,
71
+ SelectionVector_MakeInt64,
72
+ Projector_Make,
73
+ Filter_Make,
74
+ CFunctionSignature,
75
+ GetRegisteredFunctionSignatures)
76
+
77
+
78
+ cdef class Node(_Weakrefable):
79
+ cdef:
80
+ shared_ptr[CNode] node
81
+
82
+ def __init__(self):
83
+ raise TypeError("Do not call {}'s constructor directly, use the "
84
+ "TreeExprBuilder API directly"
85
+ .format(self.__class__.__name__))
86
+
87
+ @staticmethod
88
+ cdef create(shared_ptr[CNode] node):
89
+ cdef Node self = Node.__new__(Node)
90
+ self.node = node
91
+ return self
92
+
93
+ def __str__(self):
94
+ return self.node.get().ToString().decode()
95
+
96
+ def __repr__(self):
97
+ type_format = object.__repr__(self)
98
+ return '{0}\n{1}'.format(type_format, str(self))
99
+
100
+ def return_type(self):
101
+ return pyarrow_wrap_data_type(self.node.get().return_type())
102
+
103
+
104
+ cdef class Expression(_Weakrefable):
105
+ cdef:
106
+ shared_ptr[CGandivaExpression] expression
107
+
108
+ cdef void init(self, shared_ptr[CGandivaExpression] expression):
109
+ self.expression = expression
110
+
111
+ def __str__(self):
112
+ return self.expression.get().ToString().decode()
113
+
114
+ def __repr__(self):
115
+ type_format = object.__repr__(self)
116
+ return '{0}\n{1}'.format(type_format, str(self))
117
+
118
+ def root(self):
119
+ return Node.create(self.expression.get().root())
120
+
121
+ def result(self):
122
+ return pyarrow_wrap_field(self.expression.get().result())
123
+
124
+
125
+ cdef class Condition(_Weakrefable):
126
+ cdef:
127
+ shared_ptr[CCondition] condition
128
+
129
+ def __init__(self):
130
+ raise TypeError("Do not call {}'s constructor directly, use the "
131
+ "TreeExprBuilder API instead"
132
+ .format(self.__class__.__name__))
133
+
134
+ @staticmethod
135
+ cdef create(shared_ptr[CCondition] condition):
136
+ cdef Condition self = Condition.__new__(Condition)
137
+ self.condition = condition
138
+ return self
139
+
140
+ def __str__(self):
141
+ return self.condition.get().ToString().decode()
142
+
143
+ def __repr__(self):
144
+ type_format = object.__repr__(self)
145
+ return '{0}\n{1}'.format(type_format, str(self))
146
+
147
+ def root(self):
148
+ return Node.create(self.condition.get().root())
149
+
150
+ def result(self):
151
+ return pyarrow_wrap_field(self.condition.get().result())
152
+
153
+
154
+ cdef class SelectionVector(_Weakrefable):
155
+ cdef:
156
+ shared_ptr[CSelectionVector] selection_vector
157
+
158
+ def __init__(self):
159
+ raise TypeError("Do not call {}'s constructor directly."
160
+ .format(self.__class__.__name__))
161
+
162
+ @staticmethod
163
+ cdef create(shared_ptr[CSelectionVector] selection_vector):
164
+ cdef SelectionVector self = SelectionVector.__new__(SelectionVector)
165
+ self.selection_vector = selection_vector
166
+ return self
167
+
168
+ def to_array(self):
169
+ cdef shared_ptr[CArray] result = self.selection_vector.get().ToArray()
170
+ return pyarrow_wrap_array(result)
171
+
172
+
173
+ cdef class Projector(_Weakrefable):
174
+ cdef:
175
+ shared_ptr[CProjector] projector
176
+ MemoryPool pool
177
+
178
+ def __init__(self):
179
+ raise TypeError("Do not call {}'s constructor directly, use "
180
+ "make_projector instead"
181
+ .format(self.__class__.__name__))
182
+
183
+ @staticmethod
184
+ cdef create(shared_ptr[CProjector] projector, MemoryPool pool):
185
+ cdef Projector self = Projector.__new__(Projector)
186
+ self.projector = projector
187
+ self.pool = pool
188
+ return self
189
+
190
+ @property
191
+ def llvm_ir(self):
192
+ return self.projector.get().DumpIR().decode()
193
+
194
+ def evaluate(self, RecordBatch batch, SelectionVector selection=None):
195
+ """
196
+ Evaluate the specified record batch and return the arrays at the
197
+ filtered positions.
198
+
199
+ Parameters
200
+ ----------
201
+ batch : pyarrow.RecordBatch
202
+ selection : pyarrow.gandiva.SelectionVector
203
+
204
+ Returns
205
+ -------
206
+ list[pyarrow.Array]
207
+ """
208
+ cdef vector[shared_ptr[CArray]] results
209
+ if selection is None:
210
+ check_status(self.projector.get().Evaluate(
211
+ batch.sp_batch.get()[0], self.pool.pool, &results))
212
+ else:
213
+ check_status(
214
+ self.projector.get().Evaluate(
215
+ batch.sp_batch.get()[0], selection.selection_vector.get(),
216
+ self.pool.pool, &results))
217
+ cdef shared_ptr[CArray] result
218
+ arrays = []
219
+ for result in results:
220
+ arrays.append(pyarrow_wrap_array(result))
221
+ return arrays
222
+
223
+
224
+ cdef class Filter(_Weakrefable):
225
+ cdef:
226
+ shared_ptr[CFilter] filter
227
+
228
+ def __init__(self):
229
+ raise TypeError("Do not call {}'s constructor directly, use "
230
+ "make_filter instead"
231
+ .format(self.__class__.__name__))
232
+
233
+ @staticmethod
234
+ cdef create(shared_ptr[CFilter] filter):
235
+ cdef Filter self = Filter.__new__(Filter)
236
+ self.filter = filter
237
+ return self
238
+
239
+ @property
240
+ def llvm_ir(self):
241
+ return self.filter.get().DumpIR().decode()
242
+
243
+ def evaluate(self, RecordBatch batch, MemoryPool pool, dtype='int32'):
244
+ """
245
+ Evaluate the specified record batch and return a selection vector.
246
+
247
+ Parameters
248
+ ----------
249
+ batch : pyarrow.RecordBatch
250
+ pool : MemoryPool
251
+ dtype : DataType or str, default int32
252
+
253
+ Returns
254
+ -------
255
+ pyarrow.gandiva.SelectionVector
256
+ """
257
+ cdef:
258
+ DataType type = ensure_type(dtype)
259
+ shared_ptr[CSelectionVector] selection
260
+
261
+ if type.id == _Type_INT16:
262
+ check_status(SelectionVector_MakeInt16(
263
+ batch.num_rows, pool.pool, &selection))
264
+ elif type.id == _Type_INT32:
265
+ check_status(SelectionVector_MakeInt32(
266
+ batch.num_rows, pool.pool, &selection))
267
+ elif type.id == _Type_INT64:
268
+ check_status(SelectionVector_MakeInt64(
269
+ batch.num_rows, pool.pool, &selection))
270
+ else:
271
+ raise ValueError("'dtype' of the selection vector should be "
272
+ "one of 'int16', 'int32' and 'int64'.")
273
+
274
+ check_status(self.filter.get().Evaluate(
275
+ batch.sp_batch.get()[0], selection))
276
+ return SelectionVector.create(selection)
277
+
278
+
279
+ cdef class TreeExprBuilder(_Weakrefable):
280
+
281
+ def make_literal(self, value, dtype):
282
+ """
283
+ Create a node on a literal.
284
+
285
+ Parameters
286
+ ----------
287
+ value : a literal value
288
+ dtype : DataType
289
+
290
+ Returns
291
+ -------
292
+ pyarrow.gandiva.Node
293
+ """
294
+ cdef:
295
+ DataType type = ensure_type(dtype)
296
+ shared_ptr[CNode] r
297
+
298
+ if type.id == _Type_BOOL:
299
+ r = TreeExprBuilder_MakeBoolLiteral(value)
300
+ elif type.id == _Type_UINT8:
301
+ r = TreeExprBuilder_MakeUInt8Literal(value)
302
+ elif type.id == _Type_UINT16:
303
+ r = TreeExprBuilder_MakeUInt16Literal(value)
304
+ elif type.id == _Type_UINT32:
305
+ r = TreeExprBuilder_MakeUInt32Literal(value)
306
+ elif type.id == _Type_UINT64:
307
+ r = TreeExprBuilder_MakeUInt64Literal(value)
308
+ elif type.id == _Type_INT8:
309
+ r = TreeExprBuilder_MakeInt8Literal(value)
310
+ elif type.id == _Type_INT16:
311
+ r = TreeExprBuilder_MakeInt16Literal(value)
312
+ elif type.id == _Type_INT32:
313
+ r = TreeExprBuilder_MakeInt32Literal(value)
314
+ elif type.id == _Type_INT64:
315
+ r = TreeExprBuilder_MakeInt64Literal(value)
316
+ elif type.id == _Type_FLOAT:
317
+ r = TreeExprBuilder_MakeFloatLiteral(value)
318
+ elif type.id == _Type_DOUBLE:
319
+ r = TreeExprBuilder_MakeDoubleLiteral(value)
320
+ elif type.id == _Type_STRING:
321
+ r = TreeExprBuilder_MakeStringLiteral(value.encode('UTF-8'))
322
+ elif type.id == _Type_BINARY:
323
+ r = TreeExprBuilder_MakeBinaryLiteral(value)
324
+ else:
325
+ raise TypeError("Didn't recognize dtype " + str(dtype))
326
+
327
+ return Node.create(r)
328
+
329
+ def make_expression(self, Node root_node not None,
330
+ Field return_field not None):
331
+ """
332
+ Create an expression with the specified root_node,
333
+ and the result written to result_field.
334
+
335
+ Parameters
336
+ ----------
337
+ root_node : pyarrow.gandiva.Node
338
+ return_field : pyarrow.Field
339
+
340
+ Returns
341
+ -------
342
+ pyarrow.gandiva.Expression
343
+ """
344
+ cdef shared_ptr[CGandivaExpression] r = TreeExprBuilder_MakeExpression(
345
+ root_node.node, return_field.sp_field)
346
+ cdef Expression expression = Expression()
347
+ expression.init(r)
348
+ return expression
349
+
350
+ def make_function(self, name, children, DataType return_type):
351
+ """
352
+ Create a node with a function.
353
+
354
+ Parameters
355
+ ----------
356
+ name : str
357
+ children : pyarrow.gandiva.NodeVector
358
+ return_type : DataType
359
+
360
+ Returns
361
+ -------
362
+ pyarrow.gandiva.Node
363
+ """
364
+ cdef c_vector[shared_ptr[CNode]] c_children
365
+ cdef Node child
366
+ for child in children:
367
+ if child is None:
368
+ raise TypeError("Child nodes must not be None")
369
+ c_children.push_back(child.node)
370
+ cdef shared_ptr[CNode] r = TreeExprBuilder_MakeFunction(
371
+ name.encode(), c_children, return_type.sp_type)
372
+ return Node.create(r)
373
+
374
+ def make_field(self, Field field not None):
375
+ """
376
+ Create a node with an Arrow field.
377
+
378
+ Parameters
379
+ ----------
380
+ field : pyarrow.Field
381
+
382
+ Returns
383
+ -------
384
+ pyarrow.gandiva.Node
385
+ """
386
+ cdef shared_ptr[CNode] r = TreeExprBuilder_MakeField(field.sp_field)
387
+ return Node.create(r)
388
+
389
+ def make_if(self, Node condition not None, Node this_node not None,
390
+ Node else_node not None, DataType return_type not None):
391
+ """
392
+ Create a node with an if-else expression.
393
+
394
+ Parameters
395
+ ----------
396
+ condition : pyarrow.gandiva.Node
397
+ this_node : pyarrow.gandiva.Node
398
+ else_node : pyarrow.gandiva.Node
399
+ return_type : DataType
400
+
401
+ Returns
402
+ -------
403
+ pyarrow.gandiva.Node
404
+ """
405
+ cdef shared_ptr[CNode] r = TreeExprBuilder_MakeIf(
406
+ condition.node, this_node.node, else_node.node,
407
+ return_type.sp_type)
408
+ return Node.create(r)
409
+
410
+ def make_and(self, children):
411
+ """
412
+ Create a Node with a boolean AND expression.
413
+
414
+ Parameters
415
+ ----------
416
+ children : list[pyarrow.gandiva.Node]
417
+
418
+ Returns
419
+ -------
420
+ pyarrow.gandiva.Node
421
+ """
422
+ cdef c_vector[shared_ptr[CNode]] c_children
423
+ cdef Node child
424
+ for child in children:
425
+ if child is None:
426
+ raise TypeError("Child nodes must not be None")
427
+ c_children.push_back(child.node)
428
+ cdef shared_ptr[CNode] r = TreeExprBuilder_MakeAnd(c_children)
429
+ return Node.create(r)
430
+
431
+ def make_or(self, children):
432
+ """
433
+ Create a Node with a boolean OR expression.
434
+
435
+ Parameters
436
+ ----------
437
+ children : list[pyarrow.gandiva.Node]
438
+
439
+ Returns
440
+ -------
441
+ pyarrow.gandiva.Node
442
+ """
443
+ cdef c_vector[shared_ptr[CNode]] c_children
444
+ cdef Node child
445
+ for child in children:
446
+ if child is None:
447
+ raise TypeError("Child nodes must not be None")
448
+ c_children.push_back(child.node)
449
+ cdef shared_ptr[CNode] r = TreeExprBuilder_MakeOr(c_children)
450
+ return Node.create(r)
451
+
452
+ def _make_in_expression_int32(self, Node node not None, values):
453
+ cdef shared_ptr[CNode] r
454
+ cdef c_unordered_set[int32_t] c_values
455
+ cdef int32_t v
456
+ for v in values:
457
+ c_values.insert(v)
458
+ r = TreeExprBuilder_MakeInExpressionInt32(node.node, c_values)
459
+ return Node.create(r)
460
+
461
+ def _make_in_expression_int64(self, Node node not None, values):
462
+ cdef shared_ptr[CNode] r
463
+ cdef c_unordered_set[int64_t] c_values
464
+ cdef int64_t v
465
+ for v in values:
466
+ c_values.insert(v)
467
+ r = TreeExprBuilder_MakeInExpressionInt64(node.node, c_values)
468
+ return Node.create(r)
469
+
470
+ def _make_in_expression_time32(self, Node node not None, values):
471
+ cdef shared_ptr[CNode] r
472
+ cdef c_unordered_set[int32_t] c_values
473
+ cdef int32_t v
474
+ for v in values:
475
+ c_values.insert(v)
476
+ r = TreeExprBuilder_MakeInExpressionTime32(node.node, c_values)
477
+ return Node.create(r)
478
+
479
+ def _make_in_expression_time64(self, Node node not None, values):
480
+ cdef shared_ptr[CNode] r
481
+ cdef c_unordered_set[int64_t] c_values
482
+ cdef int64_t v
483
+ for v in values:
484
+ c_values.insert(v)
485
+ r = TreeExprBuilder_MakeInExpressionTime64(node.node, c_values)
486
+ return Node.create(r)
487
+
488
+ def _make_in_expression_date32(self, Node node not None, values):
489
+ cdef shared_ptr[CNode] r
490
+ cdef c_unordered_set[int32_t] c_values
491
+ cdef int32_t v
492
+ for v in values:
493
+ c_values.insert(v)
494
+ r = TreeExprBuilder_MakeInExpressionDate32(node.node, c_values)
495
+ return Node.create(r)
496
+
497
+ def _make_in_expression_date64(self, Node node not None, values):
498
+ cdef shared_ptr[CNode] r
499
+ cdef c_unordered_set[int64_t] c_values
500
+ cdef int64_t v
501
+ for v in values:
502
+ c_values.insert(v)
503
+ r = TreeExprBuilder_MakeInExpressionDate64(node.node, c_values)
504
+ return Node.create(r)
505
+
506
+ def _make_in_expression_timestamp(self, Node node not None, values):
507
+ cdef shared_ptr[CNode] r
508
+ cdef c_unordered_set[int64_t] c_values
509
+ cdef int64_t v
510
+ for v in values:
511
+ c_values.insert(v)
512
+ r = TreeExprBuilder_MakeInExpressionTimeStamp(node.node, c_values)
513
+ return Node.create(r)
514
+
515
+ def _make_in_expression_binary(self, Node node not None, values):
516
+ cdef shared_ptr[CNode] r
517
+ cdef c_unordered_set[c_string] c_values
518
+ cdef c_string v
519
+ for v in values:
520
+ c_values.insert(v)
521
+ r = TreeExprBuilder_MakeInExpressionString(node.node, c_values)
522
+ return Node.create(r)
523
+
524
+ def _make_in_expression_string(self, Node node not None, values):
525
+ cdef shared_ptr[CNode] r
526
+ cdef c_unordered_set[c_string] c_values
527
+ cdef c_string _v
528
+ for v in values:
529
+ _v = v.encode('UTF-8')
530
+ c_values.insert(_v)
531
+ r = TreeExprBuilder_MakeInExpressionString(node.node, c_values)
532
+ return Node.create(r)
533
+
534
+ def make_in_expression(self, Node node not None, values, dtype):
535
+ """
536
+ Create a Node with an IN expression.
537
+
538
+ Parameters
539
+ ----------
540
+ node : pyarrow.gandiva.Node
541
+ values : iterable
542
+ dtype : DataType
543
+
544
+ Returns
545
+ -------
546
+ pyarrow.gandiva.Node
547
+ """
548
+ cdef DataType type = ensure_type(dtype)
549
+
550
+ if type.id == _Type_INT32:
551
+ return self._make_in_expression_int32(node, values)
552
+ elif type.id == _Type_INT64:
553
+ return self._make_in_expression_int64(node, values)
554
+ elif type.id == _Type_TIME32:
555
+ return self._make_in_expression_time32(node, values)
556
+ elif type.id == _Type_TIME64:
557
+ return self._make_in_expression_time64(node, values)
558
+ elif type.id == _Type_TIMESTAMP:
559
+ return self._make_in_expression_timestamp(node, values)
560
+ elif type.id == _Type_DATE32:
561
+ return self._make_in_expression_date32(node, values)
562
+ elif type.id == _Type_DATE64:
563
+ return self._make_in_expression_date64(node, values)
564
+ elif type.id == _Type_BINARY:
565
+ return self._make_in_expression_binary(node, values)
566
+ elif type.id == _Type_STRING:
567
+ return self._make_in_expression_string(node, values)
568
+ else:
569
+ raise TypeError("Data type " + str(dtype) + " not supported.")
570
+
571
+ def make_condition(self, Node condition not None):
572
+ """
573
+ Create a condition with the specified node.
574
+
575
+ Parameters
576
+ ----------
577
+ condition : pyarrow.gandiva.Node
578
+
579
+ Returns
580
+ -------
581
+ pyarrow.gandiva.Condition
582
+ """
583
+ cdef shared_ptr[CCondition] r = TreeExprBuilder_MakeCondition(
584
+ condition.node)
585
+ return Condition.create(r)
586
+
587
+ cdef class Configuration(_Weakrefable):
588
+ cdef:
589
+ shared_ptr[CConfiguration] configuration
590
+
591
+ def __cinit__(self, bint optimize=True, bint dump_ir=False):
592
+ """
593
+ Initialize the configuration with specified options.
594
+
595
+ Parameters
596
+ ----------
597
+ optimize : bool, default True
598
+ Whether to enable optimizations.
599
+ dump_ir : bool, default False
600
+ Whether to dump LLVM IR.
601
+ """
602
+ self.configuration = CConfigurationBuilder().build()
603
+ self.configuration.get().set_optimize(optimize)
604
+ self.configuration.get().set_dump_ir(dump_ir)
605
+
606
+ @staticmethod
607
+ cdef create(shared_ptr[CConfiguration] configuration):
608
+ """
609
+ Create a Configuration instance from an existing CConfiguration pointer.
610
+
611
+ Parameters
612
+ ----------
613
+ configuration : shared_ptr[CConfiguration]
614
+ Existing CConfiguration pointer.
615
+
616
+ Returns
617
+ -------
618
+ Configuration instance
619
+ """
620
+ cdef Configuration self = Configuration.__new__(Configuration)
621
+ self.configuration = configuration
622
+ return self
623
+
624
+
625
+ cpdef make_projector(Schema schema, children, MemoryPool pool,
626
+ str selection_mode="NONE",
627
+ Configuration configuration=None):
628
+ """
629
+ Construct a projection using expressions.
630
+
631
+ A projector is built for a specific schema and vector of expressions.
632
+ Once the projector is built, it can be used to evaluate many row batches.
633
+
634
+ Parameters
635
+ ----------
636
+ schema : pyarrow.Schema
637
+ Schema for the record batches, and the expressions.
638
+ children : list[pyarrow.gandiva.Expression]
639
+ List of projectable expression objects.
640
+ pool : pyarrow.MemoryPool
641
+ Memory pool used to allocate output arrays.
642
+ selection_mode : str, default "NONE"
643
+ Possible values are NONE, UINT16, UINT32, UINT64.
644
+ configuration : pyarrow.gandiva.Configuration, default None
645
+ Configuration for the projector.
646
+
647
+ Returns
648
+ -------
649
+ Projector instance
650
+ """
651
+ cdef:
652
+ Expression child
653
+ c_vector[shared_ptr[CGandivaExpression]] c_children
654
+ shared_ptr[CProjector] result
655
+
656
+ if configuration is None:
657
+ configuration = Configuration()
658
+
659
+ for child in children:
660
+ if child is None:
661
+ raise TypeError("Expressions must not be None")
662
+ c_children.push_back(child.expression)
663
+
664
+ check_status(
665
+ Projector_Make(schema.sp_schema, c_children,
666
+ _ensure_selection_mode(selection_mode),
667
+ configuration.configuration,
668
+ &result))
669
+ return Projector.create(result, pool)
670
+
671
+
672
+ cpdef make_filter(Schema schema, Condition condition,
673
+ Configuration configuration=None):
674
+ """
675
+ Construct a filter based on a condition.
676
+
677
+ A filter is built for a specific schema and condition. Once the filter is
678
+ built, it can be used to evaluate many row batches.
679
+
680
+ Parameters
681
+ ----------
682
+ schema : pyarrow.Schema
683
+ Schema for the record batches, and the condition.
684
+ condition : pyarrow.gandiva.Condition
685
+ Filter condition.
686
+ configuration : pyarrow.gandiva.Configuration, default None
687
+ Configuration for the filter.
688
+
689
+ Returns
690
+ -------
691
+ Filter instance
692
+ """
693
+ cdef shared_ptr[CFilter] result
694
+ if condition is None:
695
+ raise TypeError("Condition must not be None")
696
+
697
+ if configuration is None:
698
+ configuration = Configuration()
699
+
700
+ check_status(
701
+ Filter_Make(schema.sp_schema, condition.condition, configuration.configuration, &result))
702
+ return Filter.create(result)
703
+
704
+
705
+ cdef class FunctionSignature(_Weakrefable):
706
+ """
707
+ Signature of a Gandiva function including name, parameter types
708
+ and return type.
709
+ """
710
+
711
+ cdef:
712
+ shared_ptr[CFunctionSignature] signature
713
+
714
+ def __init__(self):
715
+ raise TypeError("Do not call {}'s constructor directly."
716
+ .format(self.__class__.__name__))
717
+
718
+ @staticmethod
719
+ cdef create(shared_ptr[CFunctionSignature] signature):
720
+ cdef FunctionSignature self = FunctionSignature.__new__(
721
+ FunctionSignature)
722
+ self.signature = signature
723
+ return self
724
+
725
+ def return_type(self):
726
+ return pyarrow_wrap_data_type(self.signature.get().ret_type())
727
+
728
+ def param_types(self):
729
+ result = []
730
+ cdef vector[shared_ptr[CDataType]] types = \
731
+ self.signature.get().param_types()
732
+ for t in types:
733
+ result.append(pyarrow_wrap_data_type(t))
734
+ return result
735
+
736
+ def name(self):
737
+ return self.signature.get().base_name().decode()
738
+
739
+ def __repr__(self):
740
+ signature = self.signature.get().ToString().decode()
741
+ return "FunctionSignature(" + signature + ")"
742
+
743
+
744
+ def get_registered_function_signatures():
745
+ """
746
+ Return the function in Gandiva's ExpressionRegistry.
747
+
748
+ Returns
749
+ -------
750
+ registry: a list of registered function signatures
751
+ """
752
+ results = []
753
+
754
+ cdef vector[shared_ptr[CFunctionSignature]] signatures = \
755
+ GetRegisteredFunctionSignatures()
756
+
757
+ for signature in signatures:
758
+ results.append(FunctionSignature.create(signature))
759
+
760
+ return results
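For orientation, here is a minimal sketch of how the pieces defined in this file fit together: TreeExprBuilder produces nodes, make_projector compiles expressions into a Projector, and make_filter compiles a Condition into a Filter that yields a SelectionVector. It assumes pyarrow was built with Gandiva enabled and that "add" and "greater_than" are names registered in Gandiva's function registry; the column names are illustrative.

import pyarrow as pa
import pyarrow.gandiva as gandiva

table = pa.table({"a": pa.array([1.0, 2.0, 3.0]),
                  "b": pa.array([4.0, 5.0, 6.0])})

builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
node_b = builder.make_field(table.schema.field("b"))

# Projector: compute a + b as a new float64 field.
sum_node = builder.make_function("add", [node_a, node_b], pa.float64())
expr = builder.make_expression(sum_node, pa.field("a_plus_b", pa.float64()))
projector = gandiva.make_projector(
    table.schema, [expr], pa.default_memory_pool())

# Filter: rows where a > 1.0, returned as a SelectionVector.
literal = builder.make_literal(1.0, pa.float64())
cond = builder.make_condition(
    builder.make_function("greater_than", [node_a, literal], pa.bool_()))
row_filter = gandiva.make_filter(table.schema, cond)

for batch in table.to_batches():
    (a_plus_b,) = projector.evaluate(batch)
    selection = row_filter.evaluate(batch, pa.default_memory_pool())
    # To project only the selected rows in one pass, build the projector
    # with a matching selection_mode (e.g. "UINT32") and pass `selection`
    # to Projector.evaluate().
    print(a_plus_b, selection.to_array())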
env-llmeval/lib/python3.10/site-packages/pyarrow/hdfs.py ADDED
@@ -0,0 +1,240 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ import os
20
+ import posixpath
21
+ import sys
22
+ import warnings
23
+
24
+ from pyarrow.util import doc, _DEPR_MSG
25
+ from pyarrow.filesystem import FileSystem
26
+ import pyarrow._hdfsio as _hdfsio
27
+
28
+
29
+ class HadoopFileSystem(_hdfsio.HadoopFileSystem, FileSystem):
30
+ """
31
+ DEPRECATED: FileSystem interface for HDFS cluster.
32
+
33
+ See pyarrow.hdfs.connect for full connection details
34
+
35
+ .. deprecated:: 2.0
36
+ ``pyarrow.hdfs.HadoopFileSystem`` is deprecated,
37
+ please use ``pyarrow.fs.HadoopFileSystem`` instead.
38
+ """
39
+
40
+ def __init__(self, host="default", port=0, user=None, kerb_ticket=None,
41
+ driver='libhdfs', extra_conf=None):
42
+ warnings.warn(
43
+ _DEPR_MSG.format(
44
+ "hdfs.HadoopFileSystem", "2.0.0", "fs.HadoopFileSystem"),
45
+ FutureWarning, stacklevel=2)
46
+ if driver == 'libhdfs':
47
+ _maybe_set_hadoop_classpath()
48
+
49
+ self._connect(host, port, user, kerb_ticket, extra_conf)
50
+
51
+ def __reduce__(self):
52
+ return (HadoopFileSystem, (self.host, self.port, self.user,
53
+ self.kerb_ticket, self.extra_conf))
54
+
55
+ def _isfilestore(self):
56
+ """
57
+ Return True if this is a Unix-style file store with directories.
58
+ """
59
+ return True
60
+
61
+ @doc(FileSystem.isdir)
62
+ def isdir(self, path):
63
+ return super().isdir(path)
64
+
65
+ @doc(FileSystem.isfile)
66
+ def isfile(self, path):
67
+ return super().isfile(path)
68
+
69
+ @doc(FileSystem.delete)
70
+ def delete(self, path, recursive=False):
71
+ return super().delete(path, recursive)
72
+
73
+ def mkdir(self, path, **kwargs):
74
+ """
75
+ Create directory in HDFS.
76
+
77
+ Parameters
78
+ ----------
79
+ path : str
80
+ Directory path to create, including any parent directories.
81
+
82
+ Notes
83
+ -----
84
+ libhdfs does not support create_parents=False, so it is ignored here.
85
+ """
86
+ return super().mkdir(path)
87
+
88
+ @doc(FileSystem.rename)
89
+ def rename(self, path, new_path):
90
+ return super().rename(path, new_path)
91
+
92
+ @doc(FileSystem.exists)
93
+ def exists(self, path):
94
+ return super().exists(path)
95
+
96
+ def ls(self, path, detail=False):
97
+ """
98
+ Retrieve directory contents and metadata, if requested.
99
+
100
+ Parameters
101
+ ----------
102
+ path : str
103
+ HDFS path to retrieve contents of.
104
+ detail : bool, default False
105
+ If False, only return list of paths.
106
+
107
+ Returns
108
+ -------
109
+ result : list of dicts (detail=True) or strings (detail=False)
110
+ """
111
+ return super().ls(path, detail)
112
+
113
+ def walk(self, top_path):
114
+ """
115
+ Directory tree generator for HDFS, like os.walk.
116
+
117
+ Parameters
118
+ ----------
119
+ top_path : str
120
+ Root directory for tree traversal.
121
+
122
+ Returns
123
+ -------
124
+ Generator yielding 3-tuple (dirpath, dirnames, filenames)
125
+ """
126
+ contents = self.ls(top_path, detail=True)
127
+
128
+ directories, files = _libhdfs_walk_files_dirs(top_path, contents)
129
+ yield top_path, directories, files
130
+ for dirname in directories:
131
+ yield from self.walk(self._path_join(top_path, dirname))
132
+
133
+
134
+ def _maybe_set_hadoop_classpath():
135
+ import re
136
+
137
+ if re.search(r'hadoop-common[^/]+.jar', os.environ.get('CLASSPATH', '')):
138
+ return
139
+
140
+ if 'HADOOP_HOME' in os.environ:
141
+ if sys.platform != 'win32':
142
+ classpath = _derive_hadoop_classpath()
143
+ else:
144
+ hadoop_bin = '{}/bin/hadoop'.format(os.environ['HADOOP_HOME'])
145
+ classpath = _hadoop_classpath_glob(hadoop_bin)
146
+ else:
147
+ classpath = _hadoop_classpath_glob('hadoop')
148
+
149
+ os.environ['CLASSPATH'] = classpath.decode('utf-8')
150
+
151
+
152
+ def _derive_hadoop_classpath():
153
+ import subprocess
154
+
155
+ find_args = ('find', '-L', os.environ['HADOOP_HOME'], '-name', '*.jar')
156
+ find = subprocess.Popen(find_args, stdout=subprocess.PIPE)
157
+ xargs_echo = subprocess.Popen(('xargs', 'echo'),
158
+ stdin=find.stdout,
159
+ stdout=subprocess.PIPE)
160
+ jars = subprocess.check_output(('tr', "' '", "':'"),
161
+ stdin=xargs_echo.stdout)
162
+ hadoop_conf = os.environ["HADOOP_CONF_DIR"] \
163
+ if "HADOOP_CONF_DIR" in os.environ \
164
+ else os.environ["HADOOP_HOME"] + "/etc/hadoop"
165
+ return (hadoop_conf + ":").encode("utf-8") + jars
166
+
167
+
168
+ def _hadoop_classpath_glob(hadoop_bin):
169
+ import subprocess
170
+
171
+ hadoop_classpath_args = (hadoop_bin, 'classpath', '--glob')
172
+ return subprocess.check_output(hadoop_classpath_args)
173
+
174
+
175
+ def _libhdfs_walk_files_dirs(top_path, contents):
176
+ files = []
177
+ directories = []
178
+ for c in contents:
179
+ scrubbed_name = posixpath.split(c['name'])[1]
180
+ if c['kind'] == 'file':
181
+ files.append(scrubbed_name)
182
+ else:
183
+ directories.append(scrubbed_name)
184
+
185
+ return directories, files
186
+
187
+
188
+ def connect(host="default", port=0, user=None, kerb_ticket=None,
189
+ extra_conf=None):
190
+ """
191
+ DEPRECATED: Connect to an HDFS cluster.
192
+
193
+ All parameters are optional and should only be set if the defaults need
194
+ to be overridden.
195
+
196
+ Authentication should be automatic if the HDFS cluster uses Kerberos.
197
+ However, if a username is specified, then the ticket cache will likely
198
+ be required.
199
+
200
+ .. deprecated:: 2.0
201
+ ``pyarrow.hdfs.connect`` is deprecated,
202
+ please use ``pyarrow.fs.HadoopFileSystem`` instead.
203
+
204
+ Parameters
205
+ ----------
206
+ host : NameNode. Set to "default" for fs.defaultFS from core-site.xml.
207
+ port : NameNode's port. Set to 0 for default or logical (HA) nodes.
208
+ user : Username when connecting to HDFS; None implies login user.
209
+ kerb_ticket : Path to Kerberos ticket cache.
210
+ extra_conf : dict, default None
211
+ extra Key/Value pairs for config; Will override any
212
+ hdfs-site.xml properties
213
+
214
+ Notes
215
+ -----
216
+ The first time you call this method, it will take longer than usual due
217
+ to JNI spin-up time.
218
+
219
+ Returns
220
+ -------
221
+ filesystem : HadoopFileSystem
222
+ """
223
+ warnings.warn(
224
+ _DEPR_MSG.format("hdfs.connect", "2.0.0", "fs.HadoopFileSystem"),
225
+ FutureWarning, stacklevel=2
226
+ )
227
+ return _connect(
228
+ host=host, port=port, user=user, kerb_ticket=kerb_ticket,
229
+ extra_conf=extra_conf
230
+ )
231
+
232
+
233
+ def _connect(host="default", port=0, user=None, kerb_ticket=None,
234
+ extra_conf=None):
235
+ with warnings.catch_warnings():
236
+ warnings.simplefilter("ignore")
237
+ fs = HadoopFileSystem(host=host, port=port, user=user,
238
+ kerb_ticket=kerb_ticket,
239
+ extra_conf=extra_conf)
240
+ return fs
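Because this module exists only to emit deprecation warnings, a short sketch of the replacement pyarrow.fs API may help when migrating. The host, port and paths below are placeholders, and a working libhdfs/JVM setup with a populated CLASSPATH (as _maybe_set_hadoop_classpath arranges above) is assumed.

from pyarrow import fs

# Equivalent of the deprecated pyarrow.hdfs.connect(): "default" resolves the
# NameNode from fs.defaultFS in core-site.xml; port 0 keeps the default port.
hdfs = fs.HadoopFileSystem(host="default", port=0)

# Roughly mirrors the legacy ls()/walk() helpers defined above.
for info in hdfs.get_file_info(fs.FileSelector("/tmp", recursive=False)):
    print(info.path, info.type, info.size)

# Read a file (placeholder path).
with hdfs.open_input_stream("/tmp/example.txt") as f:
    data = f.read()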
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array.h ADDED
@@ -0,0 +1,49 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Kitchen-sink public API for arrow::Array data structures. C++ library code
19
+ // (especially header files) in Apache Arrow should use more specific headers
20
+ // unless it's a file that uses most or all Array types in which case using
21
+ // arrow/array.h is fine.
22
+
23
+ #pragma once
24
+
25
+ /// \defgroup numeric-arrays Concrete classes for numeric arrays
26
+ /// @{
27
+ /// @}
28
+
29
+ /// \defgroup binary-arrays Concrete classes for binary/string arrays
30
+ /// @{
31
+ /// @}
32
+
33
+ /// \defgroup nested-arrays Concrete classes for nested arrays
34
+ /// @{
35
+ /// @}
36
+
37
+ /// \defgroup run-end-encoded-arrays Concrete classes for run-end encoded arrays
38
+ /// @{
39
+ /// @}
40
+
41
+ #include "arrow/array/array_base.h" // IWYU pragma: keep
42
+ #include "arrow/array/array_binary.h" // IWYU pragma: keep
43
+ #include "arrow/array/array_decimal.h" // IWYU pragma: keep
44
+ #include "arrow/array/array_dict.h" // IWYU pragma: keep
45
+ #include "arrow/array/array_nested.h" // IWYU pragma: keep
46
+ #include "arrow/array/array_primitive.h" // IWYU pragma: keep
47
+ #include "arrow/array/array_run_end.h" // IWYU pragma: keep
48
+ #include "arrow/array/data.h" // IWYU pragma: keep
49
+ #include "arrow/array/util.h" // IWYU pragma: keep
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h ADDED
@@ -0,0 +1,484 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+
27
+ #include "arrow/buffer.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/util/bit_util.h"
30
+ #include "arrow/util/bitmap_generate.h"
31
+ #include "arrow/util/bitmap_ops.h"
32
+ #include "arrow/util/macros.h"
33
+ #include "arrow/util/ubsan.h"
34
+ #include "arrow/util/visibility.h"
35
+
36
+ namespace arrow {
37
+
38
+ // ----------------------------------------------------------------------
39
+ // Buffer builder classes
40
+
41
+ /// \class BufferBuilder
42
+ /// \brief A class for incrementally building a contiguous chunk of in-memory
43
+ /// data
44
+ class ARROW_EXPORT BufferBuilder {
45
+ public:
46
+ explicit BufferBuilder(MemoryPool* pool = default_memory_pool(),
47
+ int64_t alignment = kDefaultBufferAlignment)
48
+ : pool_(pool),
49
+ data_(/*ensure never null to make ubsan happy and avoid check penalties below*/
50
+ util::MakeNonNull<uint8_t>()),
51
+ capacity_(0),
52
+ size_(0),
53
+ alignment_(alignment) {}
54
+
55
+ /// \brief Constructs new Builder that will start using
56
+ /// the provided buffer until Finish/Reset are called.
57
+ /// The buffer is not resized.
58
+ explicit BufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
59
+ MemoryPool* pool = default_memory_pool(),
60
+ int64_t alignment = kDefaultBufferAlignment)
61
+ : buffer_(std::move(buffer)),
62
+ pool_(pool),
63
+ data_(buffer_->mutable_data()),
64
+ capacity_(buffer_->capacity()),
65
+ size_(buffer_->size()),
66
+ alignment_(alignment) {}
67
+
68
+ /// \brief Resize the buffer to the nearest multiple of 64 bytes
69
+ ///
70
+ /// \param new_capacity the new capacity of the of the builder. Will be
71
+ /// rounded up to a multiple of 64 bytes for padding
72
+ /// \param shrink_to_fit if new capacity is smaller than the existing,
73
+ /// reallocate internal buffer. Set to false to avoid reallocations when
74
+ /// shrinking the builder.
75
+ /// \return Status
76
+ Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
77
+ if (buffer_ == NULLPTR) {
78
+ ARROW_ASSIGN_OR_RAISE(buffer_,
79
+ AllocateResizableBuffer(new_capacity, alignment_, pool_));
80
+ } else {
81
+ ARROW_RETURN_NOT_OK(buffer_->Resize(new_capacity, shrink_to_fit));
82
+ }
83
+ capacity_ = buffer_->capacity();
84
+ data_ = buffer_->mutable_data();
85
+ return Status::OK();
86
+ }
87
+
88
+ /// \brief Ensure that builder can accommodate the additional number of bytes
89
+ /// without the need to perform allocations
90
+ ///
91
+ /// \param[in] additional_bytes number of additional bytes to make space for
92
+ /// \return Status
93
+ Status Reserve(const int64_t additional_bytes) {
94
+ auto min_capacity = size_ + additional_bytes;
95
+ if (min_capacity <= capacity_) {
96
+ return Status::OK();
97
+ }
98
+ return Resize(GrowByFactor(capacity_, min_capacity), false);
99
+ }
100
+
101
+ /// \brief Return a capacity expanded by the desired growth factor
102
+ static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) {
103
+ // Doubling capacity except for large Reserve requests. 2x growth strategy
104
+ // (versus 1.5x) seems to have slightly better performance when using
105
+ // jemalloc, but significantly better performance when using the system
106
+ // allocator. See ARROW-6450 for further discussion
107
+ return std::max(new_capacity, current_capacity * 2);
108
+ }
109
+
110
+ /// \brief Append the given data to the buffer
111
+ ///
112
+ /// The buffer is automatically expanded if necessary.
113
+ Status Append(const void* data, const int64_t length) {
114
+ if (ARROW_PREDICT_FALSE(size_ + length > capacity_)) {
115
+ ARROW_RETURN_NOT_OK(Resize(GrowByFactor(capacity_, size_ + length), false));
116
+ }
117
+ UnsafeAppend(data, length);
118
+ return Status::OK();
119
+ }
120
+
121
+ /// \brief Append the given data to the buffer
122
+ ///
123
+ /// The buffer is automatically expanded if necessary.
124
+ Status Append(std::string_view v) { return Append(v.data(), v.size()); }
125
+
126
+ /// \brief Append copies of a value to the buffer
127
+ ///
128
+ /// The buffer is automatically expanded if necessary.
129
+ Status Append(const int64_t num_copies, uint8_t value) {
130
+ ARROW_RETURN_NOT_OK(Reserve(num_copies));
131
+ UnsafeAppend(num_copies, value);
132
+ return Status::OK();
133
+ }
134
+
135
+ // Advance pointer and zero out memory
136
+ Status Advance(const int64_t length) { return Append(length, 0); }
137
+
138
+ // Advance pointer, but don't allocate or zero memory
139
+ void UnsafeAdvance(const int64_t length) { size_ += length; }
140
+
141
+ // Unsafe methods don't check existing size
142
+ void UnsafeAppend(const void* data, const int64_t length) {
143
+ memcpy(data_ + size_, data, static_cast<size_t>(length));
144
+ size_ += length;
145
+ }
146
+
147
+ void UnsafeAppend(std::string_view v) {
148
+ UnsafeAppend(v.data(), static_cast<int64_t>(v.size()));
149
+ }
150
+
151
+ void UnsafeAppend(const int64_t num_copies, uint8_t value) {
152
+ memset(data_ + size_, value, static_cast<size_t>(num_copies));
153
+ size_ += num_copies;
154
+ }
155
+
156
+ /// \brief Return result of builder as a Buffer object.
157
+ ///
158
+ /// The builder is reset and can be reused afterwards.
159
+ ///
160
+ /// \param[out] out the finalized Buffer object
161
+ /// \param shrink_to_fit if the buffer size is smaller than its capacity,
162
+ /// reallocate to fit more tightly in memory. Set to false to avoid
163
+ /// a reallocation, at the expense of potentially more memory consumption.
164
+ /// \return Status
165
+ Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
166
+ ARROW_RETURN_NOT_OK(Resize(size_, shrink_to_fit));
167
+ if (size_ != 0) buffer_->ZeroPadding();
168
+ *out = buffer_;
169
+ if (*out == NULLPTR) {
170
+ ARROW_ASSIGN_OR_RAISE(*out, AllocateBuffer(0, alignment_, pool_));
171
+ }
172
+ Reset();
173
+ return Status::OK();
174
+ }
175
+
176
+ Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
177
+ std::shared_ptr<Buffer> out;
178
+ ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
179
+ return out;
180
+ }
181
+
182
+ /// \brief Like Finish, but override the final buffer size
183
+ ///
184
+ /// This is useful after writing data directly into the builder memory
185
+ /// without calling the Append methods (basically, when using BufferBuilder
186
+ /// mostly for memory allocation).
187
+ Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
188
+ bool shrink_to_fit = true) {
189
+ size_ = final_length;
190
+ return Finish(shrink_to_fit);
191
+ }
192
+
193
+ void Reset() {
194
+ buffer_ = NULLPTR;
195
+ capacity_ = size_ = 0;
196
+ }
197
+
198
+ /// \brief Set size to a smaller value without modifying builder
199
+ /// contents. For reusable BufferBuilder classes
200
+ /// \param[in] position must be non-negative and less than or equal
201
+ /// to the current length()
202
+ void Rewind(int64_t position) { size_ = position; }
203
+
204
+ int64_t capacity() const { return capacity_; }
205
+ int64_t length() const { return size_; }
206
+ const uint8_t* data() const { return data_; }
207
+ uint8_t* mutable_data() { return data_; }
208
+ template <typename T>
209
+ const T* data_as() const {
210
+ return reinterpret_cast<const T*>(data_);
211
+ }
212
+ template <typename T>
213
+ T* mutable_data_as() {
214
+ return reinterpret_cast<T*>(data_);
215
+ }
216
+
217
+ private:
218
+ std::shared_ptr<ResizableBuffer> buffer_;
219
+ MemoryPool* pool_;
220
+ uint8_t* data_;
221
+ int64_t capacity_;
222
+ int64_t size_;
223
+ int64_t alignment_;
224
+ };
225
+
226
+ template <typename T, typename Enable = void>
227
+ class TypedBufferBuilder;
228
+
229
+ /// \brief A BufferBuilder for building a buffer of arithmetic elements
230
+ template <typename T>
231
+ class TypedBufferBuilder<
232
+ T, typename std::enable_if<std::is_arithmetic<T>::value ||
233
+ std::is_standard_layout<T>::value>::type> {
234
+ public:
235
+ explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(),
236
+ int64_t alignment = kDefaultBufferAlignment)
237
+ : bytes_builder_(pool, alignment) {}
238
+
239
+ explicit TypedBufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
240
+ MemoryPool* pool = default_memory_pool())
241
+ : bytes_builder_(std::move(buffer), pool) {}
242
+
243
+ explicit TypedBufferBuilder(BufferBuilder builder)
244
+ : bytes_builder_(std::move(builder)) {}
245
+
246
+ BufferBuilder* bytes_builder() { return &bytes_builder_; }
247
+
248
+ Status Append(T value) {
249
+ return bytes_builder_.Append(reinterpret_cast<uint8_t*>(&value), sizeof(T));
250
+ }
251
+
252
+ Status Append(const T* values, int64_t num_elements) {
253
+ return bytes_builder_.Append(reinterpret_cast<const uint8_t*>(values),
254
+ num_elements * sizeof(T));
255
+ }
256
+
257
+ Status Append(const int64_t num_copies, T value) {
258
+ ARROW_RETURN_NOT_OK(Reserve(num_copies + length()));
259
+ UnsafeAppend(num_copies, value);
260
+ return Status::OK();
261
+ }
262
+
263
+ void UnsafeAppend(T value) {
264
+ bytes_builder_.UnsafeAppend(reinterpret_cast<uint8_t*>(&value), sizeof(T));
265
+ }
266
+
267
+ void UnsafeAppend(const T* values, int64_t num_elements) {
268
+ bytes_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(values),
269
+ num_elements * sizeof(T));
270
+ }
271
+
272
+ template <typename Iter>
273
+ void UnsafeAppend(Iter values_begin, Iter values_end) {
274
+ auto num_elements = static_cast<int64_t>(std::distance(values_begin, values_end));
275
+ auto data = mutable_data() + length();
276
+ bytes_builder_.UnsafeAdvance(num_elements * sizeof(T));
277
+ std::copy(values_begin, values_end, data);
278
+ }
279
+
280
+ void UnsafeAppend(const int64_t num_copies, T value) {
281
+ auto data = mutable_data() + length();
282
+ bytes_builder_.UnsafeAdvance(num_copies * sizeof(T));
283
+ std::fill(data, data + num_copies, value);
284
+ }
285
+
286
+ Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
287
+ return bytes_builder_.Resize(new_capacity * sizeof(T), shrink_to_fit);
288
+ }
289
+
290
+ Status Reserve(const int64_t additional_elements) {
291
+ return bytes_builder_.Reserve(additional_elements * sizeof(T));
292
+ }
293
+
294
+ Status Advance(const int64_t length) {
295
+ return bytes_builder_.Advance(length * sizeof(T));
296
+ }
297
+
298
+ Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
299
+ return bytes_builder_.Finish(out, shrink_to_fit);
300
+ }
301
+
302
+ Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
303
+ std::shared_ptr<Buffer> out;
304
+ ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
305
+ return out;
306
+ }
307
+
308
+ /// \brief Like Finish, but override the final buffer size
309
+ ///
310
+ /// This is useful after writing data directly into the builder memory
311
+ /// without calling the Append methods (basically, when using TypedBufferBuilder
312
+ /// only for memory allocation).
313
+ Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
314
+ bool shrink_to_fit = true) {
315
+ return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit);
316
+ }
317
+
318
+ void Reset() { bytes_builder_.Reset(); }
319
+
320
+ int64_t length() const { return bytes_builder_.length() / sizeof(T); }
321
+ int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); }
322
+ const T* data() const { return reinterpret_cast<const T*>(bytes_builder_.data()); }
323
+ T* mutable_data() { return reinterpret_cast<T*>(bytes_builder_.mutable_data()); }
324
+
325
+ private:
326
+ BufferBuilder bytes_builder_;
327
+ };
328
+
329
+ /// \brief A BufferBuilder for building a buffer containing a bitmap
330
+ template <>
331
+ class TypedBufferBuilder<bool> {
332
+ public:
333
+ explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(),
334
+ int64_t alignment = kDefaultBufferAlignment)
335
+ : bytes_builder_(pool, alignment) {}
336
+
337
+ explicit TypedBufferBuilder(BufferBuilder builder)
338
+ : bytes_builder_(std::move(builder)) {}
339
+
340
+ BufferBuilder* bytes_builder() { return &bytes_builder_; }
341
+
342
+ Status Append(bool value) {
343
+ ARROW_RETURN_NOT_OK(Reserve(1));
344
+ UnsafeAppend(value);
345
+ return Status::OK();
346
+ }
347
+
348
+ Status Append(const uint8_t* valid_bytes, int64_t num_elements) {
349
+ ARROW_RETURN_NOT_OK(Reserve(num_elements));
350
+ UnsafeAppend(valid_bytes, num_elements);
351
+ return Status::OK();
352
+ }
353
+
354
+ Status Append(const int64_t num_copies, bool value) {
355
+ ARROW_RETURN_NOT_OK(Reserve(num_copies));
356
+ UnsafeAppend(num_copies, value);
357
+ return Status::OK();
358
+ }
359
+
360
+ void UnsafeAppend(bool value) {
361
+ bit_util::SetBitTo(mutable_data(), bit_length_, value);
362
+ if (!value) {
363
+ ++false_count_;
364
+ }
365
+ ++bit_length_;
366
+ }
367
+
368
+ /// \brief Append bits from an array of bytes (one value per byte)
369
+ void UnsafeAppend(const uint8_t* bytes, int64_t num_elements) {
370
+ if (num_elements == 0) return;
371
+ int64_t i = 0;
372
+ internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
373
+ bool value = bytes[i++];
374
+ false_count_ += !value;
375
+ return value;
376
+ });
377
+ bit_length_ += num_elements;
378
+ }
379
+
380
+ /// \brief Append bits from a packed bitmap
381
+ void UnsafeAppend(const uint8_t* bitmap, int64_t offset, int64_t num_elements) {
382
+ if (num_elements == 0) return;
383
+ internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_);
384
+ false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements);
385
+ bit_length_ += num_elements;
386
+ }
387
+
388
+ void UnsafeAppend(const int64_t num_copies, bool value) {
389
+ bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value);
390
+ false_count_ += num_copies * !value;
391
+ bit_length_ += num_copies;
392
+ }
393
+
394
+ template <bool count_falses, typename Generator>
395
+ void UnsafeAppend(const int64_t num_elements, Generator&& gen) {
396
+ if (num_elements == 0) return;
397
+
398
+ if (count_falses) {
399
+ internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
400
+ bool value = gen();
401
+ false_count_ += !value;
402
+ return value;
403
+ });
404
+ } else {
405
+ internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements,
406
+ std::forward<Generator>(gen));
407
+ }
408
+ bit_length_ += num_elements;
409
+ }
410
+
411
+ Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
412
+ const int64_t old_byte_capacity = bytes_builder_.capacity();
413
+ ARROW_RETURN_NOT_OK(
414
+ bytes_builder_.Resize(bit_util::BytesForBits(new_capacity), shrink_to_fit));
415
+ // Resize() may have chosen a larger capacity (e.g. for padding),
416
+ // so ask it again before calling memset().
417
+ const int64_t new_byte_capacity = bytes_builder_.capacity();
418
+ if (new_byte_capacity > old_byte_capacity) {
419
+ // The additional buffer space is 0-initialized for convenience,
420
+ // so that other methods can simply bump the length.
421
+ memset(mutable_data() + old_byte_capacity, 0,
422
+ static_cast<size_t>(new_byte_capacity - old_byte_capacity));
423
+ }
424
+ return Status::OK();
425
+ }
426
+
427
+ Status Reserve(const int64_t additional_elements) {
428
+ return Resize(
429
+ BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements),
430
+ false);
431
+ }
432
+
433
+ Status Advance(const int64_t length) {
434
+ ARROW_RETURN_NOT_OK(Reserve(length));
435
+ bit_length_ += length;
436
+ false_count_ += length;
437
+ return Status::OK();
438
+ }
439
+
440
+ Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
441
+ // set bytes_builder_.size_ == byte size of data
442
+ bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) -
443
+ bytes_builder_.length());
444
+ bit_length_ = false_count_ = 0;
445
+ return bytes_builder_.Finish(out, shrink_to_fit);
446
+ }
447
+
448
+ Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
449
+ std::shared_ptr<Buffer> out;
450
+ ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
451
+ return out;
452
+ }
453
+
454
+ /// \brief Like Finish, but override the final buffer size
455
+ ///
456
+ /// This is useful after writing data directly into the builder memory
457
+ /// without calling the Append methods (basically, when using TypedBufferBuilder
458
+ /// only for memory allocation).
459
+ Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
460
+ bool shrink_to_fit = true) {
461
+ const auto final_byte_length = bit_util::BytesForBits(final_length);
462
+ bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length());
463
+ bit_length_ = false_count_ = 0;
464
+ return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit);
465
+ }
466
+
467
+ void Reset() {
468
+ bytes_builder_.Reset();
469
+ bit_length_ = false_count_ = 0;
470
+ }
471
+
472
+ int64_t length() const { return bit_length_; }
473
+ int64_t capacity() const { return bytes_builder_.capacity() * 8; }
474
+ const uint8_t* data() const { return bytes_builder_.data(); }
475
+ uint8_t* mutable_data() { return bytes_builder_.mutable_data(); }
476
+ int64_t false_count() const { return false_count_; }
477
+
478
+ private:
479
+ BufferBuilder bytes_builder_;
480
+ int64_t bit_length_ = 0;
481
+ int64_t false_count_ = 0;
482
+ };
483
+
484
+ } // namespace arrow
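A minimal usage sketch for the TypedBufferBuilder declarations above (not part of the committed header; it assumes an Arrow C++ build is available to compile and link against, and the `BuildBuffers` function name is purely illustrative):
```cpp
// Appends typed values and packed bits with the builders declared above.
#include <cstdint>
#include <iostream>
#include <memory>

#include "arrow/buffer.h"
#include "arrow/buffer_builder.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status BuildBuffers() {
  // Typed builder: appends int32 values into a growable, aligned buffer.
  arrow::TypedBufferBuilder<int32_t> values;
  ARROW_RETURN_NOT_OK(values.Append(7));
  const int32_t more[] = {1, 2, 3};
  ARROW_RETURN_NOT_OK(values.Append(more, 3));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> value_buf, values.Finish());

  // bool specialization: packs one bit per value and tracks the false count.
  arrow::TypedBufferBuilder<bool> validity;
  ARROW_RETURN_NOT_OK(validity.Append(/*num_copies=*/4, true));
  std::cout << "false bits: " << validity.false_count() << std::endl;
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> bitmap, validity.Finish());

  std::cout << "values buffer bytes: " << value_buf->size()
            << ", bitmap bytes: " << bitmap->size() << std::endl;
  return arrow::Status::OK();
}

int main() { return BuildBuffers().ok() ? 0 : 1; }
```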
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h ADDED
@@ -0,0 +1,33 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/array/builder_adaptive.h" // IWYU pragma: keep
23
+ #include "arrow/array/builder_base.h" // IWYU pragma: keep
24
+ #include "arrow/array/builder_binary.h" // IWYU pragma: keep
25
+ #include "arrow/array/builder_decimal.h" // IWYU pragma: keep
26
+ #include "arrow/array/builder_dict.h" // IWYU pragma: keep
27
+ #include "arrow/array/builder_nested.h" // IWYU pragma: keep
28
+ #include "arrow/array/builder_primitive.h" // IWYU pragma: keep
29
+ #include "arrow/array/builder_run_end.h" // IWYU pragma: keep
30
+ #include "arrow/array/builder_time.h" // IWYU pragma: keep
31
+ #include "arrow/array/builder_union.h" // IWYU pragma: keep
32
+ #include "arrow/status.h"
33
+ #include "arrow/util/visibility.h"
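A minimal sketch showing this umbrella header in use (not part of the diff; it assumes a linked Arrow C++ build):
```cpp
// builder.h re-exports all concrete ArrayBuilder classes; Int64Builder comes
// from builder_primitive.h via this single include.
#include <iostream>
#include <memory>

#include "arrow/builder.h"

int main() {
  arrow::Int64Builder builder;
  arrow::Status st = builder.AppendValues({1, 2, 3});
  if (!st.ok()) return 1;
  st = builder.AppendNull();
  if (!st.ok()) return 1;

  std::shared_ptr<arrow::Array> array;
  st = builder.Finish(&array);
  if (!st.ok()) return 1;

  std::cout << array->ToString() << std::endl;  // prints the 4-element array
  return 0;
}
```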
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/device.h ADDED
@@ -0,0 +1,366 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/io/type_fwd.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/compare.h"
30
+ #include "arrow/util/macros.h"
31
+ #include "arrow/util/visibility.h"
32
+
33
+ namespace arrow {
34
+
35
+ /// \brief EXPERIMENTAL: Device type enum which matches up with C Data Device types
36
+ enum class DeviceAllocationType : char {
37
+ kCPU = 1,
38
+ kCUDA = 2,
39
+ kCUDA_HOST = 3,
40
+ kOPENCL = 4,
41
+ kVULKAN = 7,
42
+ kMETAL = 8,
43
+ kVPI = 9,
44
+ kROCM = 10,
45
+ kROCM_HOST = 11,
46
+ kEXT_DEV = 12,
47
+ kCUDA_MANAGED = 13,
48
+ kONEAPI = 14,
49
+ kWEBGPU = 15,
50
+ kHEXAGON = 16,
51
+ };
52
+
53
+ class MemoryManager;
54
+
55
+ /// \brief EXPERIMENTAL: Abstract interface for hardware devices
56
+ ///
57
+ /// This object represents a device with access to some memory spaces.
58
+ /// When handling a Buffer or raw memory address, it allows deciding in which
59
+ /// context the raw memory address should be interpreted
60
+ /// (e.g. CPU-accessible memory, or embedded memory on some particular GPU).
61
+ class ARROW_EXPORT Device : public std::enable_shared_from_this<Device>,
62
+ public util::EqualityComparable<Device> {
63
+ public:
64
+ virtual ~Device();
65
+
66
+ /// \brief A shorthand for this device's type.
67
+ ///
68
+ /// The returned value is different for each device class, but is the
69
+ /// same for all instances of a given class. It can be used as a replacement
70
+ /// for RTTI.
71
+ virtual const char* type_name() const = 0;
72
+
73
+ /// \brief A human-readable description of the device.
74
+ ///
75
+ /// The returned value should be detailed enough to distinguish between
76
+ /// different instances, where necessary.
77
+ virtual std::string ToString() const = 0;
78
+
79
+ /// \brief Whether this instance points to the same device as another one.
80
+ virtual bool Equals(const Device&) const = 0;
81
+
82
+ /// \brief A device ID to identify this device if there are multiple of this type.
83
+ ///
84
+ /// If there is no "device_id" equivalent (such as for the main CPU device on
85
+ /// non-numa systems) returns -1.
86
+ virtual int64_t device_id() const { return -1; }
87
+
88
+ /// \brief Whether this device is the main CPU device.
89
+ ///
90
+ /// This shorthand method is very useful when deciding whether a memory address
91
+ /// is CPU-accessible.
92
+ bool is_cpu() const { return is_cpu_; }
93
+
94
+ /// \brief Return a MemoryManager instance tied to this device
95
+ ///
96
+ /// The returned instance uses default parameters for this device type's
97
+ /// MemoryManager implementation. Some devices also allow constructing
98
+ /// MemoryManager instances with non-default parameters.
99
+ virtual std::shared_ptr<MemoryManager> default_memory_manager() = 0;
100
+
101
+ /// \brief Return the DeviceAllocationType of this device
102
+ virtual DeviceAllocationType device_type() const = 0;
103
+
104
+ class SyncEvent;
105
+
106
+ /// \brief EXPERIMENTAL: An opaque wrapper for Device-specific streams
107
+ ///
108
+ /// In essence this is just a wrapper around a void* to represent the
109
+ /// standard concept of a stream/queue on a device. Derived classes
110
+ /// should be trivially constructible from its device-specific counterparts.
111
+ class ARROW_EXPORT Stream {
112
+ public:
113
+ using release_fn_t = std::function<void(void*)>;
114
+
115
+ virtual ~Stream() = default;
116
+
117
+ virtual const void* get_raw() const { return stream_.get(); }
118
+
119
+ /// \brief Make the stream wait on the provided event.
120
+ ///
121
+ /// Tells the stream that it should wait until the synchronization
122
+ /// event is completed without blocking the CPU.
123
+ virtual Status WaitEvent(const SyncEvent&) = 0;
124
+
125
+ /// \brief Blocks the current thread until a stream's remaining tasks are completed
126
+ virtual Status Synchronize() const = 0;
127
+
128
+ protected:
129
+ explicit Stream(void* stream, release_fn_t release_stream)
130
+ : stream_{stream, release_stream} {}
131
+
132
+ std::unique_ptr<void, release_fn_t> stream_;
133
+ };
134
+
135
+ virtual Result<std::shared_ptr<Stream>> MakeStream() { return NULLPTR; }
136
+
137
+ /// \brief Create a new device stream
138
+ ///
139
+ /// This should create the appropriate stream type for the device,
140
+ /// derived from Device::Stream to allow for stream ordered events
141
+ /// and memory allocations.
142
+ virtual Result<std::shared_ptr<Stream>> MakeStream(unsigned int flags) {
143
+ return NULLPTR;
144
+ }
145
+
146
+ /// @brief Wrap an existing device stream alongside a release function
147
+ ///
148
+ /// @param device_stream a pointer to the stream to wrap
149
+ /// @param release_fn a function to call during destruction, `nullptr` or
150
+ /// a no-op function can be passed to indicate ownership is maintained
151
+ /// externally
152
+ virtual Result<std::shared_ptr<Stream>> WrapStream(void* device_stream,
153
+ Stream::release_fn_t release_fn) {
154
+ return NULLPTR;
155
+ }
156
+
157
+ /// \brief EXPERIMENTAL: An object that provides event/stream sync primitives
158
+ class ARROW_EXPORT SyncEvent {
159
+ public:
160
+ using release_fn_t = std::function<void(void*)>;
161
+
162
+ virtual ~SyncEvent() = default;
163
+
164
+ void* get_raw() { return sync_event_.get(); }
165
+
166
+ /// @brief Block until sync event is completed.
167
+ virtual Status Wait() = 0;
168
+
169
+ /// @brief Record the wrapped event on the stream so it triggers
170
+ /// the event when the stream gets to that point in its queue.
171
+ virtual Status Record(const Stream&) = 0;
172
+
173
+ protected:
174
+ /// If creating this with a passed in event, the caller must ensure
175
+ /// that the event lives until clear_event is called on this as it
176
+ /// won't own it.
177
+ explicit SyncEvent(void* sync_event, release_fn_t release_sync_event)
178
+ : sync_event_{sync_event, release_sync_event} {}
179
+
180
+ std::unique_ptr<void, release_fn_t> sync_event_;
181
+ };
182
+
183
+ protected:
184
+ ARROW_DISALLOW_COPY_AND_ASSIGN(Device);
185
+ explicit Device(bool is_cpu = false) : is_cpu_(is_cpu) {}
186
+
187
+ bool is_cpu_;
188
+ };
189
+
190
+ /// \brief EXPERIMENTAL: An object that provides memory management primitives
191
+ ///
192
+ /// A MemoryManager is always tied to a particular Device instance.
193
+ /// It can also have additional parameters (such as a MemoryPool to
194
+ /// allocate CPU memory).
195
+ class ARROW_EXPORT MemoryManager : public std::enable_shared_from_this<MemoryManager> {
196
+ public:
197
+ virtual ~MemoryManager();
198
+
199
+ /// \brief The device this MemoryManager is tied to
200
+ const std::shared_ptr<Device>& device() const { return device_; }
201
+
202
+ /// \brief Whether this MemoryManager is tied to the main CPU device.
203
+ ///
204
+ /// This shorthand method is very useful when deciding whether a memory address
205
+ /// is CPU-accessible.
206
+ bool is_cpu() const { return device_->is_cpu(); }
207
+
208
+ /// \brief Create a RandomAccessFile to read a particular buffer.
209
+ ///
210
+ /// The given buffer must be tied to this MemoryManager.
211
+ ///
212
+ /// See also the Buffer::GetReader shorthand.
213
+ virtual Result<std::shared_ptr<io::RandomAccessFile>> GetBufferReader(
214
+ std::shared_ptr<Buffer> buf) = 0;
215
+
216
+ /// \brief Create a OutputStream to write to a particular buffer.
217
+ ///
218
+ /// The given buffer must be mutable and tied to this MemoryManager.
219
+ /// The returned stream object writes into the buffer's underlying memory
220
+ /// (but it won't resize it).
221
+ ///
222
+ /// See also the Buffer::GetWriter shorthand.
223
+ virtual Result<std::shared_ptr<io::OutputStream>> GetBufferWriter(
224
+ std::shared_ptr<Buffer> buf) = 0;
225
+
226
+ /// \brief Allocate a (mutable) Buffer
227
+ ///
228
+ /// The buffer will be allocated in the device's memory.
229
+ virtual Result<std::unique_ptr<Buffer>> AllocateBuffer(int64_t size) = 0;
230
+
231
+ /// \brief Copy a Buffer to a destination MemoryManager
232
+ ///
233
+ /// See also the Buffer::Copy shorthand.
234
+ static Result<std::shared_ptr<Buffer>> CopyBuffer(
235
+ const std::shared_ptr<Buffer>& source, const std::shared_ptr<MemoryManager>& to);
236
+
237
+ /// \brief Copy a non-owned Buffer to a destination MemoryManager
238
+ ///
239
+ /// This is useful for cases where the source memory area is externally managed
240
+ /// (its lifetime not tied to the source Buffer), otherwise please use CopyBuffer().
241
+ static Result<std::unique_ptr<Buffer>> CopyNonOwned(
242
+ const Buffer& source, const std::shared_ptr<MemoryManager>& to);
243
+
244
+ /// \brief Make a no-copy Buffer view in a destination MemoryManager
245
+ ///
246
+ /// See also the Buffer::View shorthand.
247
+ static Result<std::shared_ptr<Buffer>> ViewBuffer(
248
+ const std::shared_ptr<Buffer>& source, const std::shared_ptr<MemoryManager>& to);
249
+
250
+ /// \brief Create a new SyncEvent.
251
+ ///
252
+ /// This version should construct the appropriate event for the device and
253
+ /// provide the unique_ptr with the correct deleter for the event type.
254
+ /// If the device does not require or work with any synchronization, it is
255
+ /// allowed for it to return a nullptr.
256
+ virtual Result<std::shared_ptr<Device::SyncEvent>> MakeDeviceSyncEvent();
257
+
258
+ /// \brief Wrap an event into a SyncEvent.
259
+ ///
260
+ /// @param sync_event passed in sync_event (should be a pointer to the appropriate type)
261
+ /// @param release_sync_event destructor to free sync_event. `nullptr` may be
262
+ /// passed to indicate that no destruction/freeing is necessary
263
+ virtual Result<std::shared_ptr<Device::SyncEvent>> WrapDeviceSyncEvent(
264
+ void* sync_event, Device::SyncEvent::release_fn_t release_sync_event);
265
+
266
+ protected:
267
+ ARROW_DISALLOW_COPY_AND_ASSIGN(MemoryManager);
268
+
269
+ explicit MemoryManager(const std::shared_ptr<Device>& device) : device_(device) {}
270
+
271
+ // Default implementations always return nullptr, should be overridden
272
+ // by subclasses that support data transfer.
273
+ // (returning nullptr means unsupported copy / view)
274
+ // In CopyBufferFrom and ViewBufferFrom, the `from` parameter is guaranteed to
275
+ // be equal to `buf->memory_manager()`.
276
+ virtual Result<std::shared_ptr<Buffer>> CopyBufferFrom(
277
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& from);
278
+ virtual Result<std::shared_ptr<Buffer>> CopyBufferTo(
279
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& to);
280
+ virtual Result<std::unique_ptr<Buffer>> CopyNonOwnedFrom(
281
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& from);
282
+ virtual Result<std::unique_ptr<Buffer>> CopyNonOwnedTo(
283
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& to);
284
+ virtual Result<std::shared_ptr<Buffer>> ViewBufferFrom(
285
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& from);
286
+ virtual Result<std::shared_ptr<Buffer>> ViewBufferTo(
287
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& to);
288
+
289
+ std::shared_ptr<Device> device_;
290
+ };
291
+
292
+ // ----------------------------------------------------------------------
293
+ // CPU backend implementation
294
+
295
+ class ARROW_EXPORT CPUDevice : public Device {
296
+ public:
297
+ const char* type_name() const override;
298
+ std::string ToString() const override;
299
+ bool Equals(const Device&) const override;
300
+ DeviceAllocationType device_type() const override { return DeviceAllocationType::kCPU; }
301
+
302
+ std::shared_ptr<MemoryManager> default_memory_manager() override;
303
+
304
+ /// \brief Return the global CPUDevice instance
305
+ static std::shared_ptr<Device> Instance();
306
+
307
+ /// \brief Create a MemoryManager
308
+ ///
309
+ /// The returned MemoryManager will use the given MemoryPool for allocations.
310
+ static std::shared_ptr<MemoryManager> memory_manager(MemoryPool* pool);
311
+
312
+ protected:
313
+ CPUDevice() : Device(true) {}
314
+ };
315
+
316
+ class ARROW_EXPORT CPUMemoryManager : public MemoryManager {
317
+ public:
318
+ Result<std::shared_ptr<io::RandomAccessFile>> GetBufferReader(
319
+ std::shared_ptr<Buffer> buf) override;
320
+ Result<std::shared_ptr<io::OutputStream>> GetBufferWriter(
321
+ std::shared_ptr<Buffer> buf) override;
322
+
323
+ Result<std::unique_ptr<Buffer>> AllocateBuffer(int64_t size) override;
324
+
325
+ /// \brief Return the MemoryPool associated with this MemoryManager.
326
+ MemoryPool* pool() const { return pool_; }
327
+
328
+ protected:
329
+ CPUMemoryManager(const std::shared_ptr<Device>& device, MemoryPool* pool)
330
+ : MemoryManager(device), pool_(pool) {}
331
+
332
+ static std::shared_ptr<MemoryManager> Make(const std::shared_ptr<Device>& device,
333
+ MemoryPool* pool = default_memory_pool());
334
+
335
+ Result<std::shared_ptr<Buffer>> CopyBufferFrom(
336
+ const std::shared_ptr<Buffer>& buf,
337
+ const std::shared_ptr<MemoryManager>& from) override;
338
+ Result<std::shared_ptr<Buffer>> CopyBufferTo(
339
+ const std::shared_ptr<Buffer>& buf,
340
+ const std::shared_ptr<MemoryManager>& to) override;
341
+ Result<std::unique_ptr<Buffer>> CopyNonOwnedFrom(
342
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& from) override;
343
+ Result<std::unique_ptr<Buffer>> CopyNonOwnedTo(
344
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& to) override;
345
+ Result<std::shared_ptr<Buffer>> ViewBufferFrom(
346
+ const std::shared_ptr<Buffer>& buf,
347
+ const std::shared_ptr<MemoryManager>& from) override;
348
+ Result<std::shared_ptr<Buffer>> ViewBufferTo(
349
+ const std::shared_ptr<Buffer>& buf,
350
+ const std::shared_ptr<MemoryManager>& to) override;
351
+
352
+ MemoryPool* pool_;
353
+
354
+ friend std::shared_ptr<MemoryManager> CPUDevice::memory_manager(MemoryPool* pool);
355
+ ARROW_FRIEND_EXPORT friend std::shared_ptr<MemoryManager> default_cpu_memory_manager();
356
+ };
357
+
358
+ /// \brief Return the default CPU MemoryManager instance
359
+ ///
360
+ /// The returned singleton instance uses the default MemoryPool.
361
+ /// This function is a faster spelling of
362
+ /// `CPUDevice::Instance()->default_memory_manager()`.
363
+ ARROW_EXPORT
364
+ std::shared_ptr<MemoryManager> default_cpu_memory_manager();
365
+
366
+ } // namespace arrow
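A minimal sketch of the Device / MemoryManager interface above, staying entirely on the CPU device (not part of the diff; it assumes a linked Arrow C++ build, and the `DemoDevice` name is illustrative):
```cpp
// Allocates through a MemoryManager and copies a buffer; for CPU->CPU the
// copy is effectively a host-side memcpy.
#include <iostream>
#include <memory>

#include "arrow/buffer.h"
#include "arrow/device.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status DemoDevice() {
  std::shared_ptr<arrow::MemoryManager> mm = arrow::default_cpu_memory_manager();
  std::cout << mm->device()->ToString() << ", is_cpu=" << mm->is_cpu() << std::endl;

  // Allocate a device buffer (here: ordinary CPU memory from the default pool).
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::Buffer> buf, mm->AllocateBuffer(64));

  // Copy the buffer to a destination MemoryManager.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> copy,
                        arrow::MemoryManager::CopyBuffer(std::move(buf), mm));
  std::cout << "copied " << copy->size() << " bytes" << std::endl;
  return arrow::Status::OK();
}

int main() { return DemoDevice().ok() ? 0 : 1; }
```
The same CopyBuffer/ViewBuffer entry points are what non-CPU devices (e.g. CUDA) hook into by overriding the protected CopyBufferFrom/To methods.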
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h ADDED
@@ -0,0 +1,272 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+
33
+ namespace internal {
34
+
35
+ ///////////////////////////////////////////////////////////////////////
36
+ // Helper tracking memory statistics
37
+
38
+ class MemoryPoolStats {
39
+ public:
40
+ MemoryPoolStats() : bytes_allocated_(0), max_memory_(0) {}
41
+
42
+ int64_t max_memory() const { return max_memory_.load(); }
43
+
44
+ int64_t bytes_allocated() const { return bytes_allocated_.load(); }
45
+
46
+ int64_t total_bytes_allocated() const { return total_allocated_bytes_.load(); }
47
+
48
+ int64_t num_allocations() const { return num_allocs_.load(); }
49
+
50
+ inline void UpdateAllocatedBytes(int64_t diff, bool is_free = false) {
51
+ auto allocated = bytes_allocated_.fetch_add(diff) + diff;
52
+ // "maximum" allocated memory is ill-defined in multi-threaded code,
53
+ // so don't try to be too rigorous here
54
+ if (diff > 0 && allocated > max_memory_) {
55
+ max_memory_ = allocated;
56
+ }
57
+
58
+ // Reallocations might just expand/contract the allocation in place or might
59
+ // copy to a new location. We can't really know, so we just represent the
60
+ // optimistic case.
61
+ if (diff > 0) {
62
+ total_allocated_bytes_ += diff;
63
+ }
64
+
65
+ // We count any reallocation as an allocation.
66
+ if (!is_free) {
67
+ num_allocs_ += 1;
68
+ }
69
+ }
70
+
71
+ protected:
72
+ std::atomic<int64_t> bytes_allocated_ = 0;
73
+ std::atomic<int64_t> max_memory_ = 0;
74
+ std::atomic<int64_t> total_allocated_bytes_ = 0;
75
+ std::atomic<int64_t> num_allocs_ = 0;
76
+ };
77
+
78
+ } // namespace internal
79
+
80
+ /// Base class for memory allocation on the CPU.
81
+ ///
82
+ /// Besides tracking the number of allocated bytes, the allocator also should
83
+ /// take care of the required 64-byte alignment.
84
+ class ARROW_EXPORT MemoryPool {
85
+ public:
86
+ virtual ~MemoryPool() = default;
87
+
88
+ /// \brief EXPERIMENTAL. Create a new instance of the default MemoryPool
89
+ static std::unique_ptr<MemoryPool> CreateDefault();
90
+
91
+ /// Allocate a new memory region of at least size bytes.
92
+ ///
93
+ /// The allocated region shall be 64-byte aligned.
94
+ Status Allocate(int64_t size, uint8_t** out) {
95
+ return Allocate(size, kDefaultBufferAlignment, out);
96
+ }
97
+
98
+ /// Allocate a new memory region of at least size bytes aligned to alignment.
99
+ virtual Status Allocate(int64_t size, int64_t alignment, uint8_t** out) = 0;
100
+
101
+ /// Resize an already allocated memory section.
102
+ ///
103
+ /// As by default most default allocators on a platform don't support aligned
104
+ /// reallocation, this function can involve a copy of the underlying data.
105
+ virtual Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
106
+ uint8_t** ptr) = 0;
107
+ Status Reallocate(int64_t old_size, int64_t new_size, uint8_t** ptr) {
108
+ return Reallocate(old_size, new_size, kDefaultBufferAlignment, ptr);
109
+ }
110
+
111
+ /// Free an allocated region.
112
+ ///
113
+ /// @param buffer Pointer to the start of the allocated memory region
114
+ /// @param size Allocated size located at buffer. An allocator implementation
115
+ /// may use this for tracking the amount of allocated bytes as well as for
116
+ /// faster deallocation if supported by its backend.
117
+ /// @param alignment The alignment of the allocation. Defaults to 64 bytes.
118
+ virtual void Free(uint8_t* buffer, int64_t size, int64_t alignment) = 0;
119
+ void Free(uint8_t* buffer, int64_t size) {
120
+ Free(buffer, size, kDefaultBufferAlignment);
121
+ }
122
+
123
+ /// Return unused memory to the OS
124
+ ///
125
+ /// Only applies to allocators that hold onto unused memory. This will be
126
+ /// best effort, a memory pool may not implement this feature or may be
127
+ /// unable to fulfill the request due to fragmentation.
128
+ virtual void ReleaseUnused() {}
129
+
130
+ /// The number of bytes that were allocated and not yet free'd through
131
+ /// this allocator.
132
+ virtual int64_t bytes_allocated() const = 0;
133
+
134
+ /// Return peak memory allocation in this memory pool
135
+ ///
136
+ /// \return Maximum bytes allocated. If not known (or not implemented),
137
+ /// returns -1
138
+ virtual int64_t max_memory() const;
139
+
140
+ /// The number of bytes that were allocated.
141
+ virtual int64_t total_bytes_allocated() const = 0;
142
+
143
+ /// The number of allocations or reallocations that were requested.
144
+ virtual int64_t num_allocations() const = 0;
145
+
146
+ /// The name of the backend used by this MemoryPool (e.g. "system" or "jemalloc").
147
+ virtual std::string backend_name() const = 0;
148
+
149
+ protected:
150
+ MemoryPool() = default;
151
+ };
152
+
153
+ class ARROW_EXPORT LoggingMemoryPool : public MemoryPool {
154
+ public:
155
+ explicit LoggingMemoryPool(MemoryPool* pool);
156
+ ~LoggingMemoryPool() override = default;
157
+
158
+ using MemoryPool::Allocate;
159
+ using MemoryPool::Free;
160
+ using MemoryPool::Reallocate;
161
+
162
+ Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override;
163
+ Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
164
+ uint8_t** ptr) override;
165
+ void Free(uint8_t* buffer, int64_t size, int64_t alignment) override;
166
+
167
+ int64_t bytes_allocated() const override;
168
+
169
+ int64_t max_memory() const override;
170
+
171
+ int64_t total_bytes_allocated() const override;
172
+
173
+ int64_t num_allocations() const override;
174
+
175
+ std::string backend_name() const override;
176
+
177
+ private:
178
+ MemoryPool* pool_;
179
+ };
180
+
181
+ /// Derived class for memory allocation.
182
+ ///
183
+ /// Tracks the number of bytes and maximum memory allocated through its direct
184
+ /// calls. Actual allocation is delegated to MemoryPool class.
185
+ class ARROW_EXPORT ProxyMemoryPool : public MemoryPool {
186
+ public:
187
+ explicit ProxyMemoryPool(MemoryPool* pool);
188
+ ~ProxyMemoryPool() override;
189
+
190
+ using MemoryPool::Allocate;
191
+ using MemoryPool::Free;
192
+ using MemoryPool::Reallocate;
193
+
194
+ Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override;
195
+ Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
196
+ uint8_t** ptr) override;
197
+ void Free(uint8_t* buffer, int64_t size, int64_t alignment) override;
198
+
199
+ int64_t bytes_allocated() const override;
200
+
201
+ int64_t max_memory() const override;
202
+
203
+ int64_t total_bytes_allocated() const override;
204
+
205
+ int64_t num_allocations() const override;
206
+
207
+ std::string backend_name() const override;
208
+
209
+ private:
210
+ class ProxyMemoryPoolImpl;
211
+ std::unique_ptr<ProxyMemoryPoolImpl> impl_;
212
+ };
213
+
214
+ /// \brief Return a process-wide memory pool based on the system allocator.
215
+ ARROW_EXPORT MemoryPool* system_memory_pool();
216
+
217
+ /// \brief Return a process-wide memory pool based on jemalloc.
218
+ ///
219
+ /// May return NotImplemented if jemalloc is not available.
220
+ ARROW_EXPORT Status jemalloc_memory_pool(MemoryPool** out);
221
+
222
+ /// \brief Set jemalloc memory page purging behavior for future-created arenas
223
+ /// to the indicated number of milliseconds. See dirty_decay_ms and
224
+ /// muzzy_decay_ms options in jemalloc for a description of what these do. The
225
+ /// default is configured to 1000 (1 second) which releases memory more
226
+ /// aggressively to the operating system than the jemalloc default of 10
227
+ /// seconds. If you set the value to 0, dirty / muzzy pages will be released
228
+ /// immediately rather than with a time decay, but this may reduce application
229
+ /// performance.
230
+ ARROW_EXPORT
231
+ Status jemalloc_set_decay_ms(int ms);
232
+
233
+ /// \brief Get basic statistics from jemalloc's mallctl.
234
+ /// See the MALLCTL NAMESPACE section in jemalloc project documentation for
235
+ /// available stats.
236
+ ARROW_EXPORT
237
+ Result<int64_t> jemalloc_get_stat(const char* name);
238
+
239
+ /// \brief Reset the counter for peak bytes allocated in the calling thread to zero.
240
+ /// This affects subsequent calls to thread.peak.read, but not the values returned by
241
+ /// thread.allocated or thread.deallocated.
242
+ ARROW_EXPORT
243
+ Status jemalloc_peak_reset();
244
+
245
+ /// \brief Print summary statistics in human-readable form to stderr.
246
+ /// See malloc_stats_print documentation in jemalloc project documentation for
247
+ /// available opt flags.
248
+ ARROW_EXPORT
249
+ Status jemalloc_stats_print(const char* opts = "");
250
+
251
+ /// \brief Print summary statistics in human-readable form using a callback
252
+ /// See malloc_stats_print documentation in jemalloc project documentation for
253
+ /// available opt flags.
254
+ ARROW_EXPORT
255
+ Status jemalloc_stats_print(std::function<void(const char*)> write_cb,
256
+ const char* opts = "");
257
+
258
+ /// \brief Get summary statistics in human-readable form.
259
+ /// See malloc_stats_print documentation in jemalloc project documentation for
260
+ /// available opt flags.
261
+ ARROW_EXPORT
262
+ Result<std::string> jemalloc_stats_string(const char* opts = "");
263
+
264
+ /// \brief Return a process-wide memory pool based on mimalloc.
265
+ ///
266
+ /// May return NotImplemented if mimalloc is not available.
267
+ ARROW_EXPORT Status mimalloc_memory_pool(MemoryPool** out);
268
+
269
+ /// \brief Return the names of the backends supported by this Arrow build.
270
+ ARROW_EXPORT std::vector<std::string> SupportedMemoryBackendNames();
271
+
272
+ } // namespace arrow
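A minimal sketch of the MemoryPool interface above (not part of the diff; it assumes a linked Arrow C++ build, and the reported backend depends on how Arrow was compiled):
```cpp
// Allocates and frees through the process-wide default pool and reports the
// tracking statistics exposed by MemoryPool.
#include <cstdint>
#include <iostream>

#include "arrow/memory_pool.h"
#include "arrow/status.h"

int main() {
  arrow::MemoryPool* pool = arrow::default_memory_pool();
  std::cout << "backend: " << pool->backend_name() << std::endl;

  uint8_t* data = nullptr;
  arrow::Status st = pool->Allocate(4096, &data);   // 64-byte aligned by default
  if (!st.ok()) return 1;

  std::cout << "bytes_allocated: " << pool->bytes_allocated()
            << ", max_memory: " << pool->max_memory() << std::endl;

  pool->Free(data, 4096);                           // size must match the allocation
  std::cout << "after free: " << pool->bytes_allocated() << std::endl;
  return 0;
}
```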
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h ADDED
@@ -0,0 +1,367 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/compare.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/iterator.h"
30
+ #include "arrow/util/macros.h"
31
+ #include "arrow/util/visibility.h"
32
+
33
+ namespace arrow {
34
+
35
+ /// \class RecordBatch
36
+ /// \brief Collection of equal-length arrays matching a particular Schema
37
+ ///
38
+ /// A record batch is a table-like data structure that is semantically a sequence
39
+ /// of fields, each a contiguous Arrow array
40
+ class ARROW_EXPORT RecordBatch {
41
+ public:
42
+ virtual ~RecordBatch() = default;
43
+
44
+ /// \param[in] schema The record batch schema
45
+ /// \param[in] num_rows length of fields in the record batch. Each array
46
+ /// should have the same length as num_rows
47
+ /// \param[in] columns the record batch fields as vector of arrays
48
+ static std::shared_ptr<RecordBatch> Make(std::shared_ptr<Schema> schema,
49
+ int64_t num_rows,
50
+ std::vector<std::shared_ptr<Array>> columns);
51
+
52
+ /// \brief Construct record batch from vector of internal data structures
53
+ /// \since 0.5.0
54
+ ///
55
+ /// This class is intended for internal use, or advanced users.
56
+ ///
57
+ /// \param schema the record batch schema
58
+ /// \param num_rows the number of semantic rows in the record batch. This
59
+ /// should be equal to the length of each field
60
+ /// \param columns the data for the batch's columns
61
+ static std::shared_ptr<RecordBatch> Make(
62
+ std::shared_ptr<Schema> schema, int64_t num_rows,
63
+ std::vector<std::shared_ptr<ArrayData>> columns);
64
+
65
+ /// \brief Create an empty RecordBatch of a given schema
66
+ ///
67
+ /// The output RecordBatch will be created with DataTypes from
68
+ /// the given schema.
69
+ ///
70
+ /// \param[in] schema the schema of the empty RecordBatch
71
+ /// \param[in] pool the memory pool to allocate memory from
72
+ /// \return the resulting RecordBatch
73
+ static Result<std::shared_ptr<RecordBatch>> MakeEmpty(
74
+ std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool());
75
+
76
+ /// \brief Convert record batch to struct array
77
+ ///
78
+ /// Create a struct array whose child arrays are the record batch's columns.
79
+ /// Note that the record batch's top-level field metadata cannot be reflected
80
+ /// in the resulting struct array.
81
+ Result<std::shared_ptr<StructArray>> ToStructArray() const;
82
+
83
+ /// \brief Construct record batch from struct array
84
+ ///
85
+ /// This constructs a record batch using the child arrays of the given
86
+ /// array, which must be a struct array.
87
+ ///
88
+ /// \param[in] array the source array, must be a StructArray
89
+ /// \param[in] pool the memory pool to allocate new validity bitmaps
90
+ ///
91
+ /// This operation will usually be zero-copy. However, if the struct array has an
92
+ /// offset or a validity bitmap then these will need to be pushed into the child arrays.
93
+ /// Pushing the offset is zero-copy but pushing the validity bitmap is not.
94
+ static Result<std::shared_ptr<RecordBatch>> FromStructArray(
95
+ const std::shared_ptr<Array>& array, MemoryPool* pool = default_memory_pool());
96
+
97
+ /// \brief Determine if two record batches are exactly equal
98
+ ///
99
+ /// \param[in] other the RecordBatch to compare with
100
+ /// \param[in] check_metadata if true, check that Schema metadata is the same
101
+ /// \param[in] opts the options for equality comparisons
102
+ /// \return true if batches are equal
103
+ bool Equals(const RecordBatch& other, bool check_metadata = false,
104
+ const EqualOptions& opts = EqualOptions::Defaults()) const;
105
+
106
+ /// \brief Determine if two record batches are approximately equal
107
+ ///
108
+ /// \param[in] other the RecordBatch to compare with
109
+ /// \param[in] opts the options for equality comparisons
110
+ /// \return true if batches are approximately equal
111
+ bool ApproxEquals(const RecordBatch& other,
112
+ const EqualOptions& opts = EqualOptions::Defaults()) const;
113
+
114
+ /// \return the record batch's schema
115
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
116
+
117
+ /// \brief Replace the schema with another schema with the same types, but potentially
118
+ /// different field names and/or metadata.
119
+ Result<std::shared_ptr<RecordBatch>> ReplaceSchema(
120
+ std::shared_ptr<Schema> schema) const;
121
+
122
+ /// \brief Retrieve all columns at once
123
+ virtual const std::vector<std::shared_ptr<Array>>& columns() const = 0;
124
+
125
+ /// \brief Retrieve an array from the record batch
126
+ /// \param[in] i field index, does not boundscheck
127
+ /// \return an Array object
128
+ virtual std::shared_ptr<Array> column(int i) const = 0;
129
+
130
+ /// \brief Retrieve an array from the record batch
131
+ /// \param[in] name field name
132
+ /// \return an Array or null if no field was found
133
+ std::shared_ptr<Array> GetColumnByName(const std::string& name) const;
134
+
135
+ /// \brief Retrieve an array's internal data from the record batch
136
+ /// \param[in] i field index, does not boundscheck
137
+ /// \return an internal ArrayData object
138
+ virtual std::shared_ptr<ArrayData> column_data(int i) const = 0;
139
+
140
+ /// \brief Retrieve all arrays' internal data from the record batch.
141
+ virtual const ArrayDataVector& column_data() const = 0;
142
+
143
+ /// \brief Add column to the record batch, producing a new RecordBatch
144
+ ///
145
+ /// \param[in] i field index, which will be boundschecked
146
+ /// \param[in] field field to be added
147
+ /// \param[in] column column to be added
148
+ virtual Result<std::shared_ptr<RecordBatch>> AddColumn(
149
+ int i, const std::shared_ptr<Field>& field,
150
+ const std::shared_ptr<Array>& column) const = 0;
151
+
152
+ /// \brief Add new nullable column to the record batch, producing a new
153
+ /// RecordBatch.
154
+ ///
155
+ /// For non-nullable columns, use the Field-based version of this method.
156
+ ///
157
+ /// \param[in] i field index, which will be boundschecked
158
+ /// \param[in] field_name name of field to be added
159
+ /// \param[in] column column to be added
160
+ virtual Result<std::shared_ptr<RecordBatch>> AddColumn(
161
+ int i, std::string field_name, const std::shared_ptr<Array>& column) const;
162
+
163
+ /// \brief Replace a column in the record batch, producing a new RecordBatch
164
+ ///
165
+ /// \param[in] i field index, does boundscheck
166
+ /// \param[in] field field to be replaced
167
+ /// \param[in] column column to be replaced
168
+ virtual Result<std::shared_ptr<RecordBatch>> SetColumn(
169
+ int i, const std::shared_ptr<Field>& field,
170
+ const std::shared_ptr<Array>& column) const = 0;
171
+
172
+ /// \brief Remove column from the record batch, producing a new RecordBatch
173
+ ///
174
+ /// \param[in] i field index, does boundscheck
175
+ virtual Result<std::shared_ptr<RecordBatch>> RemoveColumn(int i) const = 0;
176
+
177
+ virtual std::shared_ptr<RecordBatch> ReplaceSchemaMetadata(
178
+ const std::shared_ptr<const KeyValueMetadata>& metadata) const = 0;
179
+
180
+ /// \brief Name of the i-th column
181
+ const std::string& column_name(int i) const;
182
+
183
+ /// \return the number of columns in the table
184
+ int num_columns() const;
185
+
186
+ /// \return the number of rows (the corresponding length of each column)
187
+ int64_t num_rows() const { return num_rows_; }
188
+
189
+ /// \brief Slice each of the arrays in the record batch
190
+ /// \param[in] offset the starting offset to slice, through end of batch
191
+ /// \return new record batch
192
+ virtual std::shared_ptr<RecordBatch> Slice(int64_t offset) const;
193
+
194
+ /// \brief Slice each of the arrays in the record batch
195
+ /// \param[in] offset the starting offset to slice
196
+ /// \param[in] length the number of elements to slice from offset
197
+ /// \return new record batch
198
+ virtual std::shared_ptr<RecordBatch> Slice(int64_t offset, int64_t length) const = 0;
199
+
200
+ /// \return PrettyPrint representation suitable for debugging
201
+ std::string ToString() const;
202
+
203
+ /// \brief Return new record batch with specified columns
204
+ Result<std::shared_ptr<RecordBatch>> SelectColumns(
205
+ const std::vector<int>& indices) const;
206
+
207
+ /// \brief Perform cheap validation checks to determine obvious inconsistencies
208
+ /// within the record batch's schema and internal data.
209
+ ///
210
+ /// This is O(k) where k is the total number of fields and array descendants.
211
+ ///
212
+ /// \return Status
213
+ virtual Status Validate() const;
214
+
215
+ /// \brief Perform extensive validation checks to determine inconsistencies
216
+ /// within the record batch's schema and internal data.
217
+ ///
218
+ /// This is potentially O(k*n) where n is the number of rows.
219
+ ///
220
+ /// \return Status
221
+ virtual Status ValidateFull() const;
222
+
223
+ protected:
224
+ RecordBatch(const std::shared_ptr<Schema>& schema, int64_t num_rows);
225
+
226
+ std::shared_ptr<Schema> schema_;
227
+ int64_t num_rows_;
228
+
229
+ private:
230
+ ARROW_DISALLOW_COPY_AND_ASSIGN(RecordBatch);
231
+ };
232
+
233
+ struct ARROW_EXPORT RecordBatchWithMetadata {
234
+ std::shared_ptr<RecordBatch> batch;
235
+ std::shared_ptr<KeyValueMetadata> custom_metadata;
236
+ };
237
+
238
+ /// \brief Abstract interface for reading stream of record batches
239
+ class ARROW_EXPORT RecordBatchReader {
240
+ public:
241
+ using ValueType = std::shared_ptr<RecordBatch>;
242
+
243
+ virtual ~RecordBatchReader();
244
+
245
+ /// \return the shared schema of the record batches in the stream
246
+ virtual std::shared_ptr<Schema> schema() const = 0;
247
+
248
+ /// \brief Read the next record batch in the stream. Return null for batch
249
+ /// when reaching end of stream
250
+ ///
251
+ /// \param[out] batch the next loaded batch, null at end of stream
252
+ /// \return Status
253
+ virtual Status ReadNext(std::shared_ptr<RecordBatch>* batch) = 0;
254
+
255
+ virtual Result<RecordBatchWithMetadata> ReadNext() {
256
+ return Status::NotImplemented("ReadNext with custom metadata");
257
+ }
258
+
259
+ /// \brief Iterator interface
260
+ Result<std::shared_ptr<RecordBatch>> Next() {
261
+ std::shared_ptr<RecordBatch> batch;
262
+ ARROW_RETURN_NOT_OK(ReadNext(&batch));
263
+ return batch;
264
+ }
265
+
266
+ /// \brief finalize reader
267
+ virtual Status Close() { return Status::OK(); }
268
+
269
+ class RecordBatchReaderIterator {
270
+ public:
271
+ using iterator_category = std::input_iterator_tag;
272
+ using difference_type = std::ptrdiff_t;
273
+ using value_type = std::shared_ptr<RecordBatch>;
274
+ using pointer = value_type const*;
275
+ using reference = value_type const&;
276
+
277
+ RecordBatchReaderIterator() : batch_(RecordBatchEnd()), reader_(NULLPTR) {}
278
+
279
+ explicit RecordBatchReaderIterator(RecordBatchReader* reader)
280
+ : batch_(RecordBatchEnd()), reader_(reader) {
281
+ Next();
282
+ }
283
+
284
+ bool operator==(const RecordBatchReaderIterator& other) const {
285
+ return batch_ == other.batch_;
286
+ }
287
+
288
+ bool operator!=(const RecordBatchReaderIterator& other) const {
289
+ return !(*this == other);
290
+ }
291
+
292
+ Result<std::shared_ptr<RecordBatch>> operator*() {
293
+ ARROW_RETURN_NOT_OK(batch_.status());
294
+
295
+ return batch_;
296
+ }
297
+
298
+ RecordBatchReaderIterator& operator++() {
299
+ Next();
300
+ return *this;
301
+ }
302
+
303
+ RecordBatchReaderIterator operator++(int) {
304
+ RecordBatchReaderIterator tmp(*this);
305
+ Next();
306
+ return tmp;
307
+ }
308
+
309
+ private:
310
+ std::shared_ptr<RecordBatch> RecordBatchEnd() {
311
+ return std::shared_ptr<RecordBatch>(NULLPTR);
312
+ }
313
+
314
+ void Next() {
315
+ if (reader_ == NULLPTR) {
316
+ batch_ = RecordBatchEnd();
317
+ return;
318
+ }
319
+ batch_ = reader_->Next();
320
+ }
321
+
322
+ Result<std::shared_ptr<RecordBatch>> batch_;
323
+ RecordBatchReader* reader_;
324
+ };
325
+ /// \brief Return an iterator to the first record batch in the stream
326
+ RecordBatchReaderIterator begin() { return RecordBatchReaderIterator(this); }
327
+
328
+ /// \brief Return an iterator to the end of the stream
329
+ RecordBatchReaderIterator end() { return RecordBatchReaderIterator(); }
330
+
331
+ /// \brief Consume entire stream as a vector of record batches
332
+ Result<RecordBatchVector> ToRecordBatches();
333
+
334
+ /// \brief Read all batches and concatenate as arrow::Table
335
+ Result<std::shared_ptr<Table>> ToTable();
336
+
337
+ /// \brief Create a RecordBatchReader from a vector of RecordBatch.
338
+ ///
339
+ /// \param[in] batches the vector of RecordBatch to read from
340
+ /// \param[in] schema schema to conform to. Will be inferred from the first
341
+ /// element if not provided.
342
+ static Result<std::shared_ptr<RecordBatchReader>> Make(
343
+ RecordBatchVector batches, std::shared_ptr<Schema> schema = NULLPTR);
344
+
345
+ /// \brief Create a RecordBatchReader from an Iterator of RecordBatch.
346
+ ///
347
+ /// \param[in] batches an iterator of RecordBatch to read from.
348
+ /// \param[in] schema schema that each record batch in iterator will conform to.
349
+ static Result<std::shared_ptr<RecordBatchReader>> MakeFromIterator(
350
+ Iterator<std::shared_ptr<RecordBatch>> batches, std::shared_ptr<Schema> schema);
351
+ };
352
+
353
+ /// \brief Concatenate record batches
354
+ ///
355
+ /// The columns of the new batch are formed by concatenating the same columns of each input
356
+ /// batch. Concatenating multiple batches into a new batch requires that the schemas be
357
+ /// consistent. It supports merging batches without columns (only length, scenarios such
358
+ /// as count(*)).
359
+ ///
360
+ /// \param[in] batches a vector of record batches to be concatenated
361
+ /// \param[in] pool memory to store the result will be allocated from this memory pool
362
+ /// \return the concatenated record batch
363
+ ARROW_EXPORT
364
+ Result<std::shared_ptr<RecordBatch>> ConcatenateRecordBatches(
365
+ const RecordBatchVector& batches, MemoryPool* pool = default_memory_pool());
366
+
367
+ } // namespace arrow
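A minimal sketch of the RecordBatch / RecordBatchReader API above (not part of the diff; it assumes a linked Arrow C++ build, and the `DemoRecordBatch` name is illustrative):
```cpp
// Builds an empty batch for a one-column schema, wraps it in a reader, and
// drains the reader into a Table.
#include <iostream>
#include <memory>

#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/table.h"
#include "arrow/type.h"

arrow::Status DemoRecordBatch() {
  auto schema = arrow::schema({arrow::field("x", arrow::int64())});

  // A zero-row batch whose columns match the schema's types.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatch> batch,
                        arrow::RecordBatch::MakeEmpty(schema));
  std::cout << "rows: " << batch->num_rows()
            << ", cols: " << batch->num_columns() << std::endl;

  // Wrap the batch in a reader and consume the stream as a Table.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatchReader> reader,
                        arrow::RecordBatchReader::Make({batch}, schema));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Table> table, reader->ToTable());
  std::cout << table->ToString() << std::endl;
  return arrow::Status::OK();
}

int main() { return DemoRecordBatch().ok() ? 0 : 1; }
```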