applied-ai-018 committed on
Commit 4fc86f0 · verified · 1 Parent(s): 3ba9440

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/LICENSE +35 -0
  2. llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/METADATA +280 -0
  3. llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/RECORD +97 -0
  4. llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/WHEEL +5 -0
  5. llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/top_level.txt +1 -0
  6. llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER +1 -0
  7. llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE +21 -0
  8. llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA +370 -0
  9. llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD +35 -0
  10. llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL +5 -0
  11. llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pyarrow/_azurefs.cpython-310-x86_64-linux-gnu.so +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pyarrow/_azurefs.pyx +134 -0
  14. llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.pxd +70 -0
  15. llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.pyx +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py +56 -0
  17. llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so +0 -0
  18. llmeval-env/lib/python3.10/site-packages/pyarrow/_cuda.pxd +67 -0
  19. llmeval-env/lib/python3.10/site-packages/pyarrow/_cuda.pyx +1058 -0
  20. llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so +0 -0
  21. llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx +51 -0
  22. llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so +0 -0
  23. llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx +1023 -0
  24. llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so +0 -0
  25. llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx +170 -0
  26. llmeval-env/lib/python3.10/site-packages/pyarrow/_dlpack.pxi +46 -0
  27. llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.pyx +0 -0
  28. llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so +0 -0
  29. llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pyx +1634 -0
  30. llmeval-env/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so +0 -0
  31. llmeval-env/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx +212 -0
  32. llmeval-env/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so +0 -0
  33. llmeval-env/lib/python3.10/site-packages/pyarrow/_hdfs.pyx +160 -0
  34. llmeval-env/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so +0 -0
  35. llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pxd +36 -0
  36. llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so +0 -0
  37. llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pyx +445 -0
  38. llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so +0 -0
  39. llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pxd +674 -0
  40. llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so +0 -0
  41. llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx +62 -0
  42. llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so +0 -0
  43. llmeval-env/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so +0 -0
  44. llmeval-env/lib/python3.10/site-packages/pyarrow/_substrait.pyx +349 -0
  45. llmeval-env/lib/python3.10/site-packages/pyarrow/array.pxi +0 -0
  46. llmeval-env/lib/python3.10/site-packages/pyarrow/benchmark.pxi +20 -0
  47. llmeval-env/lib/python3.10/site-packages/pyarrow/benchmark.py +21 -0
  48. llmeval-env/lib/python3.10/site-packages/pyarrow/builder.pxi +148 -0
  49. llmeval-env/lib/python3.10/site-packages/pyarrow/cffi.py +81 -0
  50. llmeval-env/lib/python3.10/site-packages/pyarrow/config.pxi +95 -0
llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/LICENSE ADDED
@@ -0,0 +1,35 @@
1
+ Copyright (c) 2004-2016 California Institute of Technology.
2
+ Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
3
+ All rights reserved.
4
+
5
+ This software is available subject to the conditions and terms laid
6
+ out below. By downloading and using this software you are agreeing
7
+ to the following conditions.
8
+
9
+ Redistribution and use in source and binary forms, with or without
10
+ modification, are permitted provided that the following conditions
11
+ are met:
12
+
13
+ - Redistributions of source code must retain the above copyright
14
+ notice, this list of conditions and the following disclaimer.
15
+
16
+ - Redistributions in binary form must reproduce the above copyright
17
+ notice, this list of conditions and the following disclaimer in the
18
+ documentation and/or other materials provided with the distribution.
19
+
20
+ - Neither the names of the copyright holders nor the names of any of
21
+ the contributors may be used to endorse or promote products derived
22
+ from this software without specific prior written permission.
23
+
24
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
31
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
33
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
34
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35
+
llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/METADATA ADDED
@@ -0,0 +1,280 @@
1
+ Metadata-Version: 2.1
2
+ Name: dill
3
+ Version: 0.3.8
4
+ Summary: serialize all of Python
5
+ Home-page: https://github.com/uqfoundation/dill
6
+ Author: Mike McKerns
7
+ Author-email: [email protected]
8
+ Maintainer: Mike McKerns
9
+ Maintainer-email: [email protected]
10
+ License: BSD-3-Clause
11
+ Download-URL: https://pypi.org/project/dill/#files
12
+ Project-URL: Documentation, http://dill.rtfd.io
13
+ Project-URL: Source Code, https://github.com/uqfoundation/dill
14
+ Project-URL: Bug Tracker, https://github.com/uqfoundation/dill/issues
15
+ Platform: Linux
16
+ Platform: Windows
17
+ Platform: Mac
18
+ Classifier: Development Status :: 5 - Production/Stable
19
+ Classifier: Intended Audience :: Developers
20
+ Classifier: Intended Audience :: Science/Research
21
+ Classifier: License :: OSI Approved :: BSD License
22
+ Classifier: Programming Language :: Python :: 3
23
+ Classifier: Programming Language :: Python :: 3.8
24
+ Classifier: Programming Language :: Python :: 3.9
25
+ Classifier: Programming Language :: Python :: 3.10
26
+ Classifier: Programming Language :: Python :: 3.11
27
+ Classifier: Programming Language :: Python :: 3.12
28
+ Classifier: Programming Language :: Python :: Implementation :: CPython
29
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
30
+ Classifier: Topic :: Scientific/Engineering
31
+ Classifier: Topic :: Software Development
32
+ Requires-Python: >=3.8
33
+ Provides-Extra: graph
34
+ Requires-Dist: objgraph (>=1.7.2) ; extra == 'graph'
35
+ Provides-Extra: profile
36
+ Requires-Dist: gprof2dot (>=2022.7.29) ; extra == 'profile'
37
+ Provides-Extra: readline
38
+
39
+ -----------------------------
40
+ dill: serialize all of Python
41
+ -----------------------------
42
+
43
+ About Dill
44
+ ==========
45
+
46
+ ``dill`` extends Python's ``pickle`` module for serializing and de-serializing
47
+ Python objects to the majority of the built-in Python types. Serialization
48
+ is the process of converting an object to a byte stream, and the inverse
49
+ of which is converting a byte stream back to a Python object hierarchy.
50
+
51
+ ``dill`` provides the user the same interface as the ``pickle`` module, and
52
+ also includes some additional features. In addition to pickling Python
53
+ objects, ``dill`` provides the ability to save the state of an interpreter
54
+ session in a single command. Hence, it would be feasible to save an
55
+ interpreter session, close the interpreter, ship the pickled file to
56
+ another computer, open a new interpreter, unpickle the session and
57
+ thus continue from the 'saved' state of the original interpreter
58
+ session.
59
+
60
+ ``dill`` can be used to store Python objects to a file, but the primary
61
+ usage is to send Python objects across the network as a byte stream.
62
+ ``dill`` is quite flexible, and allows arbitrary user defined classes
63
+ and functions to be serialized. Thus ``dill`` is not intended to be
64
+ secure against erroneously or maliciously constructed data. It is
65
+ left to the user to decide whether the data they unpickle is from
66
+ a trustworthy source.
67
+
68
+ ``dill`` is part of ``pathos``, a Python framework for heterogeneous computing.
69
+ ``dill`` is in active development, so any user feedback, bug reports, comments,
70
+ or suggestions are highly appreciated. A list of issues is located at
71
+ https://github.com/uqfoundation/dill/issues, with a legacy list maintained at
72
+ https://uqfoundation.github.io/project/pathos/query.
73
+
74
+
75
+ Major Features
76
+ ==============
77
+
78
+ ``dill`` can pickle the following standard types:
79
+
80
+ - none, type, bool, int, float, complex, bytes, str,
81
+ - tuple, list, dict, file, buffer, builtin,
82
+ - Python classes, namedtuples, dataclasses, metaclasses,
83
+ - instances of classes,
84
+ - set, frozenset, array, functions, exceptions
85
+
86
+ ``dill`` can also pickle more 'exotic' standard types:
87
+
88
+ - functions with yields, nested functions, lambdas,
89
+ - cell, method, unboundmethod, module, code, methodwrapper,
90
+ - methoddescriptor, getsetdescriptor, memberdescriptor, wrapperdescriptor,
91
+ - dictproxy, slice, notimplemented, ellipsis, quit
92
+
93
+ ``dill`` cannot yet pickle these standard types:
94
+
95
+ - frame, generator, traceback
96
+
97
+ ``dill`` also provides the capability to:
98
+
99
+ - save and load Python interpreter sessions
100
+ - save and extract the source code from functions and classes
101
+ - interactively diagnose pickling errors
102
+
103
+
104
+ Current Release
105
+ ===============
106
+
107
+ The latest released version of ``dill`` is available from:
108
+
109
+ https://pypi.org/project/dill
110
+
111
+ ``dill`` is distributed under a 3-clause BSD license.
112
+
113
+
114
+ Development Version
115
+ ===================
116
+
117
+ You can get the latest development version with all the shiny new features at:
118
+
119
+ https://github.com/uqfoundation
120
+
121
+ If you have a new contribution, please submit a pull request.
122
+
123
+
124
+ Installation
125
+ ============
126
+
127
+ ``dill`` can be installed with ``pip``::
128
+
129
+ $ pip install dill
130
+
131
+ To optionally include the ``objgraph`` diagnostic tool in the install::
132
+
133
+ $ pip install dill[graph]
134
+
135
+ To optionally include the ``gprof2dot`` diagnostic tool in the install::
136
+
137
+ $ pip install dill[profile]
138
+
139
+ For windows users, to optionally install session history tools::
140
+
141
+ $ pip install dill[readline]
142
+
143
+
144
+ Requirements
145
+ ============
146
+
147
+ ``dill`` requires:
148
+
149
+ - ``python`` (or ``pypy``), **>=3.8**
150
+ - ``setuptools``, **>=42**
151
+
152
+ Optional requirements:
153
+
154
+ - ``objgraph``, **>=1.7.2**
155
+ - ``gprof2dot``, **>=2022.7.29**
156
+ - ``pyreadline``, **>=1.7.1** (on windows)
157
+
158
+
159
+ Basic Usage
160
+ ===========
161
+
162
+ ``dill`` is a drop-in replacement for ``pickle``. Existing code can be
163
+ updated to allow complete pickling using::
164
+
165
+ >>> import dill as pickle
166
+
167
+ or::
168
+
169
+ >>> from dill import dumps, loads
170
+
171
+ ``dumps`` converts the object to a unique byte string, and ``loads`` performs
172
+ the inverse operation::
173
+
174
+ >>> squared = lambda x: x**2
175
+ >>> loads(dumps(squared))(3)
176
+ 9
177
+
178
+ There are a number of options to control serialization which are provided
179
+ as keyword arguments to several ``dill`` functions:
180
+
181
+ * with *protocol*, the pickle protocol level can be set. This uses the
182
+ same value as the ``pickle`` module, *DEFAULT_PROTOCOL*.
183
+ * with *byref=True*, ``dill`` to behave a lot more like pickle with
184
+ certain objects (like modules) pickled by reference as opposed to
185
+ attempting to pickle the object itself.
186
+ * with *recurse=True*, objects referred to in the global dictionary are
187
+ recursively traced and pickled, instead of the default behavior of
188
+ attempting to store the entire global dictionary.
189
+ * with *fmode*, the contents of the file can be pickled along with the file
190
+ handle, which is useful if the object is being sent over the wire to a
191
+ remote system which does not have the original file on disk. Options are
192
+ *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content
193
+ and *FILE_FMODE* for content and handle.
194
+ * with *ignore=False*, objects reconstructed with types defined in the
195
+ top-level script environment use the existing type in the environment
196
+ rather than a possibly different reconstructed type.
197
+
198
+ The default serialization can also be set globally in *dill.settings*.
199
+ Thus, we can modify how ``dill`` handles references to the global dictionary
200
+ locally or globally::
201
+
202
+ >>> import dill.settings
203
+ >>> dumps(absolute) == dumps(absolute, recurse=True)
204
+ False
205
+ >>> dill.settings['recurse'] = True
206
+ >>> dumps(absolute) == dumps(absolute, recurse=True)
207
+ True
208
+
209
+ ``dill`` also includes source code inspection, as an alternate to pickling::
210
+
211
+ >>> import dill.source
212
+ >>> print(dill.source.getsource(squared))
213
+ squared = lambda x:x**2
214
+
215
+ To aid in debugging pickling issues, use *dill.detect* which provides
216
+ tools like pickle tracing::
217
+
218
+ >>> import dill.detect
219
+ >>> with dill.detect.trace():
220
+ >>> dumps(squared)
221
+ ┬ F1: <function <lambda> at 0x7fe074f8c280>
222
+ ├┬ F2: <function _create_function at 0x7fe074c49c10>
223
+ │└ # F2 [34 B]
224
+ ├┬ Co: <code object <lambda> at 0x7fe07501eb30, file "<stdin>", line 1>
225
+ │├┬ F2: <function _create_code at 0x7fe074c49ca0>
226
+ ││└ # F2 [19 B]
227
+ │└ # Co [87 B]
228
+ ├┬ D1: <dict object at 0x7fe0750d4680>
229
+ │└ # D1 [22 B]
230
+ ├┬ D2: <dict object at 0x7fe074c5a1c0>
231
+ │└ # D2 [2 B]
232
+ ├┬ D2: <dict object at 0x7fe074f903c0>
233
+ │├┬ D2: <dict object at 0x7fe074f8ebc0>
234
+ ││└ # D2 [2 B]
235
+ │└ # D2 [23 B]
236
+ └ # F1 [180 B]
237
+
238
+ With trace, we see how ``dill`` stored the lambda (``F1``) by first storing
239
+ ``_create_function``, the underlying code object (``Co``) and ``_create_code``
240
+ (which is used to handle code objects), then we handle the reference to
241
+ the global dict (``D2``) plus other dictionaries (``D1`` and ``D2``) that
242
+ save the lambda object's state. A ``#`` marks when the object is actually stored.
243
+
244
+
245
+ More Information
246
+ ================
247
+
248
+ Probably the best way to get started is to look at the documentation at
249
+ http://dill.rtfd.io. Also see ``dill.tests`` for a set of scripts that
250
+ demonstrate how ``dill`` can serialize different Python objects. You can
251
+ run the test suite with ``python -m dill.tests``. The contents of any
252
+ pickle file can be examined with ``undill``. As ``dill`` conforms to
253
+ the ``pickle`` interface, the examples and documentation found at
254
+ http://docs.python.org/library/pickle.html also apply to ``dill``
255
+ if one will ``import dill as pickle``. The source code is also generally
256
+ well documented, so further questions may be resolved by inspecting the
257
+ code itself. Please feel free to submit a ticket on github, or ask a
258
+ question on stackoverflow (**@Mike McKerns**).
259
+ If you would like to share how you use ``dill`` in your work, please send
260
+ an email (to **mmckerns at uqfoundation dot org**).
261
+
262
+
263
+ Citation
264
+ ========
265
+
266
+ If you use ``dill`` to do research that leads to publication, we ask that you
267
+ acknowledge use of ``dill`` by citing the following in your publication::
268
+
269
+ M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
270
+ "Building a framework for predictive science", Proceedings of
271
+ the 10th Python in Science Conference, 2011;
272
+ http://arxiv.org/pdf/1202.1056
273
+
274
+ Michael McKerns and Michael Aivazis,
275
+ "pathos: a framework for heterogeneous computing", 2010- ;
276
+ https://uqfoundation.github.io/project/pathos
277
+
278
+ Please see https://uqfoundation.github.io/project/pathos or
279
+ http://arxiv.org/pdf/1202.1056 for further information.
280
+
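For reference, a minimal sketch of the serialization keyword options described in the dill METADATA above (the lambda and the argument values are illustrative, assuming dill's documented ``dumps``/``loads`` interface)::

    >>> import dill
    >>> square = lambda x: x ** 2          # arbitrary object that plain pickle cannot handle
    >>> payload = dill.dumps(square, byref=False, recurse=True)   # pickle the object itself, tracing globals
    >>> dill.loads(payload)(4)
    16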
llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/RECORD ADDED
@@ -0,0 +1,97 @@
1
+ ../../../bin/get_gprof,sha256=5UXwSf1BcfNtv4U5oGL8yBcORUJXKeOKS_CAK2mS76Y,2447
2
+ ../../../bin/get_objgraph,sha256=i9nSmF-NxOfqVVATQhW8k0UWRPiPbqvGX0gh9rOal4A,1641
3
+ ../../../bin/undill,sha256=4LwLIDxWu23zePFX3C_90CVZcMGl9hJuH0jLnmUq3Ks,577
4
+ dill-0.3.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
5
+ dill-0.3.8.dist-info/LICENSE,sha256=UeiKI-eId86r1yfCGcel4z9l2pugOsT9KFupBKoc4is,1790
6
+ dill-0.3.8.dist-info/METADATA,sha256=UxkSs2cU8JyrJsV5kS0QR9crJ07hrUJS2RiIMQaC4ss,10106
7
+ dill-0.3.8.dist-info/RECORD,,
8
+ dill-0.3.8.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
9
+ dill-0.3.8.dist-info/top_level.txt,sha256=HLSIyYIjQzJiBvs3_-16ntezE3j6mWGTW0DT1xDd7X0,5
10
+ dill/__diff.py,sha256=kirMxzB7E8lfjo21M5oIf7if95ny0aWhYB790KMpN08,7143
11
+ dill/__info__.py,sha256=Kmel_yLTyH-hwNC5cVfzN-LV08AbS_AvSa2uwMeIQdk,10756
12
+ dill/__init__.py,sha256=j-Jxl3H6bxatS0h2f8ywWs7DChwk7B9ozuZQBVcjYGU,3798
13
+ dill/__pycache__/__diff.cpython-310.pyc,,
14
+ dill/__pycache__/__info__.cpython-310.pyc,,
15
+ dill/__pycache__/__init__.cpython-310.pyc,,
16
+ dill/__pycache__/_dill.cpython-310.pyc,,
17
+ dill/__pycache__/_objects.cpython-310.pyc,,
18
+ dill/__pycache__/_shims.cpython-310.pyc,,
19
+ dill/__pycache__/detect.cpython-310.pyc,,
20
+ dill/__pycache__/logger.cpython-310.pyc,,
21
+ dill/__pycache__/objtypes.cpython-310.pyc,,
22
+ dill/__pycache__/pointers.cpython-310.pyc,,
23
+ dill/__pycache__/session.cpython-310.pyc,,
24
+ dill/__pycache__/settings.cpython-310.pyc,,
25
+ dill/__pycache__/source.cpython-310.pyc,,
26
+ dill/__pycache__/temp.cpython-310.pyc,,
27
+ dill/_dill.py,sha256=3Eo6gKj1sODJjgPgYNT8TU-YL6QNQ7rIeWPUVnRzyqQ,88548
28
+ dill/_objects.py,sha256=dPlUXzQIh8CA0fMy9NMbwwLGUPmXe5H8MdQtRWB1b_M,19605
29
+ dill/_shims.py,sha256=IuzQcyPET5VWmWMoSGStieoedvNXlb5suDpa4bykTbQ,6635
30
+ dill/detect.py,sha256=Mb-PfCxn1mg0l3TmHXyPNVEc4n3fuxc_nue6eL3-q_o,11114
31
+ dill/logger.py,sha256=YS5ZloAOKjJRZaOBRCaMUDWmWVQZcicvbXVSrz8L8XU,11134
32
+ dill/objtypes.py,sha256=BamGH3BEM6lLlxisuvXcGjsCRLNeoLs4_rFZrM5r2yM,736
33
+ dill/pointers.py,sha256=vnQzjwGtKMGnmbdYRXRWNLMyceNPSw4f7UpvwCXLYbE,4467
34
+ dill/session.py,sha256=NvCWpoP9r_rGBL2pOwwxOri8mFly5KlIWG3GwkBFnc0,23525
35
+ dill/settings.py,sha256=7I3yvSpPKstOqpoW2gv3X77kXK-hZlqCnF7nJUGhxTY,630
36
+ dill/source.py,sha256=DWfIxcBjpjbbKYz2DstV9kRdjajBdZLOcLXfsZsPo9U,45121
37
+ dill/temp.py,sha256=KJUry4t0UjQCh5t4LXcxNyMF_uOGHwcjTuNYTJD9qdA,8027
38
+ dill/tests/__init__.py,sha256=Gx-chVB-l-e7ncsGp2zF4BimTjbUyO7BY7RkrO835vY,479
39
+ dill/tests/__main__.py,sha256=fHhioQwcOvTPlf1RM_wVQ0Y3ndETWJOuXJQ2rVtqliA,899
40
+ dill/tests/__pycache__/__init__.cpython-310.pyc,,
41
+ dill/tests/__pycache__/__main__.cpython-310.pyc,,
42
+ dill/tests/__pycache__/test_abc.cpython-310.pyc,,
43
+ dill/tests/__pycache__/test_check.cpython-310.pyc,,
44
+ dill/tests/__pycache__/test_classdef.cpython-310.pyc,,
45
+ dill/tests/__pycache__/test_dataclasses.cpython-310.pyc,,
46
+ dill/tests/__pycache__/test_detect.cpython-310.pyc,,
47
+ dill/tests/__pycache__/test_dictviews.cpython-310.pyc,,
48
+ dill/tests/__pycache__/test_diff.cpython-310.pyc,,
49
+ dill/tests/__pycache__/test_extendpickle.cpython-310.pyc,,
50
+ dill/tests/__pycache__/test_fglobals.cpython-310.pyc,,
51
+ dill/tests/__pycache__/test_file.cpython-310.pyc,,
52
+ dill/tests/__pycache__/test_functions.cpython-310.pyc,,
53
+ dill/tests/__pycache__/test_functors.cpython-310.pyc,,
54
+ dill/tests/__pycache__/test_logger.cpython-310.pyc,,
55
+ dill/tests/__pycache__/test_mixins.cpython-310.pyc,,
56
+ dill/tests/__pycache__/test_module.cpython-310.pyc,,
57
+ dill/tests/__pycache__/test_moduledict.cpython-310.pyc,,
58
+ dill/tests/__pycache__/test_nested.cpython-310.pyc,,
59
+ dill/tests/__pycache__/test_objects.cpython-310.pyc,,
60
+ dill/tests/__pycache__/test_properties.cpython-310.pyc,,
61
+ dill/tests/__pycache__/test_pycapsule.cpython-310.pyc,,
62
+ dill/tests/__pycache__/test_recursive.cpython-310.pyc,,
63
+ dill/tests/__pycache__/test_registered.cpython-310.pyc,,
64
+ dill/tests/__pycache__/test_restricted.cpython-310.pyc,,
65
+ dill/tests/__pycache__/test_selected.cpython-310.pyc,,
66
+ dill/tests/__pycache__/test_session.cpython-310.pyc,,
67
+ dill/tests/__pycache__/test_source.cpython-310.pyc,,
68
+ dill/tests/__pycache__/test_temp.cpython-310.pyc,,
69
+ dill/tests/__pycache__/test_weakref.cpython-310.pyc,,
70
+ dill/tests/test_abc.py,sha256=BSjSKKCQ5_iPfFxAd0yBq4KSAJxelrlC3IzoAhjd1C4,4227
71
+ dill/tests/test_check.py,sha256=4F5gkX6zxY7C5sD2_0Tkqf3T3jmQl0K15FOxYUTZQl0,1396
72
+ dill/tests/test_classdef.py,sha256=fI3fVk4SlsjNMMs5RfU6DUCaxpP7YYRjvLZ2nhXMHuc,8600
73
+ dill/tests/test_dataclasses.py,sha256=yKjFuG24ymLtjk-sZZdhvNY7aDqerTDpMcfi_eV4ft0,890
74
+ dill/tests/test_detect.py,sha256=sE9THufHXCDysBPQ4QkN5DHn6DaIldVRAEciseIRH08,4083
75
+ dill/tests/test_dictviews.py,sha256=Jhol0cQWPwoQrp7OPxGhU8FNRX2GgfFp9fTahCvQEPA,1337
76
+ dill/tests/test_diff.py,sha256=5VIWf2fpV6auLHNfzkHLTrgx6AJBlE2xe5Wanfmq8TM,2667
77
+ dill/tests/test_extendpickle.py,sha256=gONrMBHO94Edhnqm1wo49hgzwmaxHs7L-86Hs-7albY,1315
78
+ dill/tests/test_fglobals.py,sha256=DCvdojmKcLN_X9vX4Qe1FbsqjeoJK-wsY2uJwBfNFro,1676
79
+ dill/tests/test_file.py,sha256=jUU2h8qaDOIe1mn_Ng7wqCZcd7Ucx3TAaI-K_90_Tbk,13578
80
+ dill/tests/test_functions.py,sha256=-mqTpUbzRu8GynjBGD25dRDm8qInIe07sRZmCcA_iXY,4267
81
+ dill/tests/test_functors.py,sha256=7rx9wLmrgFwF0gUm_-SGOISPYSok0XjmrQ-jFMRt6gs,930
82
+ dill/tests/test_logger.py,sha256=D9zGRaA-CEadG13orPS_D4gPVZlkqXf9Zu8wn2oMiYc,2385
83
+ dill/tests/test_mixins.py,sha256=YtB24BjodooLj85ijFbAxiM7LlFQZAUL8RQVx9vIAwY,4007
84
+ dill/tests/test_module.py,sha256=KLl_gZJJqDY7S_bD5wCqKL8JQCS0MDMoipVQSDfASlo,1943
85
+ dill/tests/test_moduledict.py,sha256=faXG6-5AcmCfP3xe2FYGOUdSosU-9TWnKU_ZVqPDaxY,1182
86
+ dill/tests/test_nested.py,sha256=ViWiOrChLZktS0z6qyKqMxDdTuy9kAX4qMgH_OreMcc,3146
87
+ dill/tests/test_objects.py,sha256=pPAth0toC_UWztuKHC7NZlsRBb0g_gSAt70UbUtXEXo,1931
88
+ dill/tests/test_properties.py,sha256=h35c-lYir1JG6oLPtrA0eYE0xoSohIimsA3yIfRw6yA,1346
89
+ dill/tests/test_pycapsule.py,sha256=EXFyB6g1Wx9O9LM6StIeUKhrhln4_hou1xrtGwkt4Cw,1417
90
+ dill/tests/test_recursive.py,sha256=bfr-BsK1Xu0PU7l2srHsDXdY2l1LeM3L3w7NraXO0cc,4182
91
+ dill/tests/test_registered.py,sha256=J3oku053VfdJgYh4Z5_kyFRf-C52JglIzjcyxEaYOhk,1573
92
+ dill/tests/test_restricted.py,sha256=xLMIae8sYJksAj9hKKyHFHIL8vtbGpFeOULz59snYM4,783
93
+ dill/tests/test_selected.py,sha256=Hp-AAd6Qp5FJZ-vY_Bbejo5Rg6xFstec5QkSg5D7Aac,3218
94
+ dill/tests/test_session.py,sha256=KoSPvs4c4VJ8mFMF7EUlD_3GwcOhhipt9fqHr--Go-4,10161
95
+ dill/tests/test_source.py,sha256=wZTYBbpzUwj3Mz5OjrHQKfskaVVwuy2UQDg5p2wLbT4,6036
96
+ dill/tests/test_temp.py,sha256=F_7nJkSetLIBSAYMw1-hYh03iVrEYwGs-4GIUzoBOfY,2619
97
+ dill/tests/test_weakref.py,sha256=mrjZP5aPtUP1wBD6ibPsDsfI9ffmq_Ykt7ltoodi5Lg,1602
llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/dill-0.3.8.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ dill
llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2016 Tsuyoshi Hombashi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/METADATA ADDED
@@ -0,0 +1,370 @@
1
+ Metadata-Version: 2.1
2
+ Name: pathvalidate
3
+ Version: 3.2.0
4
+ Summary: pathvalidate is a Python library to sanitize/validate a string such as filenames/file-paths/etc.
5
+ Home-page: https://github.com/thombashi/pathvalidate
6
+ Author: Tsuyoshi Hombashi
7
+ Author-email: [email protected]
8
+ License: MIT License
9
+ Project-URL: Documentation, https://pathvalidate.rtfd.io/
10
+ Project-URL: Source, https://github.com/thombashi/pathvalidate
11
+ Project-URL: Tracker, https://github.com/thombashi/pathvalidate/issues
12
+ Project-URL: Changlog, https://github.com/thombashi/pathvalidate/releases
13
+ Keywords: file,path,validation,validator,sanitization,sanitizer
14
+ Classifier: Development Status :: 5 - Production/Stable
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: Intended Audience :: Information Technology
17
+ Classifier: License :: OSI Approved :: MIT License
18
+ Classifier: Operating System :: OS Independent
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: 3.10
24
+ Classifier: Programming Language :: Python :: 3.11
25
+ Classifier: Programming Language :: Python :: 3 :: Only
26
+ Classifier: Programming Language :: Python :: Implementation :: CPython
27
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
28
+ Classifier: Topic :: Software Development :: Libraries
29
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
30
+ Classifier: Topic :: System :: Filesystems
31
+ Classifier: Topic :: Text Processing
32
+ Requires-Python: >=3.7
33
+ Description-Content-Type: text/x-rst
34
+ License-File: LICENSE
35
+ Provides-Extra: docs
36
+ Requires-Dist: sphinx-rtd-theme >=1.2.2 ; extra == 'docs'
37
+ Requires-Dist: Sphinx >=2.4 ; extra == 'docs'
38
+ Requires-Dist: urllib3 <2 ; extra == 'docs'
39
+ Provides-Extra: test
40
+ Requires-Dist: allpairspy >=2 ; extra == 'test'
41
+ Requires-Dist: click >=6.2 ; extra == 'test'
42
+ Requires-Dist: Faker >=1.0.8 ; extra == 'test'
43
+ Requires-Dist: pytest >=6.0.1 ; extra == 'test'
44
+ Requires-Dist: pytest-md-report >=0.4.1 ; extra == 'test'
45
+ Requires-Dist: pytest-discord >=0.1.4 ; (python_version >= "3.7") and extra == 'test'
46
+
47
+ .. contents:: **pathvalidate**
48
+ :backlinks: top
49
+ :depth: 2
50
+
51
+ Summary
52
+ =========
53
+ `pathvalidate <https://github.com/thombashi/pathvalidate>`__ is a Python library to sanitize/validate a string such as filenames/file-paths/etc.
54
+
55
+ .. image:: https://badge.fury.io/py/pathvalidate.svg
56
+ :target: https://badge.fury.io/py/pathvalidate
57
+ :alt: PyPI package version
58
+
59
+ .. image:: https://anaconda.org/thombashi/pathvalidate/badges/version.svg
60
+ :target: https://anaconda.org/thombashi/pathvalidate
61
+ :alt: conda package version
62
+
63
+ .. image:: https://img.shields.io/pypi/pyversions/pathvalidate.svg
64
+ :target: https://pypi.org/project/pathvalidate
65
+ :alt: Supported Python versions
66
+
67
+ .. image:: https://img.shields.io/pypi/implementation/pathvalidate.svg
68
+ :target: https://pypi.org/project/pathvalidate
69
+ :alt: Supported Python implementations
70
+
71
+ .. image:: https://github.com/thombashi/pathvalidate/workflows/Tests/badge.svg
72
+ :target: https://github.com/thombashi/pathvalidate/actions?query=workflow%3ATests
73
+ :alt: Linux/macOS/Windows CI status
74
+
75
+ .. image:: https://coveralls.io/repos/github/thombashi/pathvalidate/badge.svg?branch=master
76
+ :target: https://coveralls.io/github/thombashi/pathvalidate?branch=master
77
+ :alt: Test coverage: coveralls
78
+
79
+ .. image:: https://github.com/thombashi/pathvalidate/actions/workflows/github-code-scanning/codeql/badge.svg
80
+ :target: https://github.com/thombashi/pathvalidate/actions/workflows/github-code-scanning/codeql
81
+ :alt: CodeQL
82
+
83
+ Features
84
+ ---------
85
+ - Sanitize/Validate a string as a:
86
+ - file name
87
+ - file path
88
+ - Sanitize will do:
89
+ - Remove invalid characters for a target platform
90
+ - Replace reserved names for a target platform
91
+ - Normalize
92
+ - Remove unprintable characters
93
+ - Argument validator/sanitizer for ``argparse`` and ``click``
94
+ - Multi platform support:
95
+ - ``Linux``
96
+ - ``Windows``
97
+ - ``macOS``
98
+ - ``POSIX``
99
+ - ``universal`` (platform independent)
100
+ - Multibyte character support
101
+
102
+ Examples
103
+ ==========
104
+ Sanitize a filename
105
+ ---------------------
106
+ :Sample Code:
107
+ .. code-block:: python
108
+
109
+ from pathvalidate import sanitize_filename
110
+
111
+ fname = "fi:l*e/p\"a?t>h|.t<xt"
112
+ print(f"{fname} -> {sanitize_filename(fname)}\n")
113
+
114
+ fname = "\0_a*b:c<d>e%f/(g)h+i_0.txt"
115
+ print(f"{fname} -> {sanitize_filename(fname)}\n")
116
+
117
+ :Output:
118
+ .. code-block::
119
+
120
+ fi:l*e/p"a?t>h|.t<xt -> filepath.txt
121
+
122
+ _a*b:c<d>e%f/(g)h+i_0.txt -> _abcde%f(g)h+i_0.txt
123
+
124
+ The default target ``platform`` is ``universal``.
125
+ i.e. the sanitized file name is valid for any platform.
126
+
127
+ Sanitize a filepath
128
+ ---------------------
129
+ :Sample Code:
130
+ .. code-block:: python
131
+
132
+ from pathvalidate import sanitize_filepath
133
+
134
+ fpath = "fi:l*e/p\"a?t>h|.t<xt"
135
+ print(f"{fpath} -> {sanitize_filepath(fpath)}\n")
136
+
137
+ fpath = "\0_a*b:c<d>e%f/(g)h+i_0.txt"
138
+ print(f"{fpath} -> {sanitize_filepath(fpath)}\n")
139
+
140
+ :Output:
141
+ .. code-block::
142
+
143
+ fi:l*e/p"a?t>h|.t<xt -> file/path.txt
144
+
145
+ _a*b:c<d>e%f/(g)h+i_0.txt -> _abcde%f/(g)h+i_0.txt
146
+
147
+ Validate a filename
148
+ ---------------------
149
+ :Sample Code:
150
+ .. code-block:: python
151
+
152
+ import sys
153
+ from pathvalidate import ValidationError, validate_filename
154
+
155
+ try:
156
+ validate_filename("fi:l*e/p\"a?t>h|.t<xt")
157
+ except ValidationError as e:
158
+ print(f"{e}\n", file=sys.stderr)
159
+
160
+ try:
161
+ validate_filename("COM1")
162
+ except ValidationError as e:
163
+ print(f"{e}\n", file=sys.stderr)
164
+
165
+ :Output:
166
+ .. code-block::
167
+
168
+ [PV1100] invalid characters found: platform=universal, description=invalids=('/'), value='fi:l*e/p"a?t>h|.t<xt'
169
+
170
+ [PV1002] found a reserved name by a platform: 'COM1' is a reserved name, platform=universal, reusable_name=False
171
+
172
+ Check a filename
173
+ ------------------
174
+ :Sample Code:
175
+ .. code-block:: python
176
+
177
+ from pathvalidate import is_valid_filename, sanitize_filename
178
+
179
+ fname = "fi:l*e/p\"a?t>h|.t<xt"
180
+ print(f"is_valid_filename('{fname}') return {is_valid_filename(fname)}\n")
181
+
182
+ sanitized_fname = sanitize_filename(fname)
183
+ print(f"is_valid_filename('{sanitized_fname}') return {is_valid_filename(sanitized_fname)}\n")
184
+
185
+ :Output:
186
+ .. code-block::
187
+
188
+ is_valid_filename('fi:l*e/p"a?t>h|.t<xt') return False
189
+
190
+ is_valid_filename('filepath.txt') return True
191
+
192
+ filename/filepath validator for ``argparse``
193
+ ----------------------------------------------
194
+ :Sample Code:
195
+ .. code-block:: python
196
+
197
+ from argparse import ArgumentParser
198
+
199
+ from pathvalidate.argparse import validate_filename_arg, validate_filepath_arg
200
+
201
+ parser = ArgumentParser()
202
+ parser.add_argument("--filename", type=validate_filename_arg)
203
+ parser.add_argument("--filepath", type=validate_filepath_arg)
204
+ options = parser.parse_args()
205
+
206
+ if options.filename:
207
+ print(f"filename: {options.filename}")
208
+
209
+ if options.filepath:
210
+ print(f"filepath: {options.filepath}")
211
+
212
+ :Output:
213
+ .. code-block::
214
+
215
+ $ ./examples/argparse_validate.py --filename eg
216
+ filename: eg
217
+ $ ./examples/argparse_validate.py --filename e?g
218
+ usage: argparse_validate.py [-h] [--filename FILENAME] [--filepath FILEPATH]
219
+ argparse_validate.py: error: argument --filename: [PV1100] invalid characters found: invalids=(':'), value='e:g', platform=Windows
220
+
221
+ .. note::
222
+ ``validate_filepath_arg`` consider ``platform`` as of ``"auto"`` if the input is an absolute file path.
223
+
224
+ filename/filepath sanitizer for ``argparse``
225
+ ----------------------------------------------
226
+ :Sample Code:
227
+ .. code-block:: python
228
+
229
+ from argparse import ArgumentParser
230
+
231
+ from pathvalidate.argparse import sanitize_filename_arg, sanitize_filepath_arg
232
+
233
+
234
+ parser = ArgumentParser()
235
+ parser.add_argument("--filename", type=sanitize_filename_arg)
236
+ parser.add_argument("--filepath", type=sanitize_filepath_arg)
237
+ options = parser.parse_args()
238
+
239
+ if options.filename:
240
+ print("filename: {}".format(options.filename))
241
+
242
+ if options.filepath:
243
+ print("filepath: {}".format(options.filepath))
244
+
245
+ :Output:
246
+ .. code-block::
247
+
248
+ $ ./examples/argparse_sanitize.py --filename e/g
249
+ filename: eg
250
+
251
+ .. note::
252
+ ``sanitize_filepath_arg`` is set platform as ``"auto"``.
253
+
254
+ filename/filepath validator for ``click``
255
+ -------------------------------------------
256
+ :Sample Code:
257
+ .. code-block:: python
258
+
259
+ import click
260
+
261
+ from pathvalidate.click import validate_filename_arg, validate_filepath_arg
262
+
263
+
264
+ @click.command()
265
+ @click.option("--filename", callback=validate_filename_arg)
266
+ @click.option("--filepath", callback=validate_filepath_arg)
267
+ def cli(filename: str, filepath: str) -> None:
268
+ if filename:
269
+ click.echo(f"filename: {filename}")
270
+ if filepath:
271
+ click.echo(f"filepath: {filepath}")
272
+
273
+
274
+ if __name__ == "__main__":
275
+ cli()
276
+
277
+ :Output:
278
+ .. code-block::
279
+
280
+ $ ./examples/click_validate.py --filename ab
281
+ filename: ab
282
+ $ ./examples/click_validate.py --filepath e?g
283
+ Usage: click_validate.py [OPTIONS]
284
+ Try 'click_validate.py --help' for help.
285
+
286
+ Error: Invalid value for '--filename': [PV1100] invalid characters found: invalids=('?'), value='e?g', platform=Windows
287
+
288
+ filename/filepath sanitizer for ``click``
289
+ -------------------------------------------
290
+ :Sample Code:
291
+ .. code-block:: python
292
+
293
+ import click
294
+
295
+ from pathvalidate.click import sanitize_filename_arg, sanitize_filepath_arg
296
+
297
+
298
+ @click.command()
299
+ @click.option("--filename", callback=sanitize_filename_arg)
300
+ @click.option("--filepath", callback=sanitize_filepath_arg)
301
+ def cli(filename, filepath):
302
+ if filename:
303
+ click.echo(f"filename: {filename}")
304
+ if filepath:
305
+ click.echo(f"filepath: {filepath}")
306
+
307
+
308
+ if __name__ == "__main__":
309
+ cli()
310
+
311
+ :Output:
312
+ .. code-block::
313
+
314
+ $ ./examples/click_sanitize.py --filename a/b
315
+ filename: ab
316
+
317
+ For more information
318
+ ----------------------
319
+ More examples can be found at
320
+ https://pathvalidate.rtfd.io/en/latest/pages/examples/index.html
321
+
322
+ Installation
323
+ ============
324
+ Installation: pip
325
+ ------------------------------
326
+ ::
327
+
328
+ pip install pathvalidate
329
+
330
+ Installation: conda
331
+ ------------------------------
332
+ ::
333
+
334
+ conda install -c thombashi pathvalidate
335
+
336
+ Installation: apt
337
+ ------------------------------
338
+ ::
339
+
340
+ sudo add-apt-repository ppa:thombashi/ppa
341
+ sudo apt update
342
+ sudo apt install python3-pathvalidate
343
+
344
+
345
+ Dependencies
346
+ ============
347
+ Python 3.7+
348
+ no external dependencies.
349
+
350
+ Documentation
351
+ ===============
352
+ https://pathvalidate.rtfd.io/
353
+
354
+ Sponsors
355
+ ====================================
356
+ .. image:: https://avatars.githubusercontent.com/u/44389260?s=48&u=6da7176e51ae2654bcfd22564772ef8a3bb22318&v=4
357
+ :target: https://github.com/chasbecker
358
+ :alt: Charles Becker (chasbecker)
359
+ .. image:: https://avatars.githubusercontent.com/u/9919?s=48&v=4
360
+ :target: https://github.com/github
361
+ :alt: onetime: GitHub (github)
362
+ .. image:: https://avatars.githubusercontent.com/u/46711571?s=48&u=57687c0e02d5d6e8eeaf9177f7b7af4c9f275eb5&v=4
363
+ :target: https://github.com/Arturi0
364
+ :alt: onetime: Arturi0
365
+ .. image:: https://avatars.githubusercontent.com/u/3658062?s=48&v=4
366
+ :target: https://github.com/b4tman
367
+ :alt: onetime: Dmitry Belyaev (b4tman)
368
+
369
+ `Become a sponsor <https://github.com/sponsors/thombashi>`__
370
+
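As a minimal sketch of the ``platform`` option mentioned in the pathvalidate METADATA above (the filename is illustrative; the sanitized result assumes the documented default behavior of removing characters that are invalid on the target platform)::

    >>> from pathvalidate import sanitize_filename
    >>> sanitize_filename("fi:l*e.txt", platform="windows")   # ':' and '*' are invalid on Windows
    'file.txt'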
llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,35 @@
1
+ pathvalidate-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ pathvalidate-3.2.0.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084
3
+ pathvalidate-3.2.0.dist-info/METADATA,sha256=Kc0RTAOHjVPeTIb-Fv8g162B0RcyDzI_Jj2nD9J8Gdk,11747
4
+ pathvalidate-3.2.0.dist-info/RECORD,,
5
+ pathvalidate-3.2.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
6
+ pathvalidate-3.2.0.dist-info/top_level.txt,sha256=AtoiECsrk-xZknk3ruLi-UweWuXhbKeEGDWFwMcK_ks,13
7
+ pathvalidate/__init__.py,sha256=R8x0yEBF3dfwpTlGe1TJZ9XgOmO-tKGoEvpZgNA83Ys,1926
8
+ pathvalidate/__pycache__/__init__.cpython-310.pyc,,
9
+ pathvalidate/__pycache__/__version__.cpython-310.pyc,,
10
+ pathvalidate/__pycache__/_base.cpython-310.pyc,,
11
+ pathvalidate/__pycache__/_common.cpython-310.pyc,,
12
+ pathvalidate/__pycache__/_const.cpython-310.pyc,,
13
+ pathvalidate/__pycache__/_filename.cpython-310.pyc,,
14
+ pathvalidate/__pycache__/_filepath.cpython-310.pyc,,
15
+ pathvalidate/__pycache__/_ltsv.cpython-310.pyc,,
16
+ pathvalidate/__pycache__/_symbol.cpython-310.pyc,,
17
+ pathvalidate/__pycache__/_types.cpython-310.pyc,,
18
+ pathvalidate/__pycache__/argparse.cpython-310.pyc,,
19
+ pathvalidate/__pycache__/click.cpython-310.pyc,,
20
+ pathvalidate/__pycache__/error.cpython-310.pyc,,
21
+ pathvalidate/__pycache__/handler.cpython-310.pyc,,
22
+ pathvalidate/__version__.py,sha256=R8MJHDvfFVYjKEFUDzFulsQ9h1EhLDaHtPVwKRedF-E,201
23
+ pathvalidate/_base.py,sha256=NsynjO1IqYaG6rTbGkMx77OIfcUGSv51jLvMvIyyA1A,7443
24
+ pathvalidate/_common.py,sha256=4JLadI56z-1xST0kfgjtiGMWCkmdlcfdrnZn5wIg_9k,3363
25
+ pathvalidate/_const.py,sha256=UzAu38QxKjZDJEcJ-M99sQDnSpALIK7jJoZizFptiBw,686
26
+ pathvalidate/_filename.py,sha256=YEhwJKEq73kLkqInYjbiagGO22q0iswiISzignbWZXE,17356
27
+ pathvalidate/_filepath.py,sha256=z-QgwCNhy8KY6M8hK8JGeUh3YO-P4_7qAE1p9_LFSXc,18915
28
+ pathvalidate/_ltsv.py,sha256=BuCgH-iLdptUbaghoLCXwk7DQFGBBFjuNGeDv2I0IsM,1203
29
+ pathvalidate/_symbol.py,sha256=8kcG9D7IWCdfw3x18I8qSmA09vpHfQB2suVtMloGu28,2326
30
+ pathvalidate/_types.py,sha256=3CRkyBkMvcPcFPigO-Kr18Z6RgGEgUdLK1cXBg8UjWc,180
31
+ pathvalidate/argparse.py,sha256=z_z7inal8sw2wPwFjsMEMQ2zR3kACdK1qsItocXFf3Y,970
32
+ pathvalidate/click.py,sha256=IvaOB4R7ivR3GNPGaROAzOGBcROWIIsZKADJ08hxab4,1077
33
+ pathvalidate/error.py,sha256=t6ePXdcW3ALnv0c_iEDtjLA8hS7USopJamttH5bmnmQ,7531
34
+ pathvalidate/handler.py,sha256=RDOka3TjLz91yqQdLirQmjhFyEt5PVepk6kmGAAes8o,3268
35
+ pathvalidate/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
llmeval-env/lib/python3.10/site-packages/pathvalidate-3.2.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.41.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (321 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_azurefs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (105 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_azurefs.pyx ADDED
@@ -0,0 +1,134 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+
23
+ from pyarrow.lib import frombytes, tobytes
24
+ from pyarrow.includes.libarrow_fs cimport *
25
+ from pyarrow._fs cimport FileSystem
26
+
27
+
28
+ cdef class AzureFileSystem(FileSystem):
29
+ """
30
+ Azure Blob Storage backed FileSystem implementation
31
+
32
+ This implementation supports flat namespace and hierarchical namespace (HNS) a.k.a.
33
+ Data Lake Gen2 storage accounts. HNS will be automatically detected and HNS specific
34
+ features will be used when they provide a performance advantage. Azurite emulator is
35
+ also supported. Note: `/` is the only supported delimiter.
36
+
37
+ The storage account is considered the root of the filesystem. When enabled, containers
38
+ will be created or deleted during relevant directory operations. Obviously, this also
39
+ requires authentication with the additional permissions.
40
+
41
+ By default `DefaultAzureCredential <https://github.com/Azure/azure-sdk-for-cpp/blob/main/sdk/identity/azure-identity/README.md#defaultazurecredential>`__
42
+ is used for authentication. This means it will try several types of authentication
43
+ and go with the first one that works. If any authentication parameters are provided when
44
+ initialising the FileSystem, they will be used instead of the default credential.
45
+
46
+ Parameters
47
+ ----------
48
+ account_name : str
49
+ Azure Blob Storage account name. This is the globally unique identifier for the
50
+ storage account.
51
+ account_key : str, default None
52
+ Account key of the storage account. Pass None to use default credential.
53
+ blob_storage_authority : str, default None
54
+ hostname[:port] of the Blob Service. Defaults to `.blob.core.windows.net`. Useful
55
+ for connecting to a local emulator, like Azurite.
56
+ dfs_storage_authority : str, default None
57
+ hostname[:port] of the Data Lake Gen 2 Service. Defaults to
58
+ `.dfs.core.windows.net`. Useful for connecting to a local emulator, like Azurite.
59
+ blob_storage_scheme : str, default None
60
+ Either `http` or `https`. Defaults to `https`. Useful for connecting to a local
61
+ emulator, like Azurite.
62
+ dfs_storage_scheme : str, default None
63
+ Either `http` or `https`. Defaults to `https`. Useful for connecting to a local
64
+ emulator, like Azurite.
65
+
66
+ Examples
67
+ --------
68
+ >>> from pyarrow import fs
69
+ >>> azure_fs = fs.AzureFileSystem(account_name='myaccount')
70
+ >>> azurite_fs = fs.AzureFileSystem(
71
+ ... account_name='devstoreaccount1',
72
+ ... account_key='Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
73
+ ... blob_storage_authority='127.0.0.1:10000',
74
+ ... dfs_storage_authority='127.0.0.1:10000',
75
+ ... blob_storage_scheme='http',
76
+ ... dfs_storage_scheme='http',
77
+ ... )
78
+
79
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
80
+ """
81
+ cdef:
82
+ CAzureFileSystem* azurefs
83
+ c_string account_key
84
+
85
+ def __init__(self, account_name, *, account_key=None, blob_storage_authority=None,
86
+ dfs_storage_authority=None, blob_storage_scheme=None,
87
+ dfs_storage_scheme=None):
88
+ cdef:
89
+ CAzureOptions options
90
+ shared_ptr[CAzureFileSystem] wrapped
91
+
92
+ options.account_name = tobytes(account_name)
93
+ if blob_storage_authority:
94
+ options.blob_storage_authority = tobytes(blob_storage_authority)
95
+ if dfs_storage_authority:
96
+ options.dfs_storage_authority = tobytes(dfs_storage_authority)
97
+ if blob_storage_scheme:
98
+ options.blob_storage_scheme = tobytes(blob_storage_scheme)
99
+ if dfs_storage_scheme:
100
+ options.dfs_storage_scheme = tobytes(dfs_storage_scheme)
101
+
102
+ if account_key:
103
+ options.ConfigureAccountKeyCredential(tobytes(account_key))
104
+ self.account_key = tobytes(account_key)
105
+ else:
106
+ options.ConfigureDefaultCredential()
107
+
108
+ with nogil:
109
+ wrapped = GetResultValue(CAzureFileSystem.Make(options))
110
+
111
+ self.init(<shared_ptr[CFileSystem]> wrapped)
112
+
113
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
114
+ FileSystem.init(self, wrapped)
115
+ self.azurefs = <CAzureFileSystem*> wrapped.get()
116
+
117
+ @staticmethod
118
+ @binding(True) # Required for cython < 3
119
+ def _reconstruct(kwargs):
120
+ # __reduce__ doesn't allow passing named arguments directly to the
121
+ # reconstructor, hence this wrapper.
122
+ return AzureFileSystem(**kwargs)
123
+
124
+ def __reduce__(self):
125
+ cdef CAzureOptions opts = self.azurefs.options()
126
+ return (
127
+ AzureFileSystem._reconstruct, (dict(
128
+ account_name=frombytes(opts.account_name),
129
+ account_key=frombytes(self.account_key),
130
+ blob_storage_authority=frombytes(opts.blob_storage_authority),
131
+ dfs_storage_authority=frombytes(opts.dfs_storage_authority),
132
+ blob_storage_scheme=frombytes(opts.blob_storage_scheme),
133
+ dfs_storage_scheme=frombytes(opts.dfs_storage_scheme)
134
+ ),))
llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.pxd ADDED
@@ -0,0 +1,70 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.lib cimport *
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+
24
+ cdef class UdfContext(_Weakrefable):
25
+ cdef:
26
+ CUdfContext c_context
27
+
28
+ cdef void init(self, const CUdfContext& c_context)
29
+
30
+
31
+ cdef class FunctionOptions(_Weakrefable):
32
+ cdef:
33
+ shared_ptr[CFunctionOptions] wrapped
34
+
35
+ cdef const CFunctionOptions* get_options(self) except NULL
36
+ cdef void init(self, const shared_ptr[CFunctionOptions]& sp)
37
+
38
+ cdef inline shared_ptr[CFunctionOptions] unwrap(self)
39
+
40
+
41
+ cdef class _SortOptions(FunctionOptions):
42
+ pass
43
+
44
+
45
+ cdef CExpression _bind(Expression filter, Schema schema) except *
46
+
47
+
48
+ cdef class Expression(_Weakrefable):
49
+
50
+ cdef:
51
+ CExpression expr
52
+
53
+ cdef void init(self, const CExpression& sp)
54
+
55
+ @staticmethod
56
+ cdef wrap(const CExpression& sp)
57
+
58
+ cdef inline CExpression unwrap(self)
59
+
60
+ @staticmethod
61
+ cdef Expression _expr_or_scalar(object expr)
62
+
63
+
64
+ cdef CExpression _true
65
+
66
+ cdef CFieldRef _ensure_field_ref(value) except *
67
+
68
+ cdef CSortOrder unwrap_sort_order(order) except *
69
+
70
+ cdef CNullPlacement unwrap_null_placement(null_placement) except *
llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.pyx ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_compute_docstrings.py ADDED
@@ -0,0 +1,56 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ """
19
+ Custom documentation additions for compute functions.
20
+ """
21
+
22
+ function_doc_additions = {}
23
+
24
+ function_doc_additions["filter"] = """
25
+ Examples
26
+ --------
27
+ >>> import pyarrow as pa
28
+ >>> arr = pa.array(["a", "b", "c", None, "e"])
29
+ >>> mask = pa.array([True, False, None, False, True])
30
+ >>> arr.filter(mask)
31
+ <pyarrow.lib.StringArray object at ...>
32
+ [
33
+ "a",
34
+ "e"
35
+ ]
36
+ >>> arr.filter(mask, null_selection_behavior='emit_null')
37
+ <pyarrow.lib.StringArray object at ...>
38
+ [
39
+ "a",
40
+ null,
41
+ "e"
42
+ ]
43
+ """
44
+
45
+ function_doc_additions["mode"] = """
46
+ Examples
47
+ --------
48
+ >>> import pyarrow as pa
49
+ >>> import pyarrow.compute as pc
50
+ >>> arr = pa.array([1, 1, 2, 2, 3, 2, 2, 2])
51
+ >>> modes = pc.mode(arr, 2)
52
+ >>> modes[0]
53
+ <pyarrow.StructScalar: [('mode', 2), ('count', 5)]>
54
+ >>> modes[1]
55
+ <pyarrow.StructScalar: [('mode', 1), ('count', 2)]>
56
+ """
llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (361 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_cuda.pxd ADDED
@@ -0,0 +1,67 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ from pyarrow.lib cimport *
+ from pyarrow.includes.common cimport *
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.includes.libarrow_cuda cimport *
+
+
+ cdef class Context(_Weakrefable):
+     cdef:
+         shared_ptr[CCudaContext] context
+         int device_number
+
+     cdef void init(self, const shared_ptr[CCudaContext]& ctx)
+
+
+ cdef class IpcMemHandle(_Weakrefable):
+     cdef:
+         shared_ptr[CCudaIpcMemHandle] handle
+
+     cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h)
+
+
+ cdef class CudaBuffer(Buffer):
+     cdef:
+         shared_ptr[CCudaBuffer] cuda_buffer
+         object base
+
+     cdef void init_cuda(self,
+                         const shared_ptr[CCudaBuffer]& buffer,
+                         object base)
+
+
+ cdef class HostBuffer(Buffer):
+     cdef:
+         shared_ptr[CCudaHostBuffer] host_buffer
+
+     cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer)
+
+
+ cdef class BufferReader(NativeFile):
+     cdef:
+         CCudaBufferReader* reader
+         CudaBuffer buffer
+
+
+ cdef class BufferWriter(NativeFile):
+     cdef:
+         CCudaBufferWriter* writer
+         CudaBuffer buffer
llmeval-env/lib/python3.10/site-packages/pyarrow/_cuda.pyx ADDED
@@ -0,0 +1,1058 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ from pyarrow.lib cimport *
20
+ from pyarrow.includes.libarrow_cuda cimport *
21
+ from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError
22
+ from pyarrow.util import get_contiguous_span
23
+ cimport cpython as cp
24
+
25
+
26
+ cdef class Context(_Weakrefable):
27
+ """
28
+ CUDA driver context.
29
+ """
30
+
31
+ def __init__(self, *args, **kwargs):
32
+ """
33
+ Create a CUDA driver context for a particular device.
34
+
35
+ If a CUDA context handle is passed, it is wrapped, otherwise
36
+ a default CUDA context for the given device is requested.
37
+
38
+ Parameters
39
+ ----------
40
+ device_number : int (default 0)
41
+ Specify the GPU device for which the CUDA driver context is
42
+ requested.
43
+ handle : int, optional
44
+ Specify CUDA handle for a shared context that has been created
45
+ by another library.
46
+ """
47
+ # This method exposed because autodoc doesn't pick __cinit__
48
+
49
+ def __cinit__(self, int device_number=0, uintptr_t handle=0):
50
+ cdef CCudaDeviceManager* manager
51
+ manager = GetResultValue(CCudaDeviceManager.Instance())
52
+ cdef int n = manager.num_devices()
53
+ if device_number >= n or device_number < 0:
54
+ self.context.reset()
55
+ raise ValueError('device_number argument must be '
56
+ 'non-negative and less than %s' % (n))
57
+ if handle == 0:
58
+ self.context = GetResultValue(manager.GetContext(device_number))
59
+ else:
60
+ self.context = GetResultValue(manager.GetSharedContext(
61
+ device_number, <void*>handle))
62
+ self.device_number = device_number
63
+
64
+ @staticmethod
65
+ def from_numba(context=None):
66
+ """
67
+ Create a Context instance from a Numba CUDA context.
68
+
69
+ Parameters
70
+ ----------
71
+ context : {numba.cuda.cudadrv.driver.Context, None}
72
+ A Numba CUDA context instance.
73
+ If None, the current Numba context is used.
74
+
75
+ Returns
76
+ -------
77
+ shared_context : pyarrow.cuda.Context
78
+ Context instance.
79
+ """
80
+ if context is None:
81
+ import numba.cuda
82
+ context = numba.cuda.current_context()
83
+ return Context(device_number=context.device.id,
84
+ handle=context.handle.value)
85
+
86
+ def to_numba(self):
87
+ """
88
+ Convert Context to a Numba CUDA context.
89
+
90
+ Returns
91
+ -------
92
+ context : numba.cuda.cudadrv.driver.Context
93
+ Numba CUDA context instance.
94
+ """
95
+ import ctypes
96
+ import numba.cuda
97
+ device = numba.cuda.gpus[self.device_number]
98
+ handle = ctypes.c_void_p(self.handle)
99
+ context = numba.cuda.cudadrv.driver.Context(device, handle)
100
+
101
+ class DummyPendingDeallocs(object):
102
+ # Context is managed by pyarrow
103
+ def add_item(self, *args, **kwargs):
104
+ pass
105
+
106
+ context.deallocations = DummyPendingDeallocs()
107
+ return context
108
+
109
+ @staticmethod
110
+ def get_num_devices():
111
+ """ Return the number of GPU devices.
112
+ """
113
+ cdef CCudaDeviceManager* manager
114
+ manager = GetResultValue(CCudaDeviceManager.Instance())
115
+ return manager.num_devices()
116
+
117
+ @property
118
+ def device_number(self):
119
+ """ Return context device number.
120
+ """
121
+ return self.device_number
122
+
123
+ @property
124
+ def handle(self):
125
+ """ Return pointer to context handle.
126
+ """
127
+ return <uintptr_t>self.context.get().handle()
128
+
129
+ cdef void init(self, const shared_ptr[CCudaContext]& ctx):
130
+ self.context = ctx
131
+
132
+ def synchronize(self):
133
+ """Blocks until the device has completed all preceding requested
134
+ tasks.
135
+ """
136
+ check_status(self.context.get().Synchronize())
137
+
138
+ @property
139
+ def bytes_allocated(self):
140
+ """Return the number of allocated bytes.
141
+ """
142
+ return self.context.get().bytes_allocated()
143
+
144
+ def get_device_address(self, uintptr_t address):
145
+ """Return the device address that is reachable from kernels running in
146
+ the context
147
+
148
+ Parameters
149
+ ----------
150
+ address : int
151
+ Specify memory address value
152
+
153
+ Returns
154
+ -------
155
+ device_address : int
156
+ Device address accessible from device context
157
+
158
+ Notes
159
+ -----
160
+ The device address is defined as a memory address accessible
161
+ by device. It is often a device memory address, but it
162
+ can also be a host memory address, for instance, when the
163
+ memory is allocated as host memory (using cudaMallocHost or
164
+ cudaHostAlloc) or as managed memory (using cudaMallocManaged)
165
+ or the host memory is page-locked (using cudaHostRegister).
166
+ """
167
+ return GetResultValue(self.context.get().GetDeviceAddress(address))
168
+
169
+ def new_buffer(self, int64_t nbytes):
170
+ """Return new device buffer.
171
+
172
+ Parameters
173
+ ----------
174
+ nbytes : int
175
+ Specify the number of bytes to be allocated.
176
+
177
+ Returns
178
+ -------
179
+ buf : CudaBuffer
180
+ Allocated buffer.
181
+ """
182
+ cdef:
183
+ shared_ptr[CCudaBuffer] cudabuf
184
+ with nogil:
185
+ cudabuf = GetResultValue(self.context.get().Allocate(nbytes))
186
+ return pyarrow_wrap_cudabuffer(cudabuf)
187
+
188
+ def foreign_buffer(self, address, size, base=None):
189
+ """
190
+ Create device buffer from address and size as a view.
191
+
192
+ The caller is responsible for allocating and freeing the
193
+ memory. When `address==size==0` then a new zero-sized buffer
194
+ is returned.
195
+
196
+ Parameters
197
+ ----------
198
+ address : int
199
+ Specify the starting address of the buffer. The address can
200
+ refer to both device or host memory but it must be
201
+ accessible from device after mapping it with
202
+ `get_device_address` method.
203
+ size : int
204
+ Specify the size of device buffer in bytes.
205
+ base : {None, object}
206
+ Specify object that owns the referenced memory.
207
+
208
+ Returns
209
+ -------
210
+ cbuf : CudaBuffer
211
+ Device buffer as a view of device reachable memory.
212
+
213
+ """
214
+ if not address and size == 0:
215
+ return self.new_buffer(0)
216
+ cdef:
217
+ uintptr_t c_addr = self.get_device_address(address)
218
+ int64_t c_size = size
219
+ shared_ptr[CCudaBuffer] cudabuf
220
+
221
+ cudabuf = GetResultValue(self.context.get().View(
222
+ <uint8_t*>c_addr, c_size))
223
+ return pyarrow_wrap_cudabuffer_base(cudabuf, base)
224
+
225
+ def open_ipc_buffer(self, ipc_handle):
226
+ """ Open existing CUDA IPC memory handle
227
+
228
+ Parameters
229
+ ----------
230
+ ipc_handle : IpcMemHandle
231
+ Specify opaque pointer to CUipcMemHandle (driver API).
232
+
233
+ Returns
234
+ -------
235
+ buf : CudaBuffer
236
+ referencing device buffer
237
+ """
238
+ handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle)
239
+ cdef shared_ptr[CCudaBuffer] cudabuf
240
+ with nogil:
241
+ cudabuf = GetResultValue(
242
+ self.context.get().OpenIpcBuffer(handle.get()[0]))
243
+ return pyarrow_wrap_cudabuffer(cudabuf)
244
+
245
+ def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1):
246
+ """Create device buffer and initialize with data.
247
+
248
+ Parameters
249
+ ----------
250
+ data : {CudaBuffer, HostBuffer, Buffer, array-like}
251
+ Specify data to be copied to device buffer.
252
+ offset : int
253
+ Specify the offset of input buffer for device data
254
+ buffering. Default: 0.
255
+ size : int
256
+ Specify the size of device buffer in bytes. Default: all
257
+ (starting from input offset)
258
+
259
+ Returns
260
+ -------
261
+ cbuf : CudaBuffer
262
+ Device buffer with copied data.
263
+ """
264
+ is_host_data = not pyarrow_is_cudabuffer(data)
265
+ buf = as_buffer(data) if is_host_data else data
266
+
267
+ bsize = buf.size
268
+ if offset < 0 or (bsize and offset >= bsize):
269
+ raise ValueError('offset argument is out-of-range')
270
+ if size < 0:
271
+ size = bsize - offset
272
+ elif offset + size > bsize:
273
+ raise ValueError(
274
+ 'requested larger slice than available in device buffer')
275
+
276
+ if offset != 0 or size != bsize:
277
+ buf = buf.slice(offset, size)
278
+
279
+ result = self.new_buffer(size)
280
+ if is_host_data:
281
+ result.copy_from_host(buf, position=0, nbytes=size)
282
+ else:
283
+ result.copy_from_device(buf, position=0, nbytes=size)
284
+ return result
285
+
286
+ def buffer_from_object(self, obj):
287
+ """Create device buffer view of arbitrary object that references
288
+ device accessible memory.
289
+
290
+ When the object contains a non-contiguous view of device
291
+ accessible memory then the returned device buffer will contain
292
+ contiguous view of the memory, that is, including the
293
+ intermediate data that is otherwise invisible to the input
294
+ object.
295
+
296
+ Parameters
297
+ ----------
298
+ obj : {object, Buffer, HostBuffer, CudaBuffer, ...}
299
+ Specify an object that holds (device or host) address that
300
+ can be accessed from device. This includes objects with
301
+ types defined in pyarrow.cuda as well as arbitrary objects
302
+ that implement the CUDA array interface as defined by numba.
303
+
304
+ Returns
305
+ -------
306
+ cbuf : CudaBuffer
307
+ Device buffer as a view of device accessible memory.
308
+
309
+ """
310
+ if isinstance(obj, HostBuffer):
311
+ return self.foreign_buffer(obj.address, obj.size, base=obj)
312
+ elif isinstance(obj, Buffer):
313
+ return CudaBuffer.from_buffer(obj)
314
+ elif isinstance(obj, CudaBuffer):
315
+ return obj
316
+ elif hasattr(obj, '__cuda_array_interface__'):
317
+ desc = obj.__cuda_array_interface__
318
+ addr = desc['data'][0]
319
+ if addr is None:
320
+ return self.new_buffer(0)
321
+ import numpy as np
322
+ start, end = get_contiguous_span(
323
+ desc['shape'], desc.get('strides'),
324
+ np.dtype(desc['typestr']).itemsize)
325
+ return self.foreign_buffer(addr + start, end - start, base=obj)
326
+ raise ArrowTypeError('cannot create device buffer view from'
327
+ ' `%s` object' % (type(obj)))
328
+
329
+
330
+ cdef class IpcMemHandle(_Weakrefable):
331
+ """A serializable container for a CUDA IPC handle.
332
+ """
333
+ cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h):
334
+ self.handle = h
335
+
336
+ @staticmethod
337
+ def from_buffer(Buffer opaque_handle):
338
+ """Create IpcMemHandle from opaque buffer (e.g. from another
339
+ process)
340
+
341
+ Parameters
342
+ ----------
343
+ opaque_handle :
344
+ a CUipcMemHandle as a const void*
345
+
346
+ Returns
347
+ -------
348
+ ipc_handle : IpcMemHandle
349
+ """
350
+ c_buf = pyarrow_unwrap_buffer(opaque_handle)
351
+ cdef:
352
+ shared_ptr[CCudaIpcMemHandle] handle
353
+
354
+ handle = GetResultValue(
355
+ CCudaIpcMemHandle.FromBuffer(c_buf.get().data()))
356
+ return pyarrow_wrap_cudaipcmemhandle(handle)
357
+
358
+ def serialize(self, pool=None):
359
+ """Write IpcMemHandle to a Buffer
360
+
361
+ Parameters
362
+ ----------
363
+ pool : {MemoryPool, None}
364
+ Specify a pool to allocate memory from
365
+
366
+ Returns
367
+ -------
368
+ buf : Buffer
369
+ The serialized buffer.
370
+ """
371
+ cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
372
+ cdef shared_ptr[CBuffer] buf
373
+ cdef CCudaIpcMemHandle* h = self.handle.get()
374
+ with nogil:
375
+ buf = GetResultValue(h.Serialize(pool_))
376
+ return pyarrow_wrap_buffer(buf)
377
+
378
+
379
+ cdef class CudaBuffer(Buffer):
380
+ """An Arrow buffer with data located in a GPU device.
381
+
382
+ To create a CudaBuffer instance, use Context.device_buffer().
383
+
384
+ The memory allocated in a CudaBuffer is freed when the buffer object
385
+ is deleted.
386
+ """
387
+
388
+ def __init__(self):
389
+ raise TypeError("Do not call CudaBuffer's constructor directly, use "
390
+ "`<pyarrow.Context instance>.device_buffer`"
391
+ " method instead.")
392
+
393
+ cdef void init_cuda(self,
394
+ const shared_ptr[CCudaBuffer]& buffer,
395
+ object base):
396
+ self.cuda_buffer = buffer
397
+ self.init(<shared_ptr[CBuffer]> buffer)
398
+ self.base = base
399
+
400
+ @staticmethod
401
+ def from_buffer(buf):
402
+ """ Convert a generic Buffer back into a CudaBuffer
403
+
404
+ Parameters
405
+ ----------
406
+ buf : Buffer
407
+ Specify buffer containing CudaBuffer
408
+
409
+ Returns
410
+ -------
411
+ dbuf : CudaBuffer
412
+ Resulting device buffer.
413
+ """
414
+ c_buf = pyarrow_unwrap_buffer(buf)
415
+ cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf))
416
+ return pyarrow_wrap_cudabuffer(cuda_buffer)
417
+
418
+ @staticmethod
419
+ def from_numba(mem):
420
+ """Create a CudaBuffer view from numba MemoryPointer instance.
421
+
422
+ Parameters
423
+ ----------
424
+ mem : numba.cuda.cudadrv.driver.MemoryPointer
425
+
426
+ Returns
427
+ -------
428
+ cbuf : CudaBuffer
429
+ Device buffer as a view of numba MemoryPointer.
430
+ """
431
+ ctx = Context.from_numba(mem.context)
432
+ if mem.device_pointer.value is None and mem.size==0:
433
+ return ctx.new_buffer(0)
434
+ return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem)
435
+
436
+ def to_numba(self):
437
+ """Return numba memory pointer of CudaBuffer instance.
438
+ """
439
+ import ctypes
440
+ from numba.cuda.cudadrv.driver import MemoryPointer
441
+ return MemoryPointer(self.context.to_numba(),
442
+ pointer=ctypes.c_void_p(self.address),
443
+ size=self.size)
444
+
445
+ cdef getitem(self, int64_t i):
446
+ return self.copy_to_host(position=i, nbytes=1)[0]
447
+
448
+ def copy_to_host(self, int64_t position=0, int64_t nbytes=-1,
449
+ Buffer buf=None,
450
+ MemoryPool memory_pool=None, c_bool resizable=False):
451
+ """Copy memory from GPU device to CPU host
452
+
453
+ Caller is responsible for ensuring that all tasks affecting
454
+ the memory are finished. Use
455
+
456
+ `<CudaBuffer instance>.context.synchronize()`
457
+
458
+ when needed.
459
+
460
+ Parameters
461
+ ----------
462
+ position : int
463
+ Specify the starting position of the source data in GPU
464
+ device buffer. Default: 0.
465
+ nbytes : int
466
+ Specify the number of bytes to copy. Default: -1 (all from
467
+ the position until host buffer is full).
468
+ buf : Buffer
469
+ Specify a pre-allocated output buffer in host. Default: None
470
+ (allocate new output buffer).
471
+ memory_pool : MemoryPool
472
+ resizable : bool
473
+ Specify extra arguments to allocate_buffer. Used only when
474
+ buf is None.
475
+
476
+ Returns
477
+ -------
478
+ buf : Buffer
479
+ Output buffer in host.
480
+
481
+ """
482
+ if position < 0 or (self.size and position > self.size) \
483
+ or (self.size == 0 and position != 0):
484
+ raise ValueError('position argument is out-of-range')
485
+ cdef:
486
+ int64_t c_nbytes
487
+ if buf is None:
488
+ if nbytes < 0:
489
+ # copy all starting from position to new host buffer
490
+ c_nbytes = self.size - position
491
+ else:
492
+ if nbytes > self.size - position:
493
+ raise ValueError(
494
+ 'requested more to copy than available from '
495
+ 'device buffer')
496
+ # copy nbytes starting from position to new host buffer
497
+ c_nbytes = nbytes
498
+ buf = allocate_buffer(c_nbytes, memory_pool=memory_pool,
499
+ resizable=resizable)
500
+ else:
501
+ if nbytes < 0:
502
+ # copy all from position until given host buffer is full
503
+ c_nbytes = min(self.size - position, buf.size)
504
+ else:
505
+ if nbytes > buf.size:
506
+ raise ValueError(
507
+ 'requested copy does not fit into host buffer')
508
+ # copy nbytes from position to given host buffer
509
+ c_nbytes = nbytes
510
+
511
+ cdef:
512
+ shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
513
+ int64_t c_position = position
514
+ with nogil:
515
+ check_status(self.cuda_buffer.get()
516
+ .CopyToHost(c_position, c_nbytes,
517
+ c_buf.get().mutable_data()))
518
+ return buf
519
+
520
+ def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1):
521
+ """Copy data from host to device.
522
+
523
+ The device buffer must be pre-allocated.
524
+
525
+ Parameters
526
+ ----------
527
+ data : {Buffer, array-like}
528
+ Specify data in host. It can be array-like that is valid
529
+ argument to py_buffer
530
+ position : int
531
+ Specify the starting position of the copy in device buffer.
532
+ Default: 0.
533
+ nbytes : int
534
+ Specify the number of bytes to copy. Default: -1 (all from
535
+ source until device buffer, starting from position, is full)
536
+
537
+ Returns
538
+ -------
539
+ nbytes : int
540
+ Number of bytes copied.
541
+ """
542
+ if position < 0 or position > self.size:
543
+ raise ValueError('position argument is out-of-range')
544
+ cdef:
545
+ int64_t c_nbytes
546
+ buf = as_buffer(data)
547
+
548
+ if nbytes < 0:
549
+ # copy from host buffer to device buffer starting from
550
+ # position until device buffer is full
551
+ c_nbytes = min(self.size - position, buf.size)
552
+ else:
553
+ if nbytes > buf.size:
554
+ raise ValueError(
555
+ 'requested more to copy than available from host buffer')
556
+ if nbytes > self.size - position:
557
+ raise ValueError(
558
+ 'requested more to copy than available in device buffer')
559
+ # copy nbytes from host buffer to device buffer starting
560
+ # from position
561
+ c_nbytes = nbytes
562
+
563
+ cdef:
564
+ shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
565
+ int64_t c_position = position
566
+ with nogil:
567
+ check_status(self.cuda_buffer.get().
568
+ CopyFromHost(c_position, c_buf.get().data(),
569
+ c_nbytes))
570
+ return c_nbytes
571
+
572
+ def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1):
573
+ """Copy data from device to device.
574
+
575
+ Parameters
576
+ ----------
577
+ buf : CudaBuffer
578
+ Specify source device buffer.
579
+ position : int
580
+ Specify the starting position of the copy in device buffer.
581
+ Default: 0.
582
+ nbytes : int
583
+ Specify the number of bytes to copy. Default: -1 (all from
584
+ source until device buffer, starting from position, is full)
585
+
586
+ Returns
587
+ -------
588
+ nbytes : int
589
+ Number of bytes copied.
590
+
591
+ """
592
+ if position < 0 or position > self.size:
593
+ raise ValueError('position argument is out-of-range')
594
+ cdef:
595
+ int64_t c_nbytes
596
+
597
+ if nbytes < 0:
598
+ # copy from source device buffer to device buffer starting
599
+ # from position until device buffer is full
600
+ c_nbytes = min(self.size - position, buf.size)
601
+ else:
602
+ if nbytes > buf.size:
603
+ raise ValueError(
604
+ 'requested more to copy than available from device buffer')
605
+ if nbytes > self.size - position:
606
+ raise ValueError(
607
+ 'requested more to copy than available in device buffer')
608
+ # copy nbytes from source device buffer to device buffer
609
+ # starting from position
610
+ c_nbytes = nbytes
611
+
612
+ cdef:
613
+ shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf)
614
+ int64_t c_position = position
615
+ shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext(
616
+ buf.context)
617
+ void* c_source_data = <void*>(c_buf.get().address())
618
+
619
+ if self.context.handle != buf.context.handle:
620
+ with nogil:
621
+ check_status(self.cuda_buffer.get().
622
+ CopyFromAnotherDevice(c_src_ctx, c_position,
623
+ c_source_data, c_nbytes))
624
+ else:
625
+ with nogil:
626
+ check_status(self.cuda_buffer.get().
627
+ CopyFromDevice(c_position, c_source_data,
628
+ c_nbytes))
629
+ return c_nbytes
630
+
631
+ def export_for_ipc(self):
632
+ """
633
+ Expose this device buffer as IPC memory which can be used in other
634
+ processes.
635
+
636
+ After calling this function, this device memory will not be
637
+ freed when the CudaBuffer is destructed.
638
+
639
+ Returns
640
+ -------
641
+ ipc_handle : IpcMemHandle
642
+ The exported IPC handle
643
+
644
+ """
645
+ cdef shared_ptr[CCudaIpcMemHandle] handle
646
+ with nogil:
647
+ handle = GetResultValue(self.cuda_buffer.get().ExportForIpc())
648
+ return pyarrow_wrap_cudaipcmemhandle(handle)
649
+
650
+ @property
651
+ def context(self):
652
+ """Returns the CUDA driver context of this buffer.
653
+ """
654
+ return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context())
655
+
656
+ def slice(self, offset=0, length=None):
657
+ """Return slice of device buffer
658
+
659
+ Parameters
660
+ ----------
661
+ offset : int, default 0
662
+ Specify offset from the start of device buffer to slice
663
+ length : int, default None
664
+ Specify the length of slice (default is until end of device
665
+ buffer starting from offset). If the length is larger than
666
+ the data available, the returned slice will have a size of
667
+ the available data starting from the offset.
668
+
669
+ Returns
670
+ -------
671
+ sliced : CudaBuffer
672
+ Zero-copy slice of device buffer.
673
+
674
+ """
675
+ if offset < 0 or (self.size and offset >= self.size):
676
+ raise ValueError('offset argument is out-of-range')
677
+ cdef int64_t offset_ = offset
678
+ cdef int64_t size
679
+ if length is None:
680
+ size = self.size - offset_
681
+ elif offset + length <= self.size:
682
+ size = length
683
+ else:
684
+ size = self.size - offset
685
+ parent = pyarrow_unwrap_cudabuffer(self)
686
+ return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent,
687
+ offset_, size))
688
+
689
+ def to_pybytes(self):
690
+ """Return device buffer content as Python bytes.
691
+ """
692
+ return self.copy_to_host().to_pybytes()
693
+
694
+ def __getbuffer__(self, cp.Py_buffer* buffer, int flags):
695
+ # Device buffer contains data pointers on the device. Hence,
696
+ # cannot support buffer protocol PEP-3118 for CudaBuffer.
697
+ raise BufferError('buffer protocol for device buffer not supported')
698
+
699
+
700
+ cdef class HostBuffer(Buffer):
701
+ """Device-accessible CPU memory created using cudaHostAlloc.
702
+
703
+ To create a HostBuffer instance, use
704
+
705
+ cuda.new_host_buffer(<nbytes>)
706
+ """
707
+
708
+ def __init__(self):
709
+ raise TypeError("Do not call HostBuffer's constructor directly,"
710
+ " use `cuda.new_host_buffer` function instead.")
711
+
712
+ cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer):
713
+ self.host_buffer = buffer
714
+ self.init(<shared_ptr[CBuffer]> buffer)
715
+
716
+ @property
717
+ def size(self):
718
+ return self.host_buffer.get().size()
719
+
720
+
721
+ cdef class BufferReader(NativeFile):
722
+ """File interface for zero-copy read from CUDA buffers.
723
+
724
+ Note: Read methods return pointers to device memory. This means
725
+ you must be careful using this interface with any Arrow code which
726
+ may expect to be able to do anything other than pointer arithmetic
727
+ on the returned buffers.
728
+ """
729
+
730
+ def __cinit__(self, CudaBuffer obj):
731
+ self.buffer = obj
732
+ self.reader = new CCudaBufferReader(self.buffer.buffer)
733
+ self.set_random_access_file(
734
+ shared_ptr[CRandomAccessFile](self.reader))
735
+ self.is_readable = True
736
+
737
+ def read_buffer(self, nbytes=None):
738
+ """Return a slice view of the underlying device buffer.
739
+
740
+ The slice will start at the current reader position and will
741
+ have specified size in bytes.
742
+
743
+ Parameters
744
+ ----------
745
+ nbytes : int, default None
746
+ Specify the number of bytes to read. Default: None (read all
747
+ remaining bytes).
748
+
749
+ Returns
750
+ -------
751
+ cbuf : CudaBuffer
752
+ New device buffer.
753
+
754
+ """
755
+ cdef:
756
+ int64_t c_nbytes
757
+ shared_ptr[CCudaBuffer] output
758
+
759
+ if nbytes is None:
760
+ c_nbytes = self.size() - self.tell()
761
+ else:
762
+ c_nbytes = nbytes
763
+
764
+ with nogil:
765
+ output = static_pointer_cast[CCudaBuffer, CBuffer](
766
+ GetResultValue(self.reader.Read(c_nbytes)))
767
+
768
+ return pyarrow_wrap_cudabuffer(output)
769
+
770
+
771
+ cdef class BufferWriter(NativeFile):
772
+ """File interface for writing to CUDA buffers.
773
+
774
+ By default writes are unbuffered. Use set_buffer_size to enable
775
+ buffering.
776
+ """
777
+
778
+ def __cinit__(self, CudaBuffer buffer):
779
+ self.buffer = buffer
780
+ self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer)
781
+ self.set_output_stream(shared_ptr[COutputStream](self.writer))
782
+ self.is_writable = True
783
+
784
+ def writeat(self, int64_t position, object data):
785
+ """Write data to buffer starting from position.
786
+
787
+ Parameters
788
+ ----------
789
+ position : int
790
+ Specify device buffer position where the data will be
791
+ written.
792
+ data : array-like
793
+ Specify data, the data instance must implement buffer
794
+ protocol.
795
+ """
796
+ cdef:
797
+ Buffer buf = as_buffer(data)
798
+ const uint8_t* c_data = buf.buffer.get().data()
799
+ int64_t c_size = buf.buffer.get().size()
800
+
801
+ with nogil:
802
+ check_status(self.writer.WriteAt(position, c_data, c_size))
803
+
804
+ def flush(self):
805
+ """ Flush the buffer stream """
806
+ with nogil:
807
+ check_status(self.writer.Flush())
808
+
809
+ def seek(self, int64_t position, int whence=0):
810
+ # TODO: remove this method after NativeFile.seek supports
811
+ # writable files.
812
+ cdef int64_t offset
813
+
814
+ with nogil:
815
+ if whence == 0:
816
+ offset = position
817
+ elif whence == 1:
818
+ offset = GetResultValue(self.writer.Tell())
819
+ offset = offset + position
820
+ else:
821
+ with gil:
822
+ raise ValueError("Invalid value of whence: {0}"
823
+ .format(whence))
824
+ check_status(self.writer.Seek(offset))
825
+ return self.tell()
826
+
827
+ @property
828
+ def buffer_size(self):
829
+ """Returns size of host (CPU) buffer, 0 for unbuffered
830
+ """
831
+ return self.writer.buffer_size()
832
+
833
+ @buffer_size.setter
834
+ def buffer_size(self, int64_t buffer_size):
835
+ """Set CPU buffer size to limit calls to cudaMemcpy
836
+
837
+ Parameters
838
+ ----------
839
+ buffer_size : int
840
+ Specify the size of CPU buffer to allocate in bytes.
841
+ """
842
+ with nogil:
843
+ check_status(self.writer.SetBufferSize(buffer_size))
844
+
845
+ @property
846
+ def num_bytes_buffered(self):
847
+ """Returns number of bytes buffered on host
848
+ """
849
+ return self.writer.num_bytes_buffered()
850
+
851
+ # Functions
852
+
853
+
854
+ def new_host_buffer(const int64_t size, int device=0):
855
+ """Return buffer with CUDA-accessible memory on CPU host
856
+
857
+ Parameters
858
+ ----------
859
+ size : int
860
+ Specify the number of bytes to be allocated.
861
+ device : int
862
+ Specify GPU device number.
863
+
864
+ Returns
865
+ -------
866
+ dbuf : HostBuffer
867
+ Allocated host buffer
868
+ """
869
+ cdef shared_ptr[CCudaHostBuffer] buffer
870
+ with nogil:
871
+ buffer = GetResultValue(AllocateCudaHostBuffer(device, size))
872
+ return pyarrow_wrap_cudahostbuffer(buffer)
873
+
874
+
875
+ def serialize_record_batch(object batch, object ctx):
876
+ """ Write record batch message to GPU device memory
877
+
878
+ Parameters
879
+ ----------
880
+ batch : RecordBatch
881
+ Record batch to write
882
+ ctx : Context
883
+ CUDA Context to allocate device memory from
884
+
885
+ Returns
886
+ -------
887
+ dbuf : CudaBuffer
888
+ device buffer which contains the record batch message
889
+ """
890
+ cdef shared_ptr[CCudaBuffer] buffer
891
+ cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get()
892
+ cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get()
893
+ with nogil:
894
+ buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_))
895
+ return pyarrow_wrap_cudabuffer(buffer)
896
+
897
+
898
+ def read_message(object source, pool=None):
899
+ """ Read Arrow IPC message located on GPU device
900
+
901
+ Parameters
902
+ ----------
903
+ source : {CudaBuffer, cuda.BufferReader}
904
+ Device buffer or reader of device buffer.
905
+ pool : MemoryPool (optional)
906
+ Pool to allocate CPU memory for the metadata
907
+
908
+ Returns
909
+ -------
910
+ message : Message
911
+ The deserialized message, body still on device
912
+ """
913
+ cdef:
914
+ Message result = Message.__new__(Message)
915
+ cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
916
+ if not isinstance(source, BufferReader):
917
+ reader = BufferReader(source)
918
+ with nogil:
919
+ result.message = move(
920
+ GetResultValue(ReadMessage(reader.reader, pool_)))
921
+ return result
922
+
923
+
924
+ def read_record_batch(object buffer, object schema, *,
925
+ DictionaryMemo dictionary_memo=None, pool=None):
926
+ """Construct RecordBatch referencing IPC message located on CUDA device.
927
+
928
+ While the metadata is copied to host memory for deserialization,
929
+ the record batch data remains on the device.
930
+
931
+ Parameters
932
+ ----------
933
+ buffer :
934
+ Device buffer containing the complete IPC message
935
+ schema : Schema
936
+ The schema for the record batch
937
+ dictionary_memo : DictionaryMemo, optional
938
+ If message contains dictionaries, must pass a populated
939
+ DictionaryMemo
940
+ pool : MemoryPool (optional)
941
+ Pool to allocate metadata from
942
+
943
+ Returns
944
+ -------
945
+ batch : RecordBatch
946
+ Reconstructed record batch, with device pointers
947
+
948
+ """
949
+ cdef:
950
+ shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema)
951
+ shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer)
952
+ CDictionaryMemo temp_memo
953
+ CDictionaryMemo* arg_dict_memo
954
+ CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
955
+ shared_ptr[CRecordBatch] batch
956
+
957
+ if dictionary_memo is not None:
958
+ arg_dict_memo = dictionary_memo.memo
959
+ else:
960
+ arg_dict_memo = &temp_memo
961
+
962
+ with nogil:
963
+ batch = GetResultValue(CudaReadRecordBatch(
964
+ schema_, arg_dict_memo, buffer_, pool_))
965
+ return pyarrow_wrap_batch(batch)
966
+
967
+
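A minimal sketch of round-tripping a record batch through device memory with the two functions above; it assumes a CUDA-enabled pyarrow build and at least one visible GPU:

    import pyarrow as pa
    from pyarrow import cuda

    ctx = cuda.Context(0)
    batch = pa.RecordBatch.from_pydict({"x": [1, 2, 3]})
    dbuf = cuda.serialize_record_batch(batch, ctx)           # IPC message in GPU memory
    roundtrip = cuda.read_record_batch(dbuf, batch.schema)   # body stays on the device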
968
+ # Public API
969
+
970
+
971
+ cdef public api bint pyarrow_is_buffer(object buffer):
972
+ return isinstance(buffer, Buffer)
973
+
974
+ # cudabuffer
975
+
976
+ cdef public api bint pyarrow_is_cudabuffer(object buffer):
977
+ return isinstance(buffer, CudaBuffer)
978
+
979
+
980
+ cdef public api object \
981
+ pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base):
982
+ cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
983
+ result.init_cuda(buf, base)
984
+ return result
985
+
986
+
987
+ cdef public api object \
988
+ pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf):
989
+ cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
990
+ result.init_cuda(buf, None)
991
+ return result
992
+
993
+
994
+ cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj):
995
+ if pyarrow_is_cudabuffer(obj):
996
+ return (<CudaBuffer>obj).cuda_buffer
997
+ raise TypeError('expected CudaBuffer instance, got %s'
998
+ % (type(obj).__name__))
999
+
1000
+ # cudahostbuffer
1001
+
1002
+ cdef public api bint pyarrow_is_cudahostbuffer(object buffer):
1003
+ return isinstance(buffer, HostBuffer)
1004
+
1005
+
1006
+ cdef public api object \
1007
+ pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf):
1008
+ cdef HostBuffer result = HostBuffer.__new__(HostBuffer)
1009
+ result.init_host(buf)
1010
+ return result
1011
+
1012
+
1013
+ cdef public api shared_ptr[CCudaHostBuffer] \
1014
+ pyarrow_unwrap_cudahostbuffer(object obj):
1015
+ if pyarrow_is_cudahostbuffer(obj):
1016
+ return (<HostBuffer>obj).host_buffer
1017
+ raise TypeError('expected HostBuffer instance, got %s'
1018
+ % (type(obj).__name__))
1019
+
1020
+ # cudacontext
1021
+
1022
+ cdef public api bint pyarrow_is_cudacontext(object ctx):
1023
+ return isinstance(ctx, Context)
1024
+
1025
+
1026
+ cdef public api object \
1027
+ pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx):
1028
+ cdef Context result = Context.__new__(Context)
1029
+ result.init(ctx)
1030
+ return result
1031
+
1032
+
1033
+ cdef public api shared_ptr[CCudaContext] \
1034
+ pyarrow_unwrap_cudacontext(object obj):
1035
+ if pyarrow_is_cudacontext(obj):
1036
+ return (<Context>obj).context
1037
+ raise TypeError('expected Context instance, got %s'
1038
+ % (type(obj).__name__))
1039
+
1040
+ # cudaipcmemhandle
1041
+
1042
+ cdef public api bint pyarrow_is_cudaipcmemhandle(object handle):
1043
+ return isinstance(handle, IpcMemHandle)
1044
+
1045
+
1046
+ cdef public api object \
1047
+ pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h):
1048
+ cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle)
1049
+ result.init(h)
1050
+ return result
1051
+
1052
+
1053
+ cdef public api shared_ptr[CCudaIpcMemHandle] \
1054
+ pyarrow_unwrap_cudaipcmemhandle(object obj):
1055
+ if pyarrow_is_cudaipcmemhandle(obj):
1056
+ return (<IpcMemHandle>obj).handle
1057
+ raise TypeError('expected IpcMemHandle instance, got %s'
1058
+ % (type(obj).__name__))
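A minimal usage sketch for the Context and CudaBuffer classes defined in this file, assuming a CUDA-enabled build and a GPU at device 0:

    from pyarrow import cuda

    ctx = cuda.Context(0)                      # default driver context for device 0
    dbuf = ctx.buffer_from_data(b"hello gpu")  # allocate on device, copy from host
    ctx.synchronize()                          # wait for pending device work
    host = dbuf.copy_to_host()                 # copy back into a CPU pyarrow.Buffer
    assert host.to_pybytes() == b"hello gpu"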
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (78.6 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx ADDED
@@ -0,0 +1,51 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ # cython: language_level = 3
+
+ """Dataset support for ORC file format."""
+
+ from pyarrow.lib cimport *
+ from pyarrow.includes.libarrow cimport *
+ from pyarrow.includes.libarrow_dataset cimport *
+
+ from pyarrow._dataset cimport FileFormat
+
+
+ cdef class OrcFileFormat(FileFormat):
+
+     def __init__(self):
+         self.init(shared_ptr[CFileFormat](new COrcFileFormat()))
+
+     def equals(self, OrcFileFormat other):
+         """
+         Parameters
+         ----------
+         other : pyarrow.dataset.OrcFileFormat
+
+         Returns
+         -------
+         True
+         """
+         return True
+
+     @property
+     def default_extname(self):
+         return "orc"
+
+     def __reduce__(self):
+         return OrcFileFormat, tuple()
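A minimal usage sketch, assuming pyarrow was built with ORC support and that the placeholder path `data/` holds one or more .orc files:

    import pyarrow.dataset as ds

    dataset = ds.dataset("data/", format="orc")  # resolves to OrcFileFormat
    table = dataset.to_table()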
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (357 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx ADDED
@@ -0,0 +1,1023 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet file format."""
21
+
22
+ from cython cimport binding
23
+ from cython.operator cimport dereference as deref
24
+
25
+ import os
26
+ import warnings
27
+
28
+ import pyarrow as pa
29
+ from pyarrow.lib cimport *
30
+ from pyarrow.lib import frombytes, tobytes
31
+ from pyarrow.includes.libarrow cimport *
32
+ from pyarrow.includes.libarrow_dataset cimport *
33
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
34
+ from pyarrow._fs cimport FileSystem
35
+
36
+ from pyarrow._compute cimport Expression, _bind
37
+ from pyarrow._dataset cimport (
38
+ _make_file_source,
39
+ DatasetFactory,
40
+ FileFormat,
41
+ FileFragment,
42
+ FileWriteOptions,
43
+ Fragment,
44
+ FragmentScanOptions,
45
+ CacheOptions,
46
+ Partitioning,
47
+ PartitioningFactory,
48
+ WrittenFile
49
+ )
50
+
51
+ from pyarrow._parquet cimport (
52
+ _create_writer_properties, _create_arrow_writer_properties,
53
+ FileMetaData,
54
+ )
55
+
56
+
57
+ try:
58
+ from pyarrow._dataset_parquet_encryption import (
59
+ set_encryption_config, set_decryption_config
60
+ )
61
+ parquet_encryption_enabled = True
62
+ except ImportError:
63
+ parquet_encryption_enabled = False
64
+
65
+
66
+ cdef Expression _true = Expression._scalar(True)
67
+
68
+ ctypedef CParquetFileWriter* _CParquetFileWriterPtr
69
+
70
+
71
+ cdef class ParquetFileFormat(FileFormat):
72
+ """
73
+ FileFormat for Parquet
74
+
75
+ Parameters
76
+ ----------
77
+ read_options : ParquetReadOptions
78
+ Read options for the file.
79
+ default_fragment_scan_options : ParquetFragmentScanOptions
80
+ Scan Options for the file.
81
+ **kwargs : dict
82
+ Additional options for read option or scan option
83
+ """
84
+
85
+ cdef:
86
+ CParquetFileFormat* parquet_format
87
+
88
+ def __init__(self, read_options=None,
89
+ default_fragment_scan_options=None,
90
+ **kwargs):
91
+ cdef:
92
+ shared_ptr[CParquetFileFormat] wrapped
93
+ CParquetFileFormatReaderOptions* options
94
+
95
+ # Read/scan options
96
+ read_options_args = {option: kwargs[option] for option in kwargs
97
+ if option in _PARQUET_READ_OPTIONS}
98
+ scan_args = {option: kwargs[option] for option in kwargs
99
+ if option not in _PARQUET_READ_OPTIONS}
100
+ if read_options and read_options_args:
101
+ duplicates = ', '.join(sorted(read_options_args))
102
+ raise ValueError(f'If `read_options` is given, '
103
+ f'cannot specify {duplicates}')
104
+ if default_fragment_scan_options and scan_args:
105
+ duplicates = ', '.join(sorted(scan_args))
106
+ raise ValueError(f'If `default_fragment_scan_options` is given, '
107
+ f'cannot specify {duplicates}')
108
+
109
+ if read_options is None:
110
+ read_options = ParquetReadOptions(**read_options_args)
111
+ elif isinstance(read_options, dict):
112
+ # For backwards compatibility
113
+ duplicates = []
114
+ for option, value in read_options.items():
115
+ if option in _PARQUET_READ_OPTIONS:
116
+ read_options_args[option] = value
117
+ else:
118
+ duplicates.append(option)
119
+ scan_args[option] = value
120
+ if duplicates:
121
+ duplicates = ", ".join(duplicates)
122
+ warnings.warn(f'The scan options {duplicates} should be '
123
+ 'specified directly as keyword arguments')
124
+ read_options = ParquetReadOptions(**read_options_args)
125
+ elif not isinstance(read_options, ParquetReadOptions):
126
+ raise TypeError('`read_options` must be either a dictionary or an '
127
+ 'instance of ParquetReadOptions')
128
+
129
+ if default_fragment_scan_options is None:
130
+ default_fragment_scan_options = ParquetFragmentScanOptions(
131
+ **scan_args)
132
+ elif isinstance(default_fragment_scan_options, dict):
133
+ default_fragment_scan_options = ParquetFragmentScanOptions(
134
+ **default_fragment_scan_options)
135
+ elif not isinstance(default_fragment_scan_options,
136
+ ParquetFragmentScanOptions):
137
+ raise TypeError('`default_fragment_scan_options` must be either a '
138
+ 'dictionary or an instance of '
139
+ 'ParquetFragmentScanOptions')
140
+
141
+ wrapped = make_shared[CParquetFileFormat]()
142
+
143
+ options = &(wrapped.get().reader_options)
144
+ if read_options.dictionary_columns is not None:
145
+ for column in read_options.dictionary_columns:
146
+ options.dict_columns.insert(tobytes(column))
147
+ options.coerce_int96_timestamp_unit = \
148
+ read_options._coerce_int96_timestamp_unit
149
+
150
+ self.init(<shared_ptr[CFileFormat]> wrapped)
151
+ self.default_fragment_scan_options = default_fragment_scan_options
152
+
153
+ cdef void init(self, const shared_ptr[CFileFormat]& sp):
154
+ FileFormat.init(self, sp)
155
+ self.parquet_format = <CParquetFileFormat*> sp.get()
156
+
157
+ cdef WrittenFile _finish_write(self, path, base_dir,
158
+ CFileWriter* file_writer):
159
+ cdef:
160
+ FileMetaData parquet_metadata
161
+ CParquetFileWriter* parquet_file_writer
162
+
163
+ parquet_metadata = None
164
+ parquet_file_writer = dynamic_cast[_CParquetFileWriterPtr](file_writer)
165
+ with nogil:
166
+ metadata = deref(
167
+ deref(parquet_file_writer).parquet_writer()).metadata()
168
+ if metadata:
169
+ parquet_metadata = FileMetaData()
170
+ parquet_metadata.init(metadata)
171
+ parquet_metadata.set_file_path(os.path.relpath(path, base_dir))
172
+
173
+ size = GetResultValue(file_writer.GetBytesWritten())
174
+
175
+ return WrittenFile(path, parquet_metadata, size)
176
+
177
+ @property
178
+ def read_options(self):
179
+ cdef CParquetFileFormatReaderOptions* options
180
+ options = &self.parquet_format.reader_options
181
+ parquet_read_options = ParquetReadOptions(
182
+ dictionary_columns={frombytes(col)
183
+ for col in options.dict_columns},
184
+ )
185
+ # Read options getter/setter works with strings so setting
186
+ # the private property which uses the C Type
187
+ parquet_read_options._coerce_int96_timestamp_unit = \
188
+ options.coerce_int96_timestamp_unit
189
+ return parquet_read_options
190
+
191
+ def make_write_options(self, **kwargs):
192
+ """
193
+ Parameters
194
+ ----------
195
+ **kwargs : dict
196
+
197
+ Returns
198
+ -------
199
+ pyarrow.dataset.FileWriteOptions
200
+ """
201
+ # Safeguard from calling make_write_options as a static class method
202
+ if not isinstance(self, ParquetFileFormat):
203
+ raise TypeError("make_write_options() should be called on "
204
+ "an instance of ParquetFileFormat")
205
+ opts = FileFormat.make_write_options(self)
206
+ (<ParquetFileWriteOptions> opts).update(**kwargs)
207
+ return opts
208
+
209
+ cdef _set_default_fragment_scan_options(self, FragmentScanOptions options):
210
+ if options.type_name == 'parquet':
211
+ self.parquet_format.default_fragment_scan_options = options.wrapped
212
+ else:
213
+ super()._set_default_fragment_scan_options(options)
214
+
215
+ def equals(self, ParquetFileFormat other):
216
+ """
217
+ Parameters
218
+ ----------
219
+ other : pyarrow.dataset.ParquetFileFormat
220
+
221
+ Returns
222
+ -------
223
+ bool
224
+ """
225
+ return (
226
+ self.read_options.equals(other.read_options) and
227
+ self.default_fragment_scan_options ==
228
+ other.default_fragment_scan_options
229
+ )
230
+
231
+ @property
232
+ def default_extname(self):
233
+ return "parquet"
234
+
235
+ def __reduce__(self):
236
+ return ParquetFileFormat, (self.read_options,
237
+ self.default_fragment_scan_options)
238
+
239
+ def __repr__(self):
240
+ return f"<ParquetFileFormat read_options={self.read_options}>"
241
+
242
+ def make_fragment(self, file, filesystem=None,
243
+ Expression partition_expression=None, row_groups=None, *, file_size=None):
244
+ """
245
+ Make a FileFragment from a given file.
246
+
247
+ Parameters
248
+ ----------
249
+ file : file-like object, path-like or str
250
+ The file or file path to make a fragment from.
251
+ filesystem : Filesystem, optional
252
+ If `filesystem` is given, `file` must be a string and specifies
253
+ the path of the file to read from the filesystem.
254
+ partition_expression : Expression, optional
255
+ An expression that is guaranteed true for all rows in the fragment. Allows
256
+ fragment to be potentially skipped while scanning with a filter.
257
+ row_groups : Iterable, optional
258
+ The indices of the row groups to include
259
+ file_size : int, optional
260
+ The size of the file in bytes. Can improve performance with high-latency filesystems
261
+ when file size needs to be known before reading.
262
+
263
+ Returns
264
+ -------
265
+ fragment : Fragment
266
+ The file fragment
267
+ """
268
+ cdef:
269
+ vector[int] c_row_groups
270
+ if partition_expression is None:
271
+ partition_expression = _true
272
+ if row_groups is None:
273
+ return super().make_fragment(file, filesystem,
274
+ partition_expression, file_size=file_size)
275
+
276
+ c_source = _make_file_source(file, filesystem, file_size)
277
+ c_row_groups = [<int> row_group for row_group in set(row_groups)]
278
+
279
+ c_fragment = <shared_ptr[CFragment]> GetResultValue(
280
+ self.parquet_format.MakeFragment(move(c_source),
281
+ partition_expression.unwrap(),
282
+ <shared_ptr[CSchema]>nullptr,
283
+ move(c_row_groups)))
284
+ return Fragment.wrap(move(c_fragment))
285
+
286
+
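A minimal sketch of configuring this format when opening a dataset; the path and column name are placeholders:

    import pyarrow.dataset as ds

    fmt = ds.ParquetFileFormat(dictionary_columns=["key"])  # read option passed as kwarg
    dataset = ds.dataset("data/", format=fmt)
    table = dataset.to_table()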
287
+ class RowGroupInfo:
288
+ """
289
+ A wrapper class for RowGroup information
290
+
291
+ Parameters
292
+ ----------
293
+ id : integer
294
+ The group ID.
295
+ metadata : FileMetaData
296
+ The rowgroup metadata.
297
+ schema : Schema
298
+ Schema of the rows.
299
+ """
300
+
301
+ def __init__(self, id, metadata, schema):
302
+ self.id = id
303
+ self.metadata = metadata
304
+ self.schema = schema
305
+
306
+ @property
307
+ def num_rows(self):
308
+ return self.metadata.num_rows
309
+
310
+ @property
311
+ def total_byte_size(self):
312
+ return self.metadata.total_byte_size
313
+
314
+ @property
315
+ def statistics(self):
316
+ def name_stats(i):
317
+ col = self.metadata.column(i)
318
+
319
+ stats = col.statistics
320
+ if stats is None or not stats.has_min_max:
321
+ return None, None
322
+
323
+ name = col.path_in_schema
324
+ field_index = self.schema.get_field_index(name)
325
+ if field_index < 0:
326
+ return None, None
327
+
328
+ typ = self.schema.field(field_index).type
329
+ return col.path_in_schema, {
330
+ 'min': pa.scalar(stats.min, type=typ).as_py(),
331
+ 'max': pa.scalar(stats.max, type=typ).as_py()
332
+ }
333
+
334
+ return {
335
+ name: stats for name, stats
336
+ in map(name_stats, range(self.metadata.num_columns))
337
+ if stats is not None
338
+ }
339
+
340
+ def __repr__(self):
341
+ return "RowGroupInfo({})".format(self.id)
342
+
343
+ def __eq__(self, other):
344
+ if isinstance(other, int):
345
+ return self.id == other
346
+ if not isinstance(other, RowGroupInfo):
347
+ return False
348
+ return self.id == other.id
349
+
350
+
351
+ cdef class ParquetFileFragment(FileFragment):
352
+ """A Fragment representing a parquet file."""
353
+
354
+ cdef:
355
+ CParquetFileFragment* parquet_file_fragment
356
+
357
+ cdef void init(self, const shared_ptr[CFragment]& sp):
358
+ FileFragment.init(self, sp)
359
+ self.parquet_file_fragment = <CParquetFileFragment*> sp.get()
360
+
361
+ def __reduce__(self):
362
+ buffer = self.buffer
363
+ # parquet_file_fragment.row_groups() is empty if the metadata
364
+ # information of the file is not yet populated
365
+ if not bool(self.parquet_file_fragment.row_groups()):
366
+ row_groups = None
367
+ else:
368
+ row_groups = [row_group.id for row_group in self.row_groups]
369
+
370
+ return self.format.make_fragment, (
371
+ self.path if buffer is None else buffer,
372
+ self.filesystem,
373
+ self.partition_expression,
374
+ row_groups
375
+ )
376
+
377
+ def ensure_complete_metadata(self):
378
+ """
379
+ Ensure that all metadata (statistics, physical schema, ...) have
380
+ been read and cached in this fragment.
381
+ """
382
+ with nogil:
383
+ check_status(self.parquet_file_fragment.EnsureCompleteMetadata())
384
+
385
+ @property
386
+ def row_groups(self):
387
+ metadata = self.metadata
388
+ cdef vector[int] row_groups = self.parquet_file_fragment.row_groups()
389
+ return [RowGroupInfo(i, metadata.row_group(i), self.physical_schema)
390
+ for i in row_groups]
391
+
392
+ @property
393
+ def metadata(self):
394
+ self.ensure_complete_metadata()
395
+ cdef FileMetaData metadata = FileMetaData()
396
+ metadata.init(self.parquet_file_fragment.metadata())
397
+ return metadata
398
+
399
+ @property
400
+ def num_row_groups(self):
401
+ """
402
+ Return the number of row groups viewed by this fragment (not the
403
+ number of row groups in the origin file).
404
+ """
405
+ self.ensure_complete_metadata()
406
+ return self.parquet_file_fragment.row_groups().size()
407
+
408
+ def split_by_row_group(self, Expression filter=None,
409
+ Schema schema=None):
410
+ """
411
+ Split the fragment into multiple fragments.
412
+
413
+ Yield a Fragment wrapping each row group in this ParquetFileFragment.
414
+ Row groups will be excluded whose metadata contradicts the optional
415
+ filter.
416
+
417
+ Parameters
418
+ ----------
419
+ filter : Expression, default None
420
+ Only include the row groups which satisfy this predicate (using
421
+ the Parquet RowGroup statistics).
422
+ schema : Schema, default None
423
+ Schema to use when filtering row groups. Defaults to the
424
+ Fragment's physical schema
425
+
426
+ Returns
427
+ -------
428
+ A list of Fragments
429
+ """
430
+ cdef:
431
+ vector[shared_ptr[CFragment]] c_fragments
432
+ CExpression c_filter
433
+ shared_ptr[CFragment] c_fragment
434
+
435
+ schema = schema or self.physical_schema
436
+ c_filter = _bind(filter, schema)
437
+ with nogil:
438
+ c_fragments = move(GetResultValue(
439
+ self.parquet_file_fragment.SplitByRowGroup(move(c_filter))))
440
+
441
+ return [Fragment.wrap(c_fragment) for c_fragment in c_fragments]
442
+
443
+ def subset(self, Expression filter=None, Schema schema=None,
444
+ object row_group_ids=None):
445
+ """
446
+ Create a subset of the fragment (viewing a subset of the row groups).
447
+
448
+ Subset can be specified by either a filter predicate (with optional
449
+ schema) or by a list of row group IDs. Note that when using a filter,
450
+ the resulting fragment can be empty (viewing no row groups).
451
+
452
+ Parameters
453
+ ----------
454
+ filter : Expression, default None
455
+ Only include the row groups which satisfy this predicate (using
456
+ the Parquet RowGroup statistics).
457
+ schema : Schema, default None
458
+ Schema to use when filtering row groups. Defaults to the
459
+ Fragment's physical schema
460
+ row_group_ids : list of ints
461
+ The row group IDs to include in the subset. Can only be specified
462
+ if `filter` is None.
463
+
464
+ Returns
465
+ -------
466
+ ParquetFileFragment
467
+ """
468
+ cdef:
469
+ CExpression c_filter
470
+ vector[int] c_row_group_ids
471
+ shared_ptr[CFragment] c_fragment
472
+
473
+ if filter is not None and row_group_ids is not None:
474
+ raise ValueError(
475
+ "Cannot specify both 'filter' and 'row_group_ids'."
476
+ )
477
+
478
+ if filter is not None:
479
+ schema = schema or self.physical_schema
480
+ c_filter = _bind(filter, schema)
481
+ with nogil:
482
+ c_fragment = move(GetResultValue(
483
+ self.parquet_file_fragment.SubsetWithFilter(
484
+ move(c_filter))))
485
+ elif row_group_ids is not None:
486
+ c_row_group_ids = [
487
+ <int> row_group for row_group in sorted(set(row_group_ids))
488
+ ]
489
+ with nogil:
490
+ c_fragment = move(GetResultValue(
491
+ self.parquet_file_fragment.SubsetWithIds(
492
+ move(c_row_group_ids))))
493
+ else:
494
+ raise ValueError(
495
+ "Need to specify one of 'filter' or 'row_group_ids'"
496
+ )
497
+
498
+ return Fragment.wrap(c_fragment)
499
+
500
+
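A minimal sketch of the row-group level API above, continuing from the previous dataset sketch; `key` is a placeholder column name:

    import pyarrow.dataset as ds

    fragment = next(iter(dataset.get_fragments()))   # a ParquetFileFragment
    pieces = fragment.split_by_row_group(filter=ds.field("key") == "a")
    subset = fragment.subset(row_group_ids=[0])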
501
+ cdef class ParquetReadOptions(_Weakrefable):
502
+ """
503
+ Parquet format specific options for reading.
504
+
505
+ Parameters
506
+ ----------
507
+ dictionary_columns : list of string, default None
508
+ Names of columns which should be dictionary encoded as
509
+ they are read
510
+ coerce_int96_timestamp_unit : str, default None
511
+ Cast timestamps that are stored in INT96 format to a particular
512
+ resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
513
+ and therefore INT96 timestamps will be inferred as timestamps
514
+ in nanoseconds
515
+ """
516
+
517
+ cdef public:
518
+ set dictionary_columns
519
+ TimeUnit _coerce_int96_timestamp_unit
520
+
521
+ # Also see _PARQUET_READ_OPTIONS
522
+ def __init__(self, dictionary_columns=None,
523
+ coerce_int96_timestamp_unit=None):
524
+ self.dictionary_columns = set(dictionary_columns or set())
525
+ self.coerce_int96_timestamp_unit = coerce_int96_timestamp_unit
526
+
527
+ @property
528
+ def coerce_int96_timestamp_unit(self):
529
+ return timeunit_to_string(self._coerce_int96_timestamp_unit)
530
+
531
+ @coerce_int96_timestamp_unit.setter
532
+ def coerce_int96_timestamp_unit(self, unit):
533
+ if unit is not None:
534
+ self._coerce_int96_timestamp_unit = string_to_timeunit(unit)
535
+ else:
536
+ self._coerce_int96_timestamp_unit = TimeUnit_NANO
537
+
538
+ def equals(self, ParquetReadOptions other):
539
+ """
540
+ Parameters
541
+ ----------
542
+ other : pyarrow.dataset.ParquetReadOptions
543
+
544
+ Returns
545
+ -------
546
+ bool
547
+ """
548
+ return (self.dictionary_columns == other.dictionary_columns and
549
+ self.coerce_int96_timestamp_unit ==
550
+ other.coerce_int96_timestamp_unit)
551
+
552
+ def __eq__(self, other):
553
+ try:
554
+ return self.equals(other)
555
+ except TypeError:
556
+ return False
557
+
558
+ def __repr__(self):
559
+ return (
560
+ f"<ParquetReadOptions"
561
+ f" dictionary_columns={self.dictionary_columns}"
562
+ f" coerce_int96_timestamp_unit={self.coerce_int96_timestamp_unit}>"
563
+ )
564
+
565
+
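+ # Illustrative usage sketch: ParquetReadOptions is normally passed to a
+ # ParquetFileFormat (the "data/" path and the "category" column are
+ # hypothetical).
+ #
+ # >>> import pyarrow.dataset as ds
+ # >>> read_options = ds.ParquetReadOptions(
+ # ...     dictionary_columns=["category"],
+ # ...     coerce_int96_timestamp_unit="ms")
+ # >>> fmt = ds.ParquetFileFormat(read_options=read_options)
+ # >>> dataset = ds.dataset("data/", format=fmt)   # doctest: +SKIP
+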
566
+ cdef class ParquetFileWriteOptions(FileWriteOptions):
567
+
568
+ def update(self, **kwargs):
569
+ """
570
+ Parameters
571
+ ----------
572
+ **kwargs : dict
573
+ """
574
+ arrow_fields = {
575
+ "use_deprecated_int96_timestamps",
576
+ "coerce_timestamps",
577
+ "allow_truncated_timestamps",
578
+ "use_compliant_nested_type",
579
+ }
580
+
581
+ setters = set()
582
+ for name, value in kwargs.items():
583
+ if name not in self._properties:
584
+ raise TypeError("unexpected parquet write option: " + name)
585
+ self._properties[name] = value
586
+ if name in arrow_fields:
587
+ setters.add(self._set_arrow_properties)
588
+ elif name == "encryption_config" and value is not None:
589
+ setters.add(self._set_encryption_config)
590
+ else:
591
+ setters.add(self._set_properties)
592
+
593
+ for setter in setters:
594
+ setter()
595
+
596
+ def _set_properties(self):
597
+ cdef CParquetFileWriteOptions* opts = self.parquet_options
598
+
599
+ opts.writer_properties = _create_writer_properties(
600
+ use_dictionary=self._properties["use_dictionary"],
601
+ compression=self._properties["compression"],
602
+ version=self._properties["version"],
603
+ write_statistics=self._properties["write_statistics"],
604
+ data_page_size=self._properties["data_page_size"],
605
+ compression_level=self._properties["compression_level"],
606
+ use_byte_stream_split=(
607
+ self._properties["use_byte_stream_split"]
608
+ ),
609
+ column_encoding=self._properties["column_encoding"],
610
+ data_page_version=self._properties["data_page_version"],
611
+ encryption_properties=self._properties["encryption_properties"],
612
+ write_batch_size=self._properties["write_batch_size"],
613
+ dictionary_pagesize_limit=self._properties["dictionary_pagesize_limit"],
614
+ write_page_index=self._properties["write_page_index"],
615
+ write_page_checksum=self._properties["write_page_checksum"],
616
+ sorting_columns=self._properties["sorting_columns"],
617
+ )
618
+
619
+ def _set_arrow_properties(self):
620
+ cdef CParquetFileWriteOptions* opts = self.parquet_options
621
+
622
+ opts.arrow_writer_properties = _create_arrow_writer_properties(
623
+ use_deprecated_int96_timestamps=(
624
+ self._properties["use_deprecated_int96_timestamps"]
625
+ ),
626
+ coerce_timestamps=self._properties["coerce_timestamps"],
627
+ allow_truncated_timestamps=(
628
+ self._properties["allow_truncated_timestamps"]
629
+ ),
630
+ writer_engine_version="V2",
631
+ use_compliant_nested_type=(
632
+ self._properties["use_compliant_nested_type"]
633
+ )
634
+ )
635
+
636
+ def _set_encryption_config(self):
637
+ if not parquet_encryption_enabled:
638
+ raise NotImplementedError(
639
+ "Encryption is not enabled in your installation of pyarrow, but an "
640
+ "encryption_config was provided."
641
+ )
642
+ set_encryption_config(self, self._properties["encryption_config"])
643
+
644
+ cdef void init(self, const shared_ptr[CFileWriteOptions]& sp):
645
+ FileWriteOptions.init(self, sp)
646
+ self.parquet_options = <CParquetFileWriteOptions*> sp.get()
647
+ self._properties = dict(
648
+ use_dictionary=True,
649
+ compression="snappy",
650
+ version="2.6",
651
+ write_statistics=None,
652
+ data_page_size=None,
653
+ compression_level=None,
654
+ use_byte_stream_split=False,
655
+ column_encoding=None,
656
+ data_page_version="1.0",
657
+ use_deprecated_int96_timestamps=False,
658
+ coerce_timestamps=None,
659
+ allow_truncated_timestamps=False,
660
+ use_compliant_nested_type=True,
661
+ encryption_properties=None,
662
+ write_batch_size=None,
663
+ dictionary_pagesize_limit=None,
664
+ write_page_index=False,
665
+ encryption_config=None,
666
+ write_page_checksum=False,
667
+ sorting_columns=None,
668
+ )
669
+
670
+ self._set_properties()
671
+ self._set_arrow_properties()
672
+
673
+ def __repr__(self):
674
+ return "<pyarrow.dataset.ParquetFileWriteOptions {0}>".format(
675
+ " ".join([f"{key}={value}" for key, value in self._properties.items()])
676
+ )
677
+
678
+
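+ # Illustrative usage sketch: ParquetFileWriteOptions instances are usually
+ # created through ParquetFileFormat.make_write_options() and handed to
+ # ds.write_dataset() (the in-memory table and the "out/" path are
+ # hypothetical).
+ #
+ # >>> import pyarrow as pa
+ # >>> import pyarrow.dataset as ds
+ # >>> table = pa.table({"x": [1, 2, 3]})
+ # >>> fmt = ds.ParquetFileFormat()
+ # >>> write_options = fmt.make_write_options(compression="zstd",
+ # ...                                         write_statistics=True)
+ # >>> ds.write_dataset(table, "out/", format=fmt,
+ # ...                  file_options=write_options)   # doctest: +SKIP
+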
679
+ cdef set _PARQUET_READ_OPTIONS = {
680
+ 'dictionary_columns', 'coerce_int96_timestamp_unit'
681
+ }
682
+
683
+
684
+ cdef class ParquetFragmentScanOptions(FragmentScanOptions):
685
+ """
686
+ Scan-specific options for Parquet fragments.
687
+
688
+ Parameters
689
+ ----------
690
+ use_buffered_stream : bool, default False
691
+ Read files through buffered input streams rather than loading entire
692
+ row groups at once. This may be enabled to reduce memory overhead.
693
+ Disabled by default.
694
+ buffer_size : int, default 8192
695
+ Size of buffered stream, if enabled. Default is 8KB.
696
+ pre_buffer : bool, default True
697
+ If enabled, pre-buffer the raw Parquet data instead of issuing one
698
+ read per column chunk. This can improve performance on high-latency
699
+ filesystems (e.g. S3, GCS) by coalescing and issuing file reads in
700
+ parallel using a background I/O thread pool.
701
+ Set to False if you want to prioritize minimal memory usage
702
+ over maximum speed.
703
+ cache_options : pyarrow.CacheOptions, default None
704
+ Cache options used when pre_buffer is enabled. The default values should
705
+ be good for most use cases. You may want to adjust these for example if
706
+ you have exceptionally high latency to the file system.
707
+ thrift_string_size_limit : int, default None
708
+ If not None, override the maximum total string size allocated
709
+ when decoding Thrift structures. The default limit should be
710
+ sufficient for most Parquet files.
711
+ thrift_container_size_limit : int, default None
712
+ If not None, override the maximum total size of containers allocated
713
+ when decoding Thrift structures. The default limit should be
714
+ sufficient for most Parquet files.
715
+ decryption_config : pyarrow.dataset.ParquetDecryptionConfig, default None
716
+ If not None, use the provided ParquetDecryptionConfig to decrypt the
717
+ Parquet file.
718
+ page_checksum_verification : bool, default False
719
+ If True, verify the page checksum for each page read from the file.
720
+ """
721
+
722
+ # Avoid mistakenly creating attributes
723
+ __slots__ = ()
724
+
725
+ def __init__(self, *, bint use_buffered_stream=False,
726
+ buffer_size=8192,
727
+ bint pre_buffer=True,
728
+ cache_options=None,
729
+ thrift_string_size_limit=None,
730
+ thrift_container_size_limit=None,
731
+ decryption_config=None,
732
+ bint page_checksum_verification=False):
733
+ self.init(shared_ptr[CFragmentScanOptions](
734
+ new CParquetFragmentScanOptions()))
735
+ self.use_buffered_stream = use_buffered_stream
736
+ self.buffer_size = buffer_size
737
+ self.pre_buffer = pre_buffer
738
+ if cache_options is not None:
739
+ self.cache_options = cache_options
740
+ if thrift_string_size_limit is not None:
741
+ self.thrift_string_size_limit = thrift_string_size_limit
742
+ if thrift_container_size_limit is not None:
743
+ self.thrift_container_size_limit = thrift_container_size_limit
744
+ if decryption_config is not None:
745
+ self.parquet_decryption_config = decryption_config
746
+ self.page_checksum_verification = page_checksum_verification
747
+
748
+ cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp):
749
+ FragmentScanOptions.init(self, sp)
750
+ self.parquet_options = <CParquetFragmentScanOptions*> sp.get()
751
+
752
+ cdef CReaderProperties* reader_properties(self):
753
+ return self.parquet_options.reader_properties.get()
754
+
755
+ cdef ArrowReaderProperties* arrow_reader_properties(self):
756
+ return self.parquet_options.arrow_reader_properties.get()
757
+
758
+ @property
759
+ def use_buffered_stream(self):
760
+ return self.reader_properties().is_buffered_stream_enabled()
761
+
762
+ @use_buffered_stream.setter
763
+ def use_buffered_stream(self, bint use_buffered_stream):
764
+ if use_buffered_stream:
765
+ self.reader_properties().enable_buffered_stream()
766
+ else:
767
+ self.reader_properties().disable_buffered_stream()
768
+
769
+ @property
770
+ def buffer_size(self):
771
+ return self.reader_properties().buffer_size()
772
+
773
+ @buffer_size.setter
774
+ def buffer_size(self, buffer_size):
775
+ if buffer_size <= 0:
776
+ raise ValueError("Buffer size must be larger than zero")
777
+ self.reader_properties().set_buffer_size(buffer_size)
778
+
779
+ @property
780
+ def pre_buffer(self):
781
+ return self.arrow_reader_properties().pre_buffer()
782
+
783
+ @pre_buffer.setter
784
+ def pre_buffer(self, bint pre_buffer):
785
+ self.arrow_reader_properties().set_pre_buffer(pre_buffer)
786
+
787
+ @property
788
+ def cache_options(self):
789
+ return CacheOptions.wrap(self.arrow_reader_properties().cache_options())
790
+
791
+ @cache_options.setter
792
+ def cache_options(self, CacheOptions options):
793
+ self.arrow_reader_properties().set_cache_options(options.unwrap())
794
+
795
+ @property
796
+ def thrift_string_size_limit(self):
797
+ return self.reader_properties().thrift_string_size_limit()
798
+
799
+ @thrift_string_size_limit.setter
800
+ def thrift_string_size_limit(self, size):
801
+ if size <= 0:
802
+ raise ValueError("size must be larger than zero")
803
+ self.reader_properties().set_thrift_string_size_limit(size)
804
+
805
+ @property
806
+ def thrift_container_size_limit(self):
807
+ return self.reader_properties().thrift_container_size_limit()
808
+
809
+ @thrift_container_size_limit.setter
810
+ def thrift_container_size_limit(self, size):
811
+ if size <= 0:
812
+ raise ValueError("size must be larger than zero")
813
+ self.reader_properties().set_thrift_container_size_limit(size)
814
+
815
+ @property
816
+ def parquet_decryption_config(self):
817
+ if not parquet_encryption_enabled:
818
+ raise NotImplementedError(
819
+ "Unable to access encryption features. "
820
+ "Encryption is not enabled in your installation of pyarrow."
821
+ )
822
+ return self._parquet_decryption_config
823
+
824
+ @parquet_decryption_config.setter
825
+ def parquet_decryption_config(self, config):
826
+ if not parquet_encryption_enabled:
827
+ raise NotImplementedError(
828
+ "Encryption is not enabled in your installation of pyarrow, but a "
829
+ "decryption_config was provided."
830
+ )
831
+ set_decryption_config(self, config)
832
+ self._parquet_decryption_config = config
833
+
834
+ @property
835
+ def page_checksum_verification(self):
836
+ return self.reader_properties().page_checksum_verification()
837
+
838
+ @page_checksum_verification.setter
839
+ def page_checksum_verification(self, bint page_checksum_verification):
840
+ self.reader_properties().set_page_checksum_verification(page_checksum_verification)
841
+
842
+ def equals(self, ParquetFragmentScanOptions other):
843
+ """
844
+ Parameters
845
+ ----------
846
+ other : pyarrow.dataset.ParquetFragmentScanOptions
847
+
848
+ Returns
849
+ -------
850
+ bool
851
+ """
852
+ attrs = (
853
+ self.use_buffered_stream, self.buffer_size, self.pre_buffer, self.cache_options,
854
+ self.thrift_string_size_limit, self.thrift_container_size_limit,
855
+ self.page_checksum_verification)
856
+ other_attrs = (
857
+ other.use_buffered_stream, other.buffer_size, other.pre_buffer, other.cache_options,
858
+ other.thrift_string_size_limit,
859
+ other.thrift_container_size_limit, other.page_checksum_verification)
860
+ return attrs == other_attrs
861
+
862
+ @staticmethod
863
+ @binding(True) # Required for Cython < 3
864
+ def _reconstruct(kwargs):
865
+ # __reduce__ doesn't allow passing named arguments directly to the
866
+ # reconstructor, hence this wrapper.
867
+ return ParquetFragmentScanOptions(**kwargs)
868
+
869
+ def __reduce__(self):
870
+ kwargs = dict(
871
+ use_buffered_stream=self.use_buffered_stream,
872
+ buffer_size=self.buffer_size,
873
+ pre_buffer=self.pre_buffer,
874
+ cache_options=self.cache_options,
875
+ thrift_string_size_limit=self.thrift_string_size_limit,
876
+ thrift_container_size_limit=self.thrift_container_size_limit,
877
+ page_checksum_verification=self.page_checksum_verification
878
+ )
879
+ return ParquetFragmentScanOptions._reconstruct, (kwargs,)
880
+
881
+
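+ # Illustrative usage sketch: scan options can be attached to the format as
+ # defaults, or passed per scan (the S3 bucket path is hypothetical).
+ #
+ # >>> import pyarrow.dataset as ds
+ # >>> scan_opts = ds.ParquetFragmentScanOptions(
+ # ...     pre_buffer=True, page_checksum_verification=True)
+ # >>> fmt = ds.ParquetFileFormat(default_fragment_scan_options=scan_opts)
+ # >>> dataset = ds.dataset("s3://bucket/data/", format=fmt)      # doctest: +SKIP
+ # >>> table = dataset.to_table(fragment_scan_options=scan_opts)  # doctest: +SKIP
+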
882
+ cdef class ParquetFactoryOptions(_Weakrefable):
883
+ """
884
+ Influences the discovery of a parquet dataset.
885
+
886
+ Parameters
887
+ ----------
888
+ partition_base_dir : str, optional
889
+ For the purposes of applying the partitioning, paths will be
890
+ stripped of the partition_base_dir. Files not matching the
891
+ partition_base_dir prefix will be skipped for partitioning discovery.
892
+ The ignored files will still be part of the Dataset, but will not
893
+ have partition information.
894
+ partitioning : Partitioning, PartitioningFactory, optional
895
+ The partitioning scheme applied to fragments, see ``Partitioning``.
896
+ validate_column_chunk_paths : bool, default False
897
+ Assert that all ColumnChunk paths are consistent. The parquet spec
898
+ allows for ColumnChunk data to be stored in multiple files, but
899
+ ParquetDatasetFactory supports only a single file with all ColumnChunk
900
+ data. If this flag is set construction of a ParquetDatasetFactory will
901
+ raise an error if ColumnChunk data is not resident in a single file.
902
+ """
903
+
904
+ cdef:
905
+ CParquetFactoryOptions options
906
+
907
+ __slots__ = ()  # avoid mistakenly creating attributes
908
+
909
+ def __init__(self, partition_base_dir=None, partitioning=None,
910
+ validate_column_chunk_paths=False):
911
+ if isinstance(partitioning, PartitioningFactory):
912
+ self.partitioning_factory = partitioning
913
+ elif isinstance(partitioning, Partitioning):
914
+ self.partitioning = partitioning
915
+
916
+ if partition_base_dir is not None:
917
+ self.partition_base_dir = partition_base_dir
918
+
919
+ self.options.validate_column_chunk_paths = validate_column_chunk_paths
920
+
921
+ cdef inline CParquetFactoryOptions unwrap(self):
922
+ return self.options
923
+
924
+ @property
925
+ def partitioning(self):
926
+ """Partitioning to apply to discovered files.
927
+
928
+ NOTE: setting this property will overwrite partitioning_factory.
929
+ """
930
+ c_partitioning = self.options.partitioning.partitioning()
931
+ if c_partitioning.get() == nullptr:
932
+ return None
933
+ return Partitioning.wrap(c_partitioning)
934
+
935
+ @partitioning.setter
936
+ def partitioning(self, Partitioning value):
937
+ self.options.partitioning = (<Partitioning> value).unwrap()
938
+
939
+ @property
940
+ def partitioning_factory(self):
941
+ """PartitioningFactory to apply to discovered files and
942
+ discover a Partitioning.
943
+
944
+ NOTE: setting this property will overwrite partitioning.
945
+ """
946
+ c_factory = self.options.partitioning.factory()
947
+ if c_factory.get() == nullptr:
948
+ return None
949
+ return PartitioningFactory.wrap(c_factory, None, None)
950
+
951
+ @partitioning_factory.setter
952
+ def partitioning_factory(self, PartitioningFactory value):
953
+ self.options.partitioning = (<PartitioningFactory> value).unwrap()
954
+
955
+ @property
956
+ def partition_base_dir(self):
957
+ """
958
+ Base directory to strip paths before applying the partitioning.
959
+ """
960
+ return frombytes(self.options.partition_base_dir)
961
+
962
+ @partition_base_dir.setter
963
+ def partition_base_dir(self, value):
964
+ self.options.partition_base_dir = tobytes(value)
965
+
966
+ @property
967
+ def validate_column_chunk_paths(self):
968
+ """
969
+ Whether to assert that all ColumnChunk paths are consistent.
970
+ """
971
+ return self.options.validate_column_chunk_paths
972
+
973
+ @validate_column_chunk_paths.setter
974
+ def validate_column_chunk_paths(self, value):
975
+ self.options.validate_column_chunk_paths = value
976
+
977
+
978
+ cdef class ParquetDatasetFactory(DatasetFactory):
979
+ """
980
+ Create a ParquetDatasetFactory from a Parquet `_metadata` file.
981
+
982
+ Parameters
983
+ ----------
984
+ metadata_path : str
985
+ Path to the `_metadata` parquet metadata-only file generated with
986
+ `pyarrow.parquet.write_metadata`.
987
+ filesystem : pyarrow.fs.FileSystem
988
+ Filesystem to read the metadata_path from, and subsequent parquet
989
+ files.
990
+ format : ParquetFileFormat
991
+ Parquet format options.
992
+ options : ParquetFactoryOptions, optional
993
+ Various flags influencing the discovery of filesystem paths.
994
+ """
995
+
996
+ cdef:
997
+ CParquetDatasetFactory* parquet_factory
998
+
999
+ def __init__(self, metadata_path, FileSystem filesystem not None,
1000
+ FileFormat format not None,
1001
+ ParquetFactoryOptions options=None):
1002
+ cdef:
1003
+ c_string c_path
1004
+ shared_ptr[CFileSystem] c_filesystem
1005
+ shared_ptr[CParquetFileFormat] c_format
1006
+ CResult[shared_ptr[CDatasetFactory]] result
1007
+ CParquetFactoryOptions c_options
1008
+
1009
+ c_path = tobytes(metadata_path)
1010
+ c_filesystem = filesystem.unwrap()
1011
+ c_format = static_pointer_cast[CParquetFileFormat, CFileFormat](
1012
+ format.unwrap())
1013
+ options = options or ParquetFactoryOptions()
1014
+ c_options = options.unwrap()
1015
+
1016
+ with nogil:
1017
+ result = CParquetDatasetFactory.MakeFromMetaDataPath(
1018
+ c_path, c_filesystem, c_format, c_options)
1019
+ self.init(GetResultValue(result))
1020
+
1021
+ cdef init(self, shared_ptr[CDatasetFactory]& sp):
1022
+ DatasetFactory.init(self, sp)
1023
+ self.parquet_factory = <CParquetDatasetFactory*> sp.get()
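+
+ # Illustrative usage sketch: ParquetDatasetFactory is exposed through the
+ # ds.parquet_dataset() helper, given a "_metadata" file written with
+ # pyarrow.parquet.write_metadata() (the "data/_metadata" path is
+ # hypothetical).
+ #
+ # >>> import pyarrow.dataset as ds
+ # >>> dataset = ds.parquet_dataset("data/_metadata",
+ # ...                              partitioning="hive")   # doctest: +SKIP
+ # >>> table = dataset.to_table()                          # doctest: +SKIP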
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (116 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx ADDED
@@ -0,0 +1,170 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ """Dataset support for Parquet encryption."""
21
+
22
+ from pyarrow.includes.libarrow_dataset_parquet cimport *
23
+ from pyarrow._parquet_encryption cimport *
24
+ from pyarrow._dataset_parquet cimport ParquetFragmentScanOptions, ParquetFileWriteOptions
25
+
26
+
27
+ cdef class ParquetEncryptionConfig(_Weakrefable):
28
+ """
29
+ Core configuration class encapsulating parameters for high-level encryption
30
+ within the Parquet framework.
31
+
32
+ The ParquetEncryptionConfig class serves as a bridge for passing encryption-related
33
+ parameters to the appropriate components within the Parquet library. It maintains references
34
+ to objects that define the encryption strategy, Key Management Service (KMS) configuration,
35
+ and specific encryption configurations for Parquet data.
36
+
37
+ Parameters
38
+ ----------
39
+ crypto_factory : pyarrow.parquet.encryption.CryptoFactory
40
+ Shared pointer to a `CryptoFactory` object. The `CryptoFactory` is responsible for
41
+ creating cryptographic components, such as encryptors and decryptors.
42
+ kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
43
+ Shared pointer to a `KmsConnectionConfig` object. This object holds the configuration
44
+ parameters necessary for connecting to a Key Management Service (KMS).
45
+ encryption_config : pyarrow.parquet.encryption.EncryptionConfiguration
46
+ Shared pointer to an `EncryptionConfiguration` object. This object defines specific
47
+ encryption settings for Parquet data, including the keys assigned to different columns.
48
+
49
+ Raises
50
+ ------
51
+ ValueError
52
+ Raised if `encryption_config` is None.
53
+ """
54
+ cdef:
55
+ shared_ptr[CParquetEncryptionConfig] c_config
56
+
57
+ # Avoid mistakenly creating attributes
58
+ __slots__ = ()
59
+
60
+ def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
61
+ EncryptionConfiguration encryption_config):
62
+
63
+ cdef shared_ptr[CEncryptionConfiguration] c_encryption_config
64
+
65
+ if crypto_factory is None:
66
+ raise ValueError("crypto_factory cannot be None")
67
+
68
+ if kms_connection_config is None:
69
+ raise ValueError("kms_connection_config cannot be None")
70
+
71
+ if encryption_config is None:
72
+ raise ValueError("encryption_config cannot be None")
73
+
74
+ self.c_config.reset(new CParquetEncryptionConfig())
75
+
76
+ c_encryption_config = pyarrow_unwrap_encryptionconfig(
77
+ encryption_config)
78
+
79
+ self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
80
+ self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
81
+ kms_connection_config)
82
+ self.c_config.get().encryption_config = c_encryption_config
83
+
84
+ @staticmethod
85
+ cdef wrap(shared_ptr[CParquetEncryptionConfig] c_config):
86
+ cdef ParquetEncryptionConfig python_config = ParquetEncryptionConfig.__new__(ParquetEncryptionConfig)
87
+ python_config.c_config = c_config
88
+ return python_config
89
+
90
+ cdef shared_ptr[CParquetEncryptionConfig] unwrap(self):
91
+ return self.c_config
92
+
93
+
94
+ cdef class ParquetDecryptionConfig(_Weakrefable):
95
+ """
96
+ Core configuration class encapsulating parameters for high-level decryption
97
+ within the Parquet framework.
98
+
99
+ ParquetDecryptionConfig is designed to pass decryption-related parameters to
100
+ the appropriate decryption components within the Parquet library. It holds references to
101
+ objects that define the decryption strategy, Key Management Service (KMS) configuration,
102
+ and specific decryption configurations for reading encrypted Parquet data.
103
+
104
+ Parameters
105
+ ----------
106
+ crypto_factory : pyarrow.parquet.encryption.CryptoFactory
107
+ Shared pointer to a `CryptoFactory` object, pivotal in creating cryptographic
108
+ components for the decryption process.
109
+ kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig
110
+ Shared pointer to a `KmsConnectionConfig` object, containing parameters necessary
111
+ for connecting to a Key Management Service (KMS) during decryption.
112
+ decryption_config : pyarrow.parquet.encryption.DecryptionConfiguration
113
+ Shared pointer to a `DecryptionConfiguration` object, specifying decryption settings
114
+ for reading encrypted Parquet data.
115
+
116
+ Raises
117
+ ------
118
+ ValueError
119
+ Raised if `decryption_config` is None.
120
+ """
121
+
122
+ cdef:
123
+ shared_ptr[CParquetDecryptionConfig] c_config
124
+
125
+ # Avoid mistakenly creating attributes
126
+ __slots__ = ()
127
+
128
+ def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config,
129
+ DecryptionConfiguration decryption_config):
130
+
131
+ cdef shared_ptr[CDecryptionConfiguration] c_decryption_config
132
+
133
+ if decryption_config is None:
134
+ raise ValueError(
135
+ "decryption_config cannot be None")
136
+
137
+ self.c_config.reset(new CParquetDecryptionConfig())
138
+
139
+ c_decryption_config = pyarrow_unwrap_decryptionconfig(
140
+ decryption_config)
141
+
142
+ self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory)
143
+ self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig(
144
+ kms_connection_config)
145
+ self.c_config.get().decryption_config = c_decryption_config
146
+
147
+ @staticmethod
148
+ cdef wrap(shared_ptr[CParquetDecryptionConfig] c_config):
149
+ cdef ParquetDecryptionConfig python_config = ParquetDecryptionConfig.__new__(ParquetDecryptionConfig)
150
+ python_config.c_config = c_config
151
+ return python_config
152
+
153
+ cdef shared_ptr[CParquetDecryptionConfig] unwrap(self):
154
+ return self.c_config
155
+
156
+
157
+ def set_encryption_config(
158
+ ParquetFileWriteOptions opts not None,
159
+ ParquetEncryptionConfig config not None
160
+ ):
161
+ cdef shared_ptr[CParquetEncryptionConfig] c_config = config.unwrap()
162
+ opts.parquet_options.parquet_encryption_config = c_config
163
+
164
+
165
+ def set_decryption_config(
166
+ ParquetFragmentScanOptions opts not None,
167
+ ParquetDecryptionConfig config not None
168
+ ):
169
+ cdef shared_ptr[CParquetDecryptionConfig] c_config = config.unwrap()
170
+ opts.parquet_options.parquet_decryption_config = c_config
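+
+ # Illustrative usage sketch: the configs above are built from the
+ # pyarrow.parquet.encryption primitives and attached to the dataset write
+ # and scan options. "my_kms_client_factory", the key names and the column
+ # name are hypothetical; a real KmsClient implementation is required.
+ #
+ # >>> import pyarrow.dataset as ds
+ # >>> import pyarrow.parquet.encryption as pe
+ # >>> crypto_factory = pe.CryptoFactory(my_kms_client_factory)  # doctest: +SKIP
+ # >>> kms_config = pe.KmsConnectionConfig()
+ # >>> enc_cfg = ds.ParquetEncryptionConfig(
+ # ...     crypto_factory, kms_config,
+ # ...     pe.EncryptionConfiguration(footer_key="footer_key",
+ # ...                                column_keys={"col_key": ["secret_col"]}))
+ # >>> dec_cfg = ds.ParquetDecryptionConfig(
+ # ...     crypto_factory, kms_config, pe.DecryptionConfiguration())
+ # >>> fmt = ds.ParquetFileFormat(
+ # ...     default_fragment_scan_options=ds.ParquetFragmentScanOptions(
+ # ...         decryption_config=dec_cfg))
+ # >>> write_opts = fmt.make_write_options(encryption_config=enc_cfg)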
llmeval-env/lib/python3.10/site-packages/pyarrow/_dlpack.pxi ADDED
@@ -0,0 +1,46 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ cimport cpython
19
+ from cpython.pycapsule cimport PyCapsule_New
20
+
21
+
22
+ cdef void dlpack_pycapsule_deleter(object dltensor) noexcept:
23
+ cdef DLManagedTensor* dlm_tensor
24
+ cdef PyObject* err_type
25
+ cdef PyObject* err_value
26
+ cdef PyObject* err_traceback
27
+
28
+ # Do nothing if the capsule has been consumed
29
+ if cpython.PyCapsule_IsValid(dltensor, "used_dltensor"):
30
+ return
31
+
32
+ # An exception may be in-flight, we must save it in case
33
+ # we create another one
34
+ cpython.PyErr_Fetch(&err_type, &err_value, &err_traceback)
35
+
36
+ dlm_tensor = <DLManagedTensor*>cpython.PyCapsule_GetPointer(dltensor, 'dltensor')
37
+ if dlm_tensor == NULL:
38
+ cpython.PyErr_WriteUnraisable(dltensor)
39
+ # The deleter can be NULL if there is no way for the caller
40
+ # to provide a reasonable destructor
41
+ elif dlm_tensor.deleter:
42
+ dlm_tensor.deleter(dlm_tensor)
43
+ assert (not cpython.PyErr_Occurred())
44
+
45
+ # Set the error indicator from err_type, err_value, err_traceback
46
+ cpython.PyErr_Restore(err_type, err_value, err_traceback)
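+
+ # Illustrative usage sketch: this deleter backs the DLPack capsules that
+ # pyarrow arrays export. Recent pyarrow versions expose __dlpack__ on
+ # primitive, null-free arrays, so a DLPack consumer such as a recent NumPy
+ # can import them zero-copy (assumes numpy.from_dlpack is available).
+ #
+ # >>> import pyarrow as pa
+ # >>> import numpy as np
+ # >>> arr = pa.array([1, 2, 3], type=pa.int64())
+ # >>> np.from_dlpack(arr)   # doctest: +SKIP
+ # array([1, 2, 3])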
llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.pyx ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (496 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pyx ADDED
@@ -0,0 +1,1634 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cpython.datetime cimport datetime, PyDateTime_DateTime
21
+ from cython cimport binding
22
+
23
+ from pyarrow.includes.common cimport *
24
+ from pyarrow.includes.libarrow_python cimport PyDateTime_to_TimePoint
25
+ from pyarrow.lib import _detect_compression, frombytes, tobytes
26
+ from pyarrow.lib cimport *
27
+ from pyarrow.util import _stringify_path
28
+
29
+ from abc import ABC, abstractmethod
30
+ from datetime import datetime, timezone
31
+ import os
32
+ import pathlib
33
+ import sys
34
+
35
+
36
+ cdef _init_ca_paths():
37
+ cdef CFileSystemGlobalOptions options
38
+
39
+ import ssl
40
+ paths = ssl.get_default_verify_paths()
41
+ if paths.cafile:
42
+ options.tls_ca_file_path = os.fsencode(paths.cafile)
43
+ if paths.capath:
44
+ options.tls_ca_dir_path = os.fsencode(paths.capath)
45
+ check_status(CFileSystemsInitialize(options))
46
+
47
+
48
+ if sys.platform == 'linux':
49
+ # ARROW-9261: On Linux, we may need to fixup the paths to TLS CA certs
50
+ # (especially in manylinux packages) since the values hardcoded at
51
+ # compile-time in libcurl may be wrong.
52
+ _init_ca_paths()
53
+
54
+
55
+ cdef inline c_string _path_as_bytes(path) except *:
56
+ # handle only abstract paths, not bound to any filesystem like pathlib is,
57
+ # so we only accept plain strings
58
+ if not isinstance(path, (bytes, str)):
59
+ raise TypeError('Path must be a string')
60
+ # tobytes always uses utf-8, which is more or less ok, at least on Windows
61
+ # since the C++ side then decodes from utf-8. On Unix, os.fsencode may be
62
+ # better.
63
+ return tobytes(path)
64
+
65
+
66
+ cdef object _wrap_file_type(CFileType ty):
67
+ return FileType(<int8_t> ty)
68
+
69
+
70
+ cdef CFileType _unwrap_file_type(FileType ty) except *:
71
+ if ty == FileType.Unknown:
72
+ return CFileType_Unknown
73
+ elif ty == FileType.NotFound:
74
+ return CFileType_NotFound
75
+ elif ty == FileType.File:
76
+ return CFileType_File
77
+ elif ty == FileType.Directory:
78
+ return CFileType_Directory
79
+ assert 0
80
+
81
+
82
+ def _file_type_to_string(ty):
83
+ # Python 3.11 changed str(IntEnum) to return the string representation
84
+ # of the integer value: https://github.com/python/cpython/issues/94763
85
+ return f"{ty.__class__.__name__}.{ty._name_}"
86
+
87
+
88
+ cdef class FileInfo(_Weakrefable):
89
+ """
90
+ FileSystem entry info.
91
+
92
+ Parameters
93
+ ----------
94
+ path : str
95
+ The full path to the filesystem entry.
96
+ type : FileType
97
+ The type of the filesystem entry.
98
+ mtime : datetime or float, default None
99
+ If given, the modification time of the filesystem entry.
100
+ If a float is given, it is the number of seconds since the
101
+ Unix epoch.
102
+ mtime_ns : int, default None
103
+ If given, the modification time of the filesystem entry,
104
+ in nanoseconds since the Unix epoch.
105
+ `mtime` and `mtime_ns` are mutually exclusive.
106
+ size : int, default None
107
+ If given, the filesystem entry size in bytes. This should only
108
+ be given if `type` is `FileType.File`.
109
+
110
+ Examples
111
+ --------
112
+ Generate a file:
113
+
114
+ >>> from pyarrow import fs
115
+ >>> local = fs.LocalFileSystem()
116
+ >>> path_fs = local_path + '/pyarrow-fs-example.dat'
117
+ >>> with local.open_output_stream(path_fs) as stream:
118
+ ... stream.write(b'data')
119
+ 4
120
+
121
+ Get FileInfo object using ``get_file_info()``:
122
+
123
+ >>> file_info = local.get_file_info(path_fs)
124
+ >>> file_info
125
+ <FileInfo for '.../pyarrow-fs-example.dat': type=FileType.File, size=4>
126
+
127
+ Inspect FileInfo attributes:
128
+
129
+ >>> file_info.type
130
+ <FileType.File: 2>
131
+
132
+ >>> file_info.is_file
133
+ True
134
+
135
+ >>> file_info.path
136
+ '/.../pyarrow-fs-example.dat'
137
+
138
+ >>> file_info.base_name
139
+ 'pyarrow-fs-example.dat'
140
+
141
+ >>> file_info.size
142
+ 4
143
+
144
+ >>> file_info.extension
145
+ 'dat'
146
+
147
+ >>> file_info.mtime # doctest: +SKIP
148
+ datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
149
+
150
+ >>> file_info.mtime_ns # doctest: +SKIP
151
+ 1656489370873922073
152
+ """
153
+
154
+ def __init__(self, path, FileType type=FileType.Unknown, *,
155
+ mtime=None, mtime_ns=None, size=None):
156
+ self.info.set_path(tobytes(path))
157
+ self.info.set_type(_unwrap_file_type(type))
158
+ if mtime is not None:
159
+ if mtime_ns is not None:
160
+ raise TypeError("Only one of mtime and mtime_ns "
161
+ "can be given")
162
+ if isinstance(mtime, datetime):
163
+ self.info.set_mtime(PyDateTime_to_TimePoint(
164
+ <PyDateTime_DateTime*> mtime))
165
+ else:
166
+ self.info.set_mtime(TimePoint_from_s(mtime))
167
+ elif mtime_ns is not None:
168
+ self.info.set_mtime(TimePoint_from_ns(mtime_ns))
169
+ if size is not None:
170
+ self.info.set_size(size)
171
+
172
+ @staticmethod
173
+ cdef wrap(CFileInfo info):
174
+ cdef FileInfo self = FileInfo.__new__(FileInfo)
175
+ self.info = move(info)
176
+ return self
177
+
178
+ cdef inline CFileInfo unwrap(self) nogil:
179
+ return self.info
180
+
181
+ @staticmethod
182
+ cdef CFileInfo unwrap_safe(obj):
183
+ if not isinstance(obj, FileInfo):
184
+ raise TypeError("Expected FileInfo instance, got {0}"
185
+ .format(type(obj)))
186
+ return (<FileInfo> obj).unwrap()
187
+
188
+ def __repr__(self):
189
+ def getvalue(attr):
190
+ try:
191
+ return getattr(self, attr)
192
+ except ValueError:
193
+ return ''
194
+
195
+ s = (f'<FileInfo for {self.path!r}: '
196
+ f'type={_file_type_to_string(self.type)}')
197
+ if self.is_file:
198
+ s += f', size={self.size}'
199
+ s += '>'
200
+ return s
201
+
202
+ @property
203
+ def type(self):
204
+ """
205
+ Type of the file.
206
+
207
+ The returned enum values can be the following:
208
+
209
+ - FileType.NotFound: target does not exist
210
+ - FileType.Unknown: target exists but its type is unknown (could be a
211
+ special file such as a Unix socket or character device, or
212
+ Windows NUL / CON / ...)
213
+ - FileType.File: target is a regular file
214
+ - FileType.Directory: target is a regular directory
215
+
216
+ Returns
217
+ -------
218
+ type : FileType
219
+ """
220
+ return _wrap_file_type(self.info.type())
221
+
222
+ @property
223
+ def is_file(self):
224
+ """
225
+ """
226
+ return self.type == FileType.File
227
+
228
+ @property
229
+ def path(self):
230
+ """
231
+ The full file path in the filesystem.
232
+
233
+ Examples
234
+ --------
235
+ >>> file_info = local.get_file_info(path)
236
+ >>> file_info.path
237
+ '/.../pyarrow-fs-example.dat'
238
+ """
239
+ return frombytes(self.info.path())
240
+
241
+ @property
242
+ def base_name(self):
243
+ """
244
+ The file base name.
245
+
246
+ Component after the last directory separator.
247
+
248
+ Examples
249
+ --------
250
+ >>> file_info = local.get_file_info(path)
251
+ >>> file_info.base_name
252
+ 'pyarrow-fs-example.dat'
253
+ """
254
+ return frombytes(self.info.base_name())
255
+
256
+ @property
257
+ def size(self):
258
+ """
259
+ The size in bytes, if available.
260
+
261
+ Only regular files are guaranteed to have a size.
262
+
263
+ Returns
264
+ -------
265
+ size : int or None
266
+ """
267
+ cdef int64_t size
268
+ size = self.info.size()
269
+ return (size if size != -1 else None)
270
+
271
+ @property
272
+ def extension(self):
273
+ """
274
+ The file extension.
275
+
276
+ Examples
277
+ --------
278
+ >>> file_info = local.get_file_info(path)
279
+ >>> file_info.extension
280
+ 'dat'
281
+ """
282
+ return frombytes(self.info.extension())
283
+
284
+ @property
285
+ def mtime(self):
286
+ """
287
+ The time of last modification, if available.
288
+
289
+ Returns
290
+ -------
291
+ mtime : datetime.datetime or None
292
+
293
+ Examples
294
+ --------
295
+ >>> file_info = local.get_file_info(path)
296
+ >>> file_info.mtime # doctest: +SKIP
297
+ datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc)
298
+ """
299
+ cdef int64_t nanoseconds
300
+ nanoseconds = TimePoint_to_ns(self.info.mtime())
301
+ return (datetime.fromtimestamp(nanoseconds / 1.0e9, timezone.utc)
302
+ if nanoseconds != -1 else None)
303
+
304
+ @property
305
+ def mtime_ns(self):
306
+ """
307
+ The time of last modification, if available, expressed in nanoseconds
308
+ since the Unix epoch.
309
+
310
+ Returns
311
+ -------
312
+ mtime_ns : int or None
313
+
314
+ Examples
315
+ --------
316
+ >>> file_info = local.get_file_info(path)
317
+ >>> file_info.mtime_ns # doctest: +SKIP
318
+ 1656489370873922073
319
+ """
320
+ cdef int64_t nanoseconds
321
+ nanoseconds = TimePoint_to_ns(self.info.mtime())
322
+ return (nanoseconds if nanoseconds != -1 else None)
323
+
324
+
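+ # Illustrative usage sketch: FileInfo objects can also be constructed
+ # directly, e.g. when implementing a custom FileSystemHandler (the path
+ # below is hypothetical).
+ #
+ # >>> from pyarrow.fs import FileInfo, FileType
+ # >>> info = FileInfo('bucket/data/part-0.parquet', FileType.File,
+ # ...                 size=4096, mtime=1656489370.0)
+ # >>> info.is_file
+ # True
+ # >>> info.base_name
+ # 'part-0.parquet'
+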
325
+ cdef class FileSelector(_Weakrefable):
326
+ """
327
+ File and directory selector.
328
+
329
+ It contains a set of options that describes how to search for files and
330
+ directories.
331
+
332
+ Parameters
333
+ ----------
334
+ base_dir : str
335
+ The directory in which to select files. Relative paths also work; use
336
+ '.' for the current directory and '..' for the parent.
337
+ allow_not_found : bool, default False
338
+ The behavior if `base_dir` doesn't exist in the filesystem.
339
+ If false, an error is returned.
340
+ If true, an empty selection is returned.
341
+ recursive : bool, default False
342
+ Whether to recurse into subdirectories.
343
+
344
+ Examples
345
+ --------
346
+ List the contents of a directory and subdirectories:
347
+
348
+ >>> selector_1 = fs.FileSelector(local_path, recursive=True)
349
+ >>> local.get_file_info(selector_1) # doctest: +SKIP
350
+ [<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
351
+ <FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>,
352
+ <FileInfo for 'tmp/alphabet/subdir/example_copy.dat': type=FileType.File, size=4>]
353
+
354
+ List only the contents of the base directory:
355
+
356
+ >>> selector_2 = fs.FileSelector(local_path)
357
+ >>> local.get_file_info(selector_2) # doctest: +SKIP
358
+ [<FileInfo for 'tmp/alphabet/example.dat': type=FileType.File, size=4>,
359
+ <FileInfo for 'tmp/alphabet/subdir': type=FileType.Directory>]
360
+
361
+ Return empty selection if the directory doesn't exist:
362
+
363
+ >>> selector_not_found = fs.FileSelector(local_path + '/missing',
364
+ ... recursive=True,
365
+ ... allow_not_found=True)
366
+ >>> local.get_file_info(selector_not_found)
367
+ []
368
+ """
369
+
370
+ def __init__(self, base_dir, bint allow_not_found=False,
371
+ bint recursive=False):
372
+ self.base_dir = base_dir
373
+ self.recursive = recursive
374
+ self.allow_not_found = allow_not_found
375
+
376
+ @staticmethod
377
+ cdef FileSelector wrap(CFileSelector wrapped):
378
+ cdef FileSelector self = FileSelector.__new__(FileSelector)
379
+ self.selector = move(wrapped)
380
+ return self
381
+
382
+ cdef inline CFileSelector unwrap(self) nogil:
383
+ return self.selector
384
+
385
+ @property
386
+ def base_dir(self):
387
+ return frombytes(self.selector.base_dir)
388
+
389
+ @base_dir.setter
390
+ def base_dir(self, base_dir):
391
+ self.selector.base_dir = _path_as_bytes(base_dir)
392
+
393
+ @property
394
+ def allow_not_found(self):
395
+ return self.selector.allow_not_found
396
+
397
+ @allow_not_found.setter
398
+ def allow_not_found(self, bint allow_not_found):
399
+ self.selector.allow_not_found = allow_not_found
400
+
401
+ @property
402
+ def recursive(self):
403
+ return self.selector.recursive
404
+
405
+ @recursive.setter
406
+ def recursive(self, bint recursive):
407
+ self.selector.recursive = recursive
408
+
409
+ def __repr__(self):
410
+ return ("<FileSelector base_dir={0.base_dir!r} "
411
+ "recursive={0.recursive}>".format(self))
412
+
413
+
414
+ cdef class FileSystem(_Weakrefable):
415
+ """
416
+ Abstract file system API.
417
+ """
418
+
419
+ def __init__(self):
420
+ raise TypeError("FileSystem is an abstract class, instantiate one of "
421
+ "the subclasses instead: LocalFileSystem or "
422
+ "SubTreeFileSystem")
423
+
424
+ @staticmethod
425
+ def from_uri(uri):
426
+ """
427
+ Create a new FileSystem from URI or Path.
428
+
429
+ Recognized URI schemes are "file", "mock", "s3fs", "gs", "gcs", "hdfs" and "viewfs".
430
+ In addition, the argument can be a pathlib.Path object, or a string
431
+ describing an absolute local path.
432
+
433
+ Parameters
434
+ ----------
435
+ uri : string
436
+ URI-based path, for example: file:///some/local/path.
437
+
438
+ Returns
439
+ -------
440
+ tuple of (FileSystem, str path)
441
+ With (filesystem, path) tuple where path is the abstract path
442
+ inside the FileSystem instance.
443
+
444
+ Examples
445
+ --------
446
+ Create a new FileSystem subclass from a URI:
447
+
448
+ >>> uri = 'file:///{}/pyarrow-fs-example.dat'.format(local_path)
449
+ >>> local_new, path_new = fs.FileSystem.from_uri(uri)
450
+ >>> local_new
451
+ <pyarrow._fs.LocalFileSystem object at ...
452
+ >>> path_new
453
+ '/.../pyarrow-fs-example.dat'
454
+
455
+ Or from a s3 bucket:
456
+
457
+ >>> fs.FileSystem.from_uri("s3://usgs-landsat/collection02/")
458
+ (<pyarrow._s3fs.S3FileSystem object at ...>, 'usgs-landsat/collection02')
459
+ """
460
+ cdef:
461
+ c_string c_path
462
+ c_string c_uri
463
+ CResult[shared_ptr[CFileSystem]] result
464
+
465
+ if isinstance(uri, pathlib.Path):
466
+ # Make absolute
467
+ uri = uri.resolve().absolute()
468
+ c_uri = tobytes(_stringify_path(uri))
469
+ with nogil:
470
+ result = CFileSystemFromUriOrPath(c_uri, &c_path)
471
+ return FileSystem.wrap(GetResultValue(result)), frombytes(c_path)
472
+
473
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
474
+ self.wrapped = wrapped
475
+ self.fs = wrapped.get()
476
+
477
+ @staticmethod
478
+ cdef wrap(const shared_ptr[CFileSystem]& sp):
479
+ cdef FileSystem self
480
+
481
+ typ = frombytes(sp.get().type_name())
482
+ if typ == 'local':
483
+ self = LocalFileSystem.__new__(LocalFileSystem)
484
+ elif typ == 'mock':
485
+ self = _MockFileSystem.__new__(_MockFileSystem)
486
+ elif typ == 'subtree':
487
+ self = SubTreeFileSystem.__new__(SubTreeFileSystem)
488
+ elif typ == 's3':
489
+ from pyarrow._s3fs import S3FileSystem
490
+ self = S3FileSystem.__new__(S3FileSystem)
491
+ elif typ == 'gcs':
492
+ from pyarrow._gcsfs import GcsFileSystem
493
+ self = GcsFileSystem.__new__(GcsFileSystem)
494
+ elif typ == 'abfs':
495
+ from pyarrow._azurefs import AzureFileSystem
496
+ self = AzureFileSystem.__new__(AzureFileSystem)
497
+ elif typ == 'hdfs':
498
+ from pyarrow._hdfs import HadoopFileSystem
499
+ self = HadoopFileSystem.__new__(HadoopFileSystem)
500
+ elif typ.startswith('py::'):
501
+ self = PyFileSystem.__new__(PyFileSystem)
502
+ else:
503
+ raise TypeError('Cannot wrap FileSystem pointer')
504
+
505
+ self.init(sp)
506
+ return self
507
+
508
+ cdef inline shared_ptr[CFileSystem] unwrap(self) nogil:
509
+ return self.wrapped
510
+
511
+ def equals(self, FileSystem other not None):
512
+ """
513
+ Parameters
514
+ ----------
515
+ other : pyarrow.fs.FileSystem
516
+
517
+ Returns
518
+ -------
519
+ bool
520
+ """
521
+ return self.fs.Equals(other.unwrap())
522
+
523
+ def __eq__(self, other):
524
+ try:
525
+ return self.equals(other)
526
+ except TypeError:
527
+ return NotImplemented
528
+
529
+ @property
530
+ def type_name(self):
531
+ """
532
+ The filesystem's type name.
533
+ """
534
+ return frombytes(self.fs.type_name())
535
+
536
+ def get_file_info(self, paths_or_selector):
537
+ """
538
+ Get info for the given files.
539
+
540
+ Any symlink is automatically dereferenced, recursively. A non-existing
541
+ or unreachable file returns a FileInfo object and has a FileType of
542
+ value NotFound. An exception indicates a truly exceptional condition
543
+ (low-level I/O error, etc.).
544
+
545
+ Parameters
546
+ ----------
547
+ paths_or_selector : FileSelector, path-like or list of path-likes
548
+ Either a selector object, a path-like object or a list of
549
+ path-like objects. The selector's base directory will not be
550
+ part of the results, even if it exists. If it doesn't exist,
551
+ use `allow_not_found`.
552
+
553
+ Returns
554
+ -------
555
+ FileInfo or list of FileInfo
556
+ Single FileInfo object is returned for a single path, otherwise
557
+ a list of FileInfo objects is returned.
558
+
559
+ Examples
560
+ --------
561
+ >>> local
562
+ <pyarrow._fs.LocalFileSystem object at ...>
563
+ >>> local.get_file_info("/{}/pyarrow-fs-example.dat".format(local_path))
564
+ <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
565
+ """
566
+ cdef:
567
+ CFileInfo info
568
+ c_string path
569
+ vector[CFileInfo] infos
570
+ vector[c_string] paths
571
+ CFileSelector selector
572
+
573
+ if isinstance(paths_or_selector, FileSelector):
574
+ with nogil:
575
+ selector = (<FileSelector>paths_or_selector).selector
576
+ infos = GetResultValue(self.fs.GetFileInfo(selector))
577
+ elif isinstance(paths_or_selector, (list, tuple)):
578
+ paths = [_path_as_bytes(s) for s in paths_or_selector]
579
+ with nogil:
580
+ infos = GetResultValue(self.fs.GetFileInfo(paths))
581
+ elif isinstance(paths_or_selector, (bytes, str)):
582
+ path = _path_as_bytes(paths_or_selector)
583
+ with nogil:
584
+ info = GetResultValue(self.fs.GetFileInfo(path))
585
+ return FileInfo.wrap(info)
586
+ else:
587
+ raise TypeError('Must pass either path(s) or a FileSelector')
588
+
589
+ return [FileInfo.wrap(info) for info in infos]
590
+
591
+ def create_dir(self, path, *, bint recursive=True):
592
+ """
593
+ Create a directory and subdirectories.
594
+
595
+ This function succeeds if the directory already exists.
596
+
597
+ Parameters
598
+ ----------
599
+ path : str
600
+ The path of the new directory.
601
+ recursive : bool, default True
602
+ Create nested directories as well.
603
+ """
604
+ cdef c_string directory = _path_as_bytes(path)
605
+ with nogil:
606
+ check_status(self.fs.CreateDir(directory, recursive=recursive))
607
+
608
+ def delete_dir(self, path):
609
+ """
610
+ Delete a directory and its contents, recursively.
611
+
612
+ Parameters
613
+ ----------
614
+ path : str
615
+ The path of the directory to be deleted.
616
+ """
617
+ cdef c_string directory = _path_as_bytes(path)
618
+ with nogil:
619
+ check_status(self.fs.DeleteDir(directory))
620
+
621
+ def delete_dir_contents(self, path, *,
622
+ bint accept_root_dir=False,
623
+ bint missing_dir_ok=False):
624
+ """
625
+ Delete a directory's contents, recursively.
626
+
627
+ Like delete_dir, but doesn't delete the directory itself.
628
+
629
+ Parameters
630
+ ----------
631
+ path : str
632
+ The path of the directory to be deleted.
633
+ accept_root_dir : boolean, default False
634
+ Allow deleting the root directory's contents
635
+ (if path is empty or "/")
636
+ missing_dir_ok : boolean, default False
637
+ If False then an error is raised if path does
638
+ not exist
639
+ """
640
+ cdef c_string directory = _path_as_bytes(path)
641
+ if accept_root_dir and directory.strip(b"/") == b"":
642
+ with nogil:
643
+ check_status(self.fs.DeleteRootDirContents())
644
+ else:
645
+ with nogil:
646
+ check_status(self.fs.DeleteDirContents(directory,
647
+ missing_dir_ok))
648
+
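+ # Illustrative usage sketch: empty a scratch directory but keep the
+ # directory itself (the "/tmp/scratch" path is hypothetical).
+ #
+ # >>> from pyarrow import fs
+ # >>> local = fs.LocalFileSystem()
+ # >>> local.delete_dir_contents('/tmp/scratch',
+ # ...                           missing_dir_ok=True)   # doctest: +SKIP
+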
649
+ def move(self, src, dest):
650
+ """
651
+ Move / rename a file or directory.
652
+
653
+ If the destination exists:
654
+ - if it is a non-empty directory, an error is returned
655
+ - otherwise, if it has the same type as the source, it is replaced
656
+ - otherwise, behavior is unspecified (implementation-dependent).
657
+
658
+ Parameters
659
+ ----------
660
+ src : str
661
+ The path of the file or the directory to be moved.
662
+ dest : str
663
+ The destination path where the file or directory is moved to.
664
+
665
+ Examples
666
+ --------
667
+ Create a new folder with a file:
668
+
669
+ >>> local.create_dir('/tmp/other_dir')
670
+ >>> local.copy_file(path,'/tmp/move_example.dat')
671
+
672
+ Move the file:
673
+
674
+ >>> local.move('/tmp/move_example.dat',
675
+ ... '/tmp/other_dir/move_example_2.dat')
676
+
677
+ Inspect the file info:
678
+
679
+ >>> local.get_file_info('/tmp/other_dir/move_example_2.dat')
680
+ <FileInfo for '/tmp/other_dir/move_example_2.dat': type=FileType.File, size=4>
681
+ >>> local.get_file_info('/tmp/move_example.dat')
682
+ <FileInfo for '/tmp/move_example.dat': type=FileType.NotFound>
683
+
684
+ Delete the folder:
685
+ >>> local.delete_dir('/tmp/other_dir')
686
+ """
687
+ cdef:
688
+ c_string source = _path_as_bytes(src)
689
+ c_string destination = _path_as_bytes(dest)
690
+ with nogil:
691
+ check_status(self.fs.Move(source, destination))
692
+
693
+ def copy_file(self, src, dest):
694
+ """
695
+ Copy a file.
696
+
697
+ If the destination exists and is a directory, an error is returned.
698
+ Otherwise, it is replaced.
699
+
700
+ Parameters
701
+ ----------
702
+ src : str
703
+ The path of the file to be copied from.
704
+ dest : str
705
+ The destination path where the file is copied to.
706
+
707
+ Examples
708
+ --------
709
+ >>> local.copy_file(path,
710
+ ... local_path + '/pyarrow-fs-example_copy.dat')
711
+
712
+ Inspect the file info:
713
+
714
+ >>> local.get_file_info(local_path + '/pyarrow-fs-example_copy.dat')
715
+ <FileInfo for '/.../pyarrow-fs-example_copy.dat': type=FileType.File, size=4>
716
+ >>> local.get_file_info(path)
717
+ <FileInfo for '/.../pyarrow-fs-example.dat': type=FileType.File, size=4>
718
+ """
719
+ cdef:
720
+ c_string source = _path_as_bytes(src)
721
+ c_string destination = _path_as_bytes(dest)
722
+ with nogil:
723
+ check_status(self.fs.CopyFile(source, destination))
724
+
725
+ def delete_file(self, path):
726
+ """
727
+ Delete a file.
728
+
729
+ Parameters
730
+ ----------
731
+ path : str
732
+ The path of the file to be deleted.
733
+ """
734
+ cdef c_string file = _path_as_bytes(path)
735
+ with nogil:
736
+ check_status(self.fs.DeleteFile(file))
737
+
738
+ def _wrap_input_stream(self, stream, path, compression, buffer_size):
739
+ if buffer_size is not None and buffer_size != 0:
740
+ stream = BufferedInputStream(stream, buffer_size)
741
+ if compression == 'detect':
742
+ compression = _detect_compression(path)
743
+ if compression is not None:
744
+ stream = CompressedInputStream(stream, compression)
745
+ return stream
746
+
747
+ def _wrap_output_stream(self, stream, path, compression, buffer_size):
748
+ if buffer_size is not None and buffer_size != 0:
749
+ stream = BufferedOutputStream(stream, buffer_size)
750
+ if compression == 'detect':
751
+ compression = _detect_compression(path)
752
+ if compression is not None:
753
+ stream = CompressedOutputStream(stream, compression)
754
+ return stream
755
+
756
+ def open_input_file(self, path):
757
+ """
758
+ Open an input file for random access reading.
759
+
760
+ Parameters
761
+ ----------
762
+ path : str
763
+ The source to open for reading.
764
+
765
+ Returns
766
+ -------
767
+ stream : NativeFile
768
+
769
+ Examples
770
+ --------
771
+ Print the data from the file with `open_input_file()`:
772
+
773
+ >>> with local.open_input_file(path) as f:
774
+ ... print(f.readall())
775
+ b'data'
776
+ """
777
+ cdef:
778
+ c_string pathstr = _path_as_bytes(path)
779
+ NativeFile stream = NativeFile()
780
+ shared_ptr[CRandomAccessFile] in_handle
781
+
782
+ with nogil:
783
+ in_handle = GetResultValue(self.fs.OpenInputFile(pathstr))
784
+
785
+ stream.set_random_access_file(in_handle)
786
+ stream.is_readable = True
787
+ return stream
788
+
789
+ def open_input_stream(self, path, compression='detect', buffer_size=None):
790
+ """
791
+ Open an input stream for sequential reading.
792
+
793
+ Parameters
794
+ ----------
795
+ path : str
796
+ The source to open for reading.
797
+ compression : str optional, default 'detect'
798
+ The compression algorithm to use for on-the-fly decompression.
799
+ If "detect" and source is a file path, then compression will be
800
+ chosen based on the file extension.
801
+ If None, no compression will be applied. Otherwise, a well-known
802
+ algorithm name must be supplied (e.g. "gzip").
803
+ buffer_size : int optional, default None
804
+ If None or 0, no buffering will happen. Otherwise the size of the
805
+ temporary read buffer.
806
+
807
+ Returns
808
+ -------
809
+ stream : NativeFile
810
+
811
+ Examples
812
+ --------
813
+ Print the data from the file with `open_input_stream()`:
814
+
815
+ >>> with local.open_input_stream(path) as f:
816
+ ... print(f.readall())
817
+ b'data'
818
+ """
819
+ cdef:
820
+ c_string pathstr = _path_as_bytes(path)
821
+ NativeFile stream = NativeFile()
822
+ shared_ptr[CInputStream] in_handle
823
+
824
+ with nogil:
825
+ in_handle = GetResultValue(self.fs.OpenInputStream(pathstr))
826
+
827
+ stream.set_input_stream(in_handle)
828
+ stream.is_readable = True
829
+
830
+ return self._wrap_input_stream(
831
+ stream, path=path, compression=compression, buffer_size=buffer_size
832
+ )
833
+
834
+ def open_output_stream(self, path, compression='detect',
835
+ buffer_size=None, metadata=None):
836
+ """
837
+ Open an output stream for sequential writing.
838
+
839
+ If the target already exists, existing data is truncated.
840
+
841
+ Parameters
842
+ ----------
843
+ path : str
844
+ The destination path to open for writing.
845
+ compression : str optional, default 'detect'
846
+ The compression algorithm to use for on-the-fly compression.
847
+ If "detect" and source is a file path, then compression will be
848
+ chosen based on the file extension.
849
+ If None, no compression will be applied. Otherwise, a well-known
850
+ algorithm name must be supplied (e.g. "gzip").
851
+ buffer_size : int optional, default None
852
+ If None or 0, no buffering will happen. Otherwise the size of the
853
+ temporary write buffer.
854
+ metadata : dict optional, default None
855
+ If not None, a mapping of string keys to string values.
856
+ Some filesystems support storing metadata along the file
857
+ (such as "Content-Type").
858
+ Unsupported metadata keys will be ignored.
859
+
860
+ Returns
861
+ -------
862
+ stream : NativeFile
863
+
864
+ Examples
865
+ --------
866
+ >>> local = fs.LocalFileSystem()
867
+ >>> with local.open_output_stream(path) as stream:
868
+ ... stream.write(b'data')
869
+ 4
870
+ """
871
+ cdef:
872
+ c_string pathstr = _path_as_bytes(path)
873
+ NativeFile stream = NativeFile()
874
+ shared_ptr[COutputStream] out_handle
875
+ shared_ptr[const CKeyValueMetadata] c_metadata
876
+
877
+ if metadata is not None:
878
+ c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))
879
+
880
+ with nogil:
881
+ out_handle = GetResultValue(
882
+ self.fs.OpenOutputStream(pathstr, c_metadata))
883
+
884
+ stream.set_output_stream(out_handle)
885
+ stream.is_writable = True
886
+
887
+ return self._wrap_output_stream(
888
+ stream, path=path, compression=compression, buffer_size=buffer_size
889
+ )
890
+
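A short, hedged sketch exercising the parameters documented above: an explicit `compression` overrides extension-based detection, and metadata keys unsupported by the local filesystem are simply ignored, as the docstring states. The paths are illustrative:

```python
from pyarrow import fs

local = fs.LocalFileSystem()

# Explicit compression overrides extension-based detection; the
# "Content-Type" entry is ignored by LocalFileSystem.
with local.open_output_stream('/tmp/output_example.dat',
                              compression='gzip',
                              buffer_size=32 * 1024,
                              metadata={'Content-Type': 'text/plain'}) as out:
    out.write(b'some payload')

# The ".dat" extension carries no compression hint, so the matching codec
# has to be named explicitly when reading back.
with local.open_input_stream('/tmp/output_example.dat', compression='gzip') as f:
    print(f.read())  # b'some payload'
```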
891
+ def open_append_stream(self, path, compression='detect',
892
+ buffer_size=None, metadata=None):
893
+ """
894
+ Open an output stream for appending.
895
+
896
+ If the target doesn't exist, a new empty file is created.
897
+
898
+ .. note::
899
+ Some filesystem implementations do not support efficient
900
+ appending to an existing file, in which case this method will
901
+ raise NotImplementedError.
902
+ Consider writing to multiple files (using e.g. the dataset layer)
903
+ instead.
904
+
905
+ Parameters
906
+ ----------
907
+ path : str
908
+ The destination path to open for writing.
909
+ compression : str optional, default 'detect'
910
+ The compression algorithm to use for on-the-fly compression.
911
+ If "detect" and source is a file path, then compression will be
912
+ chosen based on the file extension.
913
+ If None, no compression will be applied. Otherwise, a well-known
914
+ algorithm name must be supplied (e.g. "gzip").
915
+ buffer_size : int optional, default None
916
+ If None or 0, no buffering will happen. Otherwise the size of the
917
+ temporary write buffer.
918
+ metadata : dict optional, default None
919
+ If not None, a mapping of string keys to string values.
920
+ Some filesystems support storing metadata along the file
921
+ (such as "Content-Type").
922
+ Unsupported metadata keys will be ignored.
923
+
924
+ Returns
925
+ -------
926
+ stream : NativeFile
927
+
928
+ Examples
929
+ --------
930
+ Append new data to an existing, non-empty file:
931
+
932
+ >>> with local.open_append_stream(path) as f:
933
+ ... f.write(b'+newly added')
934
+ 12
935
+
936
+ Print out the content of the file:
937
+
938
+ >>> with local.open_input_file(path) as f:
939
+ ... print(f.readall())
940
+ b'data+newly added'
941
+ """
942
+ cdef:
943
+ c_string pathstr = _path_as_bytes(path)
944
+ NativeFile stream = NativeFile()
945
+ shared_ptr[COutputStream] out_handle
946
+ shared_ptr[const CKeyValueMetadata] c_metadata
947
+
948
+ if metadata is not None:
949
+ c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata))
950
+
951
+ with nogil:
952
+ out_handle = GetResultValue(
953
+ self.fs.OpenAppendStream(pathstr, c_metadata))
954
+
955
+ stream.set_output_stream(out_handle)
956
+ stream.is_writable = True
957
+
958
+ return self._wrap_output_stream(
959
+ stream, path=path, compression=compression, buffer_size=buffer_size
960
+ )
961
+
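The note about `NotImplementedError` is worth planning for when code may run against object stores (for example S3 or GCS) that cannot append. A hedged sketch of one possible fallback; the helper name and rewrite policy are illustrative, not part of pyarrow:

```python
from pyarrow import fs

def append_or_rewrite(filesystem, path, payload):
    """Append if supported, otherwise rewrite the whole file (illustrative)."""
    try:
        with filesystem.open_append_stream(path) as f:
            f.write(payload)
    except NotImplementedError:
        # Object stores generally cannot append: read the existing content
        # (assuming a missing path raises FileNotFoundError) and rewrite.
        try:
            with filesystem.open_input_stream(path) as f:
                existing = f.read()
        except FileNotFoundError:
            existing = b''
        with filesystem.open_output_stream(path) as f:
            f.write(existing + payload)

append_or_rewrite(fs.LocalFileSystem(), '/tmp/append_example.dat', b'abc')
```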
962
+ def normalize_path(self, path):
963
+ """
964
+ Normalize filesystem path.
965
+
966
+ Parameters
967
+ ----------
968
+ path : str
969
+ The path to normalize
970
+
971
+ Returns
972
+ -------
973
+ normalized_path : str
974
+ The normalized path
975
+ """
976
+ cdef:
977
+ c_string c_path = _path_as_bytes(path)
978
+ c_string c_path_normalized
979
+
980
+ c_path_normalized = GetResultValue(self.fs.NormalizePath(c_path))
981
+ return frombytes(c_path_normalized)
982
+
983
+
984
+ cdef class LocalFileSystem(FileSystem):
985
+ """
986
+ A FileSystem implementation accessing files on the local machine.
987
+
988
+ Details such as symlinks are abstracted away (symlinks are always followed,
989
+ except when deleting an entry).
990
+
991
+ Parameters
992
+ ----------
993
+ use_mmap : bool, default False
994
+ Whether open_input_stream and open_input_file should return
995
+ a mmap'ed file or a regular file.
996
+
997
+ Examples
998
+ --------
999
+ Create a FileSystem object with the LocalFileSystem constructor:
1000
+
1001
+ >>> from pyarrow import fs
1002
+ >>> local = fs.LocalFileSystem()
1003
+ >>> local
1004
+ <pyarrow._fs.LocalFileSystem object at ...>
1005
+
1006
+ and write data to the file:
1007
+
1008
+ >>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
1009
+ ... stream.write(b'data')
1010
+ 4
1011
+ >>> with local.open_input_stream('/tmp/local_fs.dat') as stream:
1012
+ ... print(stream.readall())
1013
+ b'data'
1014
+
1015
+ Create a FileSystem object inferred from a URI of the saved file:
1016
+
1017
+ >>> local_new, path = fs.LocalFileSystem().from_uri('/tmp/local_fs.dat')
1018
+ >>> local_new
1019
+ <pyarrow._fs.LocalFileSystem object at ...>
1020
+ >>> path
1021
+ '/tmp/local_fs.dat'
1022
+
1023
+ Check if FileSystems `local` and `local_new` are equal:
1024
+
1025
+ >>> local.equals(local_new)
1026
+ True
1027
+
1028
+ Compare two different FileSystems:
1029
+
1030
+ >>> local2 = fs.LocalFileSystem(use_mmap=True)
1031
+ >>> local.equals(local2)
1032
+ False
1033
+
1034
+ Copy a file and print out the data:
1035
+
1036
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/local_fs-copy.dat')
1037
+ >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as stream:
1038
+ ... print(stream.readall())
1039
+ ...
1040
+ b'data'
1041
+
1042
+ Open an output stream for appending, add text and print the new data:
1043
+
1044
+ >>> with local.open_append_stream('/tmp/local_fs-copy.dat') as f:
1045
+ ... f.write(b'+newly added')
1046
+ 12
1047
+
1048
+ >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as f:
1049
+ ... print(f.readall())
1050
+ b'data+newly added'
1051
+
1052
+ Create a directory, copy a file into it and then delete the whole directory:
1053
+
1054
+ >>> local.create_dir('/tmp/new_folder')
1055
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
1056
+ >>> local.get_file_info('/tmp/new_folder')
1057
+ <FileInfo for '/tmp/new_folder': type=FileType.Directory>
1058
+ >>> local.delete_dir('/tmp/new_folder')
1059
+ >>> local.get_file_info('/tmp/new_folder')
1060
+ <FileInfo for '/tmp/new_folder': type=FileType.NotFound>
1061
+
1062
+ Create a directory, copy a file into it and then delete
1063
+ the content of the directory:
1064
+
1065
+ >>> local.create_dir('/tmp/new_folder')
1066
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
1067
+ >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
1068
+ <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.File, size=4>
1069
+ >>> local.delete_dir_contents('/tmp/new_folder')
1070
+ >>> local.get_file_info('/tmp/new_folder')
1071
+ <FileInfo for '/tmp/new_folder': type=FileType.Directory>
1072
+ >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
1073
+ <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
1074
+
1075
+ Create a directory, copy a file into it and then delete
1076
+ the file from the directory:
1077
+
1078
+ >>> local.create_dir('/tmp/new_folder')
1079
+ >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat')
1080
+ >>> local.delete_file('/tmp/new_folder/local_fs.dat')
1081
+ >>> local.get_file_info('/tmp/new_folder/local_fs.dat')
1082
+ <FileInfo for '/tmp/new_folder/local_fs.dat': type=FileType.NotFound>
1083
+ >>> local.get_file_info('/tmp/new_folder')
1084
+ <FileInfo for '/tmp/new_folder': type=FileType.Directory>
1085
+
1086
+ Move the file:
1087
+
1088
+ >>> local.move('/tmp/local_fs-copy.dat', '/tmp/new_folder/local_fs-copy.dat')
1089
+ >>> local.get_file_info('/tmp/new_folder/local_fs-copy.dat')
1090
+ <FileInfo for '/tmp/new_folder/local_fs-copy.dat': type=FileType.File, size=16>
1091
+ >>> local.get_file_info('/tmp/local_fs-copy.dat')
1092
+ <FileInfo for '/tmp/local_fs-copy.dat': type=FileType.NotFound>
1093
+
1094
+ Finally, delete the remaining file:
1095
+ >>> local.delete_file('/tmp/local_fs.dat')
1096
+ """
1097
+
1098
+ def __init__(self, *, use_mmap=False):
1099
+ cdef:
1100
+ CLocalFileSystemOptions opts
1101
+ shared_ptr[CLocalFileSystem] fs
1102
+
1103
+ opts = CLocalFileSystemOptions.Defaults()
1104
+ opts.use_mmap = use_mmap
1105
+
1106
+ fs = make_shared[CLocalFileSystem](opts)
1107
+ self.init(<shared_ptr[CFileSystem]> fs)
1108
+
1109
+ cdef init(self, const shared_ptr[CFileSystem]& c_fs):
1110
+ FileSystem.init(self, c_fs)
1111
+ self.localfs = <CLocalFileSystem*> c_fs.get()
1112
+
1113
+ @staticmethod
1114
+ @binding(True) # Required for cython < 3
1115
+ def _reconstruct(kwargs):
1116
+ # __reduce__ doesn't allow passing named arguments directly to the
1117
+ # reconstructor, hence this wrapper.
1118
+ return LocalFileSystem(**kwargs)
1119
+
1120
+ def __reduce__(self):
1121
+ cdef CLocalFileSystemOptions opts = self.localfs.options()
1122
+ return LocalFileSystem._reconstruct, (dict(
1123
+ use_mmap=opts.use_mmap),)
1124
+
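`_reconstruct` and `__reduce__` above make the filesystem picklable, which is what lets it be shipped to worker processes (for example by `multiprocessing` or Dask). A quick sketch of the round trip:

```python
import pickle
from pyarrow import fs

local = fs.LocalFileSystem(use_mmap=True)
restored = pickle.loads(pickle.dumps(local))

# equals() compares the underlying C++ filesystems, so the use_mmap
# option must survive the round trip for this to hold.
assert restored.equals(local)
```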
1125
+
1126
+ cdef class SubTreeFileSystem(FileSystem):
1127
+ """
1128
+ Delegates to another implementation after prepending a fixed base path.
1129
+
1130
+ This is useful to expose a logical view of a subtree of a filesystem,
1131
+ for example a directory in a LocalFileSystem.
1132
+
1133
+ Note that this makes no security guarantee. For example, symlinks may
1134
+ allow escaping the subtree and accessing other parts of the underlying
1135
+ filesystem.
1136
+
1137
+ Parameters
1138
+ ----------
1139
+ base_path : str
1140
+ The root of the subtree.
1141
+ base_fs : FileSystem
1142
+ The FileSystem object the operations are delegated to.
1143
+
1144
+ Examples
1145
+ --------
1146
+ Create a LocalFileSystem instance:
1147
+
1148
+ >>> from pyarrow import fs
1149
+ >>> local = fs.LocalFileSystem()
1150
+ >>> with local.open_output_stream('/tmp/local_fs.dat') as stream:
1151
+ ... stream.write(b'data')
1152
+ 4
1153
+
1154
+ Create a directory and a SubTreeFileSystem instance:
1155
+
1156
+ >>> local.create_dir('/tmp/sub_tree')
1157
+ >>> subtree = fs.SubTreeFileSystem('/tmp/sub_tree', local)
1158
+
1159
+ Write data into the existing file:
1160
+
1161
+ >>> with subtree.open_append_stream('sub_tree_fs.dat') as f:
1162
+ ... f.write(b'+newly added')
1163
+ 12
1164
+
1165
+ Print out the attributes:
1166
+
1167
+ >>> subtree.base_fs
1168
+ <pyarrow._fs.LocalFileSystem object at ...>
1169
+ >>> subtree.base_path
1170
+ '/tmp/sub_tree/'
1171
+
1172
+ Get info for the given directory or given file:
1173
+
1174
+ >>> subtree.get_file_info('')
1175
+ <FileInfo for '': type=FileType.Directory>
1176
+ >>> subtree.get_file_info('sub_tree_fs.dat')
1177
+ <FileInfo for 'sub_tree_fs.dat': type=FileType.File, size=12>
1178
+
1179
+ Delete the file and directory:
1180
+
1181
+ >>> subtree.delete_file('sub_tree_fs.dat')
1182
+ >>> local.delete_dir('/tmp/sub_tree')
1183
+ >>> local.delete_file('/tmp/local_fs.dat')
1184
+
1185
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
1186
+ """
1187
+
1188
+ def __init__(self, base_path, FileSystem base_fs):
1189
+ cdef:
1190
+ c_string pathstr
1191
+ shared_ptr[CSubTreeFileSystem] wrapped
1192
+
1193
+ pathstr = _path_as_bytes(base_path)
1194
+ wrapped = make_shared[CSubTreeFileSystem](pathstr, base_fs.wrapped)
1195
+
1196
+ self.init(<shared_ptr[CFileSystem]> wrapped)
1197
+
1198
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
1199
+ FileSystem.init(self, wrapped)
1200
+ self.subtreefs = <CSubTreeFileSystem*> wrapped.get()
1201
+
1202
+ def __repr__(self):
1203
+ return ("SubTreeFileSystem(base_path={}, base_fs={}"
1204
+ .format(self.base_path, self.base_fs))
1205
+
1206
+ def __reduce__(self):
1207
+ return SubTreeFileSystem, (
1208
+ frombytes(self.subtreefs.base_path()),
1209
+ FileSystem.wrap(self.subtreefs.base_fs())
1210
+ )
1211
+
1212
+ @property
1213
+ def base_path(self):
1214
+ return frombytes(self.subtreefs.base_path())
1215
+
1216
+ @property
1217
+ def base_fs(self):
1218
+ return FileSystem.wrap(self.subtreefs.base_fs())
1219
+
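To make the path mapping concrete: every path handed to a `SubTreeFileSystem` is resolved against `base_path` on the wrapped filesystem. A small sketch with illustrative paths:

```python
from pyarrow import fs

local = fs.LocalFileSystem()
local.create_dir('/tmp/subtree_demo')
subtree = fs.SubTreeFileSystem('/tmp/subtree_demo', local)

# 'hello.txt' inside the subtree is '/tmp/subtree_demo/hello.txt'
# on the wrapped local filesystem.
with subtree.open_output_stream('hello.txt') as out:
    out.write(b'hi')

print(local.get_file_info('/tmp/subtree_demo/hello.txt'))
# <FileInfo for '/tmp/subtree_demo/hello.txt': type=FileType.File, size=2>
```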
1220
+
1221
+ cdef class _MockFileSystem(FileSystem):
1222
+
1223
+ def __init__(self, datetime current_time=None):
1224
+ cdef shared_ptr[CMockFileSystem] wrapped
1225
+
1226
+ current_time = current_time or datetime.now()
1227
+ wrapped = make_shared[CMockFileSystem](
1228
+ PyDateTime_to_TimePoint(<PyDateTime_DateTime*> current_time)
1229
+ )
1230
+
1231
+ self.init(<shared_ptr[CFileSystem]> wrapped)
1232
+
1233
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
1234
+ FileSystem.init(self, wrapped)
1235
+ self.mockfs = <CMockFileSystem*> wrapped.get()
1236
+
1237
+
1238
+ cdef class PyFileSystem(FileSystem):
1239
+ """
1240
+ A FileSystem with behavior implemented in Python.
1241
+
1242
+ Parameters
1243
+ ----------
1244
+ handler : FileSystemHandler
1245
+ The handler object implementing custom filesystem behavior.
1246
+
1247
+ Examples
1248
+ --------
1249
+ Create an fsspec-based filesystem object for GitHub:
1250
+
1251
+ >>> from fsspec.implementations import github
1252
+ >>> gfs = github.GithubFileSystem('apache', 'arrow') # doctest: +SKIP
1253
+
1254
+ Get a PyArrow FileSystem object:
1255
+
1256
+ >>> from pyarrow.fs import PyFileSystem, FSSpecHandler
1257
+ >>> pa_fs = PyFileSystem(FSSpecHandler(gfs)) # doctest: +SKIP
1258
+
1259
+ Use :func:`~pyarrow.fs.FileSystem` functionality ``get_file_info()``:
1260
+
1261
+ >>> pa_fs.get_file_info('README.md') # doctest: +SKIP
1262
+ <FileInfo for 'README.md': type=FileType.File, size=...>
1263
+ """
1264
+
1265
+ def __init__(self, handler):
1266
+ cdef:
1267
+ CPyFileSystemVtable vtable
1268
+ shared_ptr[CPyFileSystem] wrapped
1269
+
1270
+ if not isinstance(handler, FileSystemHandler):
1271
+ raise TypeError("Expected a FileSystemHandler instance, got {0}"
1272
+ .format(type(handler)))
1273
+
1274
+ vtable.get_type_name = _cb_get_type_name
1275
+ vtable.equals = _cb_equals
1276
+ vtable.get_file_info = _cb_get_file_info
1277
+ vtable.get_file_info_vector = _cb_get_file_info_vector
1278
+ vtable.get_file_info_selector = _cb_get_file_info_selector
1279
+ vtable.create_dir = _cb_create_dir
1280
+ vtable.delete_dir = _cb_delete_dir
1281
+ vtable.delete_dir_contents = _cb_delete_dir_contents
1282
+ vtable.delete_root_dir_contents = _cb_delete_root_dir_contents
1283
+ vtable.delete_file = _cb_delete_file
1284
+ vtable.move = _cb_move
1285
+ vtable.copy_file = _cb_copy_file
1286
+ vtable.open_input_stream = _cb_open_input_stream
1287
+ vtable.open_input_file = _cb_open_input_file
1288
+ vtable.open_output_stream = _cb_open_output_stream
1289
+ vtable.open_append_stream = _cb_open_append_stream
1290
+ vtable.normalize_path = _cb_normalize_path
1291
+
1292
+ wrapped = CPyFileSystem.Make(handler, move(vtable))
1293
+ self.init(<shared_ptr[CFileSystem]> wrapped)
1294
+
1295
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
1296
+ FileSystem.init(self, wrapped)
1297
+ self.pyfs = <CPyFileSystem*> wrapped.get()
1298
+
1299
+ @property
1300
+ def handler(self):
1301
+ """
1302
+ The filesystem's underlying handler.
1303
+
1304
+ Returns
1305
+ -------
1306
+ handler : FileSystemHandler
1307
+ """
1308
+ return <object> self.pyfs.handler()
1309
+
1310
+ def __reduce__(self):
1311
+ return PyFileSystem, (self.handler,)
1312
+
1313
+
1314
+ class FileSystemHandler(ABC):
1315
+ """
1316
+ An abstract class exposing methods to implement PyFileSystem's behavior.
1317
+ """
1318
+
1319
+ @abstractmethod
1320
+ def get_type_name(self):
1321
+ """
1322
+ Implement PyFileSystem.type_name.
1323
+ """
1324
+
1325
+ @abstractmethod
1326
+ def get_file_info(self, paths):
1327
+ """
1328
+ Implement PyFileSystem.get_file_info(paths).
1329
+
1330
+ Parameters
1331
+ ----------
1332
+ paths : list of str
1333
+ paths for which we want to retrieve the info.
1334
+ """
1335
+
1336
+ @abstractmethod
1337
+ def get_file_info_selector(self, selector):
1338
+ """
1339
+ Implement PyFileSystem.get_file_info(selector).
1340
+
1341
+ Parameters
1342
+ ----------
1343
+ selector : FileSelector
1344
+ selector for which we want to retrieve the info.
1345
+ """
1346
+
1347
+ @abstractmethod
1348
+ def create_dir(self, path, recursive):
1349
+ """
1350
+ Implement PyFileSystem.create_dir(...).
1351
+
1352
+ Parameters
1353
+ ----------
1354
+ path : str
1355
+ path of the directory.
1356
+ recursive : bool
1357
+ if the parent directories should be created too.
1358
+ """
1359
+
1360
+ @abstractmethod
1361
+ def delete_dir(self, path):
1362
+ """
1363
+ Implement PyFileSystem.delete_dir(...).
1364
+
1365
+ Parameters
1366
+ ----------
1367
+ path : str
1368
+ path of the directory.
1369
+ """
1370
+
1371
+ @abstractmethod
1372
+ def delete_dir_contents(self, path, missing_dir_ok=False):
1373
+ """
1374
+ Implement PyFileSystem.delete_dir_contents(...).
1375
+
1376
+ Parameters
1377
+ ----------
1378
+ path : str
1379
+ path of the directory.
1380
+ missing_dir_ok : bool
1381
+ if False an error should be raised if path does not exist
1382
+ """
1383
+
1384
+ @abstractmethod
1385
+ def delete_root_dir_contents(self):
1386
+ """
1387
+ Implement PyFileSystem.delete_dir_contents("/", accept_root_dir=True).
1388
+ """
1389
+
1390
+ @abstractmethod
1391
+ def delete_file(self, path):
1392
+ """
1393
+ Implement PyFileSystem.delete_file(...).
1394
+
1395
+ Parameters
1396
+ ----------
1397
+ path : str
1398
+ path of the file.
1399
+ """
1400
+
1401
+ @abstractmethod
1402
+ def move(self, src, dest):
1403
+ """
1404
+ Implement PyFileSystem.move(...).
1405
+
1406
+ Parameters
1407
+ ----------
1408
+ src : str
1409
+ path of what should be moved.
1410
+ dest : str
1411
+ path of where it should be moved to.
1412
+ """
1413
+
1414
+ @abstractmethod
1415
+ def copy_file(self, src, dest):
1416
+ """
1417
+ Implement PyFileSystem.copy_file(...).
1418
+
1419
+ Parameters
1420
+ ----------
1421
+ src : str
1422
+ path of what should be copied.
1423
+ dest : str
1424
+ path of where it should be copied to.
1425
+ """
1426
+
1427
+ @abstractmethod
1428
+ def open_input_stream(self, path):
1429
+ """
1430
+ Implement PyFileSystem.open_input_stream(...).
1431
+
1432
+ Parameters
1433
+ ----------
1434
+ path : str
1435
+ path of what should be opened.
1436
+ """
1437
+
1438
+ @abstractmethod
1439
+ def open_input_file(self, path):
1440
+ """
1441
+ Implement PyFileSystem.open_input_file(...).
1442
+
1443
+ Parameters
1444
+ ----------
1445
+ path : str
1446
+ path of what should be opened.
1447
+ """
1448
+
1449
+ @abstractmethod
1450
+ def open_output_stream(self, path, metadata):
1451
+ """
1452
+ Implement PyFileSystem.open_output_stream(...).
1453
+
1454
+ Parameters
1455
+ ----------
1456
+ path : str
1457
+ path of what should be opened.
1458
+ metadata : mapping
1459
+ Mapping of string keys to string values.
1460
+ Some filesystems support storing metadata along the file
1461
+ (such as "Content-Type").
1462
+ """
1463
+
1464
+ @abstractmethod
1465
+ def open_append_stream(self, path, metadata):
1466
+ """
1467
+ Implement PyFileSystem.open_append_stream(...).
1468
+
1469
+ Parameters
1470
+ ----------
1471
+ path : str
1472
+ path of what should be opened.
1473
+ metadata : mapping
1474
+ Mapping of string keys to string values.
1475
+ Some filesystems support storing metadata along the file
1476
+ (such as "Content-Type").
1477
+ """
1478
+
1479
+ @abstractmethod
1480
+ def normalize_path(self, path):
1481
+ """
1482
+ Implement PyFileSystem.normalize_path(...).
1483
+
1484
+ Parameters
1485
+ ----------
1486
+ path : str
1487
+ path of what should be normalized.
1488
+ """
1489
+
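To make the handler contract above concrete, here is a deliberately minimal, read-only sketch backed by a plain dict of bytes. The class name and behaviour are illustrative only (nothing like it ships with pyarrow), and every mutating or directory-related method is stubbed out with `NotImplementedError`:

```python
import pyarrow as pa
from pyarrow.fs import FileSystemHandler, FileInfo, FileType, PyFileSystem


class DictReadHandler(FileSystemHandler):
    """Read-only toy handler: paths map to bytes held in a dict."""

    def __init__(self, files):
        self.files = dict(files)

    def get_type_name(self):
        return 'dict-read'

    def get_file_info(self, paths):
        return [FileInfo(p, FileType.File, size=len(self.files[p]))
                if p in self.files else FileInfo(p, FileType.NotFound)
                for p in paths]

    def open_input_stream(self, path):
        return pa.BufferReader(self.files[path])

    def open_input_file(self, path):
        # BufferReader also supports random access, so it can serve both.
        return pa.BufferReader(self.files[path])

    def normalize_path(self, path):
        return path

    # Everything mutating or directory-related is unsupported in this sketch.
    def get_file_info_selector(self, selector):
        raise NotImplementedError

    def create_dir(self, path, recursive):
        raise NotImplementedError

    def delete_dir(self, path):
        raise NotImplementedError

    def delete_dir_contents(self, path, missing_dir_ok=False):
        raise NotImplementedError

    def delete_root_dir_contents(self):
        raise NotImplementedError

    def delete_file(self, path):
        raise NotImplementedError

    def move(self, src, dest):
        raise NotImplementedError

    def copy_file(self, src, dest):
        raise NotImplementedError

    def open_output_stream(self, path, metadata):
        raise NotImplementedError

    def open_append_stream(self, path, metadata):
        raise NotImplementedError


py_fs = PyFileSystem(DictReadHandler({'a.txt': b'hello'}))
print(py_fs.get_file_info('a.txt'))   # FileType.File, size=5
with py_fs.open_input_stream('a.txt') as f:
    print(f.read())                    # b'hello'
```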
1490
+ # Callback definitions for CPyFileSystemVtable
1491
+
1492
+
1493
+ cdef void _cb_get_type_name(handler, c_string* out) except *:
1494
+ out[0] = tobytes("py::" + handler.get_type_name())
1495
+
1496
+ cdef c_bool _cb_equals(handler, const CFileSystem& c_other) except False:
1497
+ if c_other.type_name().startswith(b"py::"):
1498
+ return <object> (<const CPyFileSystem&> c_other).handler() == handler
1499
+
1500
+ return False
1501
+
1502
+ cdef void _cb_get_file_info(handler, const c_string& path,
1503
+ CFileInfo* out) except *:
1504
+ infos = handler.get_file_info([frombytes(path)])
1505
+ if not isinstance(infos, list) or len(infos) != 1:
1506
+ raise TypeError("get_file_info should have returned a 1-element list")
1507
+ out[0] = FileInfo.unwrap_safe(infos[0])
1508
+
1509
+ cdef void _cb_get_file_info_vector(handler, const vector[c_string]& paths,
1510
+ vector[CFileInfo]* out) except *:
1511
+ py_paths = [frombytes(paths[i]) for i in range(len(paths))]
1512
+ infos = handler.get_file_info(py_paths)
1513
+ if not isinstance(infos, list):
1514
+ raise TypeError("get_file_info should have returned a list")
1515
+ out[0].clear()
1516
+ out[0].reserve(len(infos))
1517
+ for info in infos:
1518
+ out[0].push_back(FileInfo.unwrap_safe(info))
1519
+
1520
+ cdef void _cb_get_file_info_selector(handler, const CFileSelector& selector,
1521
+ vector[CFileInfo]* out) except *:
1522
+ infos = handler.get_file_info_selector(FileSelector.wrap(selector))
1523
+ if not isinstance(infos, list):
1524
+ raise TypeError("get_file_info_selector should have returned a list")
1525
+ out[0].clear()
1526
+ out[0].reserve(len(infos))
1527
+ for info in infos:
1528
+ out[0].push_back(FileInfo.unwrap_safe(info))
1529
+
1530
+ cdef void _cb_create_dir(handler, const c_string& path,
1531
+ c_bool recursive) except *:
1532
+ handler.create_dir(frombytes(path), recursive)
1533
+
1534
+ cdef void _cb_delete_dir(handler, const c_string& path) except *:
1535
+ handler.delete_dir(frombytes(path))
1536
+
1537
+ cdef void _cb_delete_dir_contents(handler, const c_string& path,
1538
+ c_bool missing_dir_ok) except *:
1539
+ handler.delete_dir_contents(frombytes(path), missing_dir_ok)
1540
+
1541
+ cdef void _cb_delete_root_dir_contents(handler) except *:
1542
+ handler.delete_root_dir_contents()
1543
+
1544
+ cdef void _cb_delete_file(handler, const c_string& path) except *:
1545
+ handler.delete_file(frombytes(path))
1546
+
1547
+ cdef void _cb_move(handler, const c_string& src,
1548
+ const c_string& dest) except *:
1549
+ handler.move(frombytes(src), frombytes(dest))
1550
+
1551
+ cdef void _cb_copy_file(handler, const c_string& src,
1552
+ const c_string& dest) except *:
1553
+ handler.copy_file(frombytes(src), frombytes(dest))
1554
+
1555
+ cdef void _cb_open_input_stream(handler, const c_string& path,
1556
+ shared_ptr[CInputStream]* out) except *:
1557
+ stream = handler.open_input_stream(frombytes(path))
1558
+ if not isinstance(stream, NativeFile):
1559
+ raise TypeError("open_input_stream should have returned "
1560
+ "a PyArrow file")
1561
+ out[0] = (<NativeFile> stream).get_input_stream()
1562
+
1563
+ cdef void _cb_open_input_file(handler, const c_string& path,
1564
+ shared_ptr[CRandomAccessFile]* out) except *:
1565
+ stream = handler.open_input_file(frombytes(path))
1566
+ if not isinstance(stream, NativeFile):
1567
+ raise TypeError("open_input_file should have returned "
1568
+ "a PyArrow file")
1569
+ out[0] = (<NativeFile> stream).get_random_access_file()
1570
+
1571
+ cdef void _cb_open_output_stream(
1572
+ handler, const c_string& path,
1573
+ const shared_ptr[const CKeyValueMetadata]& metadata,
1574
+ shared_ptr[COutputStream]* out) except *:
1575
+ stream = handler.open_output_stream(
1576
+ frombytes(path), pyarrow_wrap_metadata(metadata))
1577
+ if not isinstance(stream, NativeFile):
1578
+ raise TypeError("open_output_stream should have returned "
1579
+ "a PyArrow file")
1580
+ out[0] = (<NativeFile> stream).get_output_stream()
1581
+
1582
+ cdef void _cb_open_append_stream(
1583
+ handler, const c_string& path,
1584
+ const shared_ptr[const CKeyValueMetadata]& metadata,
1585
+ shared_ptr[COutputStream]* out) except *:
1586
+ stream = handler.open_append_stream(
1587
+ frombytes(path), pyarrow_wrap_metadata(metadata))
1588
+ if not isinstance(stream, NativeFile):
1589
+ raise TypeError("open_append_stream should have returned "
1590
+ "a PyArrow file")
1591
+ out[0] = (<NativeFile> stream).get_output_stream()
1592
+
1593
+ cdef void _cb_normalize_path(handler, const c_string& path,
1594
+ c_string* out) except *:
1595
+ out[0] = tobytes(handler.normalize_path(frombytes(path)))
1596
+
1597
+
1598
+ def _copy_files(FileSystem source_fs, str source_path,
1599
+ FileSystem destination_fs, str destination_path,
1600
+ int64_t chunk_size, c_bool use_threads):
1601
+ # low-level helper exposed through pyarrow/fs.py::copy_files
1602
+ cdef:
1603
+ CFileLocator c_source
1604
+ vector[CFileLocator] c_sources
1605
+ CFileLocator c_destination
1606
+ vector[CFileLocator] c_destinations
1607
+
1608
+ c_source.filesystem = source_fs.unwrap()
1609
+ c_source.path = tobytes(source_path)
1610
+ c_sources.push_back(c_source)
1611
+
1612
+ c_destination.filesystem = destination_fs.unwrap()
1613
+ c_destination.path = tobytes(destination_path)
1614
+ c_destinations.push_back(c_destination)
1615
+
1616
+ with nogil:
1617
+ check_status(CCopyFiles(
1618
+ c_sources, c_destinations,
1619
+ c_default_io_context(), chunk_size, use_threads,
1620
+ ))
1621
+
1622
+
1623
+ def _copy_files_selector(FileSystem source_fs, FileSelector source_sel,
1624
+ FileSystem destination_fs, str destination_base_dir,
1625
+ int64_t chunk_size, c_bool use_threads):
1626
+ # low-level helper exposed through pyarrow/fs.py::copy_files
1627
+ cdef c_string c_destination_base_dir = tobytes(destination_base_dir)
1628
+
1629
+ with nogil:
1630
+ check_status(CCopyFilesWithSelector(
1631
+ source_fs.unwrap(), source_sel.unwrap(),
1632
+ destination_fs.unwrap(), c_destination_base_dir,
1633
+ c_default_io_context(), chunk_size, use_threads,
1634
+ ))
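These two low-level helpers back the public `pyarrow.fs.copy_files` function, which copies a single file or a whole directory tree between any pair of filesystems. A hedged local-to-local sketch; swap in, for example, an `S3FileSystem` as the destination filesystem to upload instead:

```python
from pyarrow import fs

local = fs.LocalFileSystem()
local.create_dir('/tmp/copy_src')
local.create_dir('/tmp/copy_dst')
with local.open_output_stream('/tmp/copy_src/a.dat') as out:
    out.write(b'payload')

# Copy the whole tree between the two filesystems.
fs.copy_files('/tmp/copy_src', '/tmp/copy_dst',
              source_filesystem=local, destination_filesystem=local)

print(local.get_file_info('/tmp/copy_dst/a.dat').size)  # 7
```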
llmeval-env/lib/python3.10/site-packages/pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (132 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx ADDED
@@ -0,0 +1,212 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.lib cimport (pyarrow_wrap_metadata,
23
+ pyarrow_unwrap_metadata)
24
+ from pyarrow.lib import frombytes, tobytes, ensure_metadata
25
+ from pyarrow.includes.common cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.includes.libarrow_fs cimport *
28
+ from pyarrow._fs cimport FileSystem, TimePoint_to_ns, PyDateTime_to_TimePoint
29
+
30
+ from datetime import datetime, timedelta, timezone
31
+
32
+
33
+ cdef class GcsFileSystem(FileSystem):
34
+ """
35
+ Google Cloud Storage (GCS) backed FileSystem implementation
36
+
37
+ By default uses the process described in https://google.aip.dev/auth/4110
38
+ to resolve credentials. If not running on Google Cloud Platform (GCP),
39
+ this generally requires the environment variable
40
+ GOOGLE_APPLICATION_CREDENTIALS to point to a JSON file
41
+ containing credentials.
42
+
43
+ Note: GCS buckets are special and the operations available on them may be
44
+ limited or more expensive than expected compared to local file systems.
45
+
46
+ Note: When pickling a GcsFileSystem that uses default credentials, the resolved
47
+ credentials are not stored in the serialized data. Therefore, when unpickling
48
+ it is assumed that the necessary credentials are in place for the target
49
+ process.
50
+
51
+ Parameters
52
+ ----------
53
+ anonymous : boolean, default False
54
+ Whether to connect anonymously.
55
+ If true, will not attempt to look up credentials using standard GCP
56
+ configuration methods.
57
+ access_token : str, default None
58
+ GCP access token. If provided, temporary credentials will be fetched by
59
+ assuming this role; a `credential_token_expiration` must also be
60
+ specified.
61
+ target_service_account : str, default None
62
+ An optional service account to try to impersonate when accessing GCS. This
63
+ requires the specified credential user or service account to have the necessary
64
+ permissions.
65
+ credential_token_expiration : datetime, default None
66
+ Expiration for credential generated with an access token. Must be specified
67
+ if `access_token` is specified.
68
+ default_bucket_location : str, default 'US'
69
+ GCP region to create buckets in.
70
+ scheme : str, default 'https'
71
+ GCS connection transport scheme.
72
+ endpoint_override : str, default None
73
+ Override endpoint with a connect string such as "localhost:9000"
74
+ default_metadata : mapping or pyarrow.KeyValueMetadata, default None
75
+ Default metadata for `open_output_stream`. This will be ignored if
76
+ non-empty metadata is passed to `open_output_stream`.
77
+ retry_time_limit : timedelta, default None
78
+ Set the maximum amount of time the GCS client will attempt to retry
79
+ transient errors. Subsecond granularity is ignored.
80
+ project_id : str, default None
81
+ The GCP project identifier to use for creating buckets.
82
+ If not set, the library uses the GOOGLE_CLOUD_PROJECT environment
83
+ variable. Most I/O operations do not need a project id, only applications
84
+ that create new buckets need a project id.
85
+ """
86
+
87
+ cdef:
88
+ CGcsFileSystem* gcsfs
89
+
90
+ def __init__(self, *, bint anonymous=False, access_token=None,
91
+ target_service_account=None, credential_token_expiration=None,
92
+ default_bucket_location='US',
93
+ scheme=None,
94
+ endpoint_override=None,
95
+ default_metadata=None,
96
+ retry_time_limit=None,
97
+ project_id=None):
98
+ cdef:
99
+ CGcsOptions options
100
+ shared_ptr[CGcsFileSystem] wrapped
101
+ double time_limit_seconds
102
+
103
+ # Intentional use of truthiness because empty strings aren't valid and
104
+ # reconstruction from pickling will give empty strings.
105
+ if anonymous and (target_service_account or access_token):
106
+ raise ValueError(
107
+ 'anonymous option is not compatible with target_service_account and '
108
+ 'access_token'
109
+ )
110
+ elif bool(access_token) != bool(credential_token_expiration):
111
+ raise ValueError(
112
+ 'access_token and credential_token_expiration must be '
113
+ 'specified together'
114
+ )
115
+
116
+ elif anonymous:
117
+ options = CGcsOptions.Anonymous()
118
+ elif access_token:
119
+ if not isinstance(credential_token_expiration, datetime):
120
+ raise ValueError(
121
+ "credential_token_expiration must be a datetime")
122
+ options = CGcsOptions.FromAccessToken(
123
+ tobytes(access_token),
124
+ PyDateTime_to_TimePoint(<PyDateTime_DateTime*>credential_token_expiration))
125
+ else:
126
+ options = CGcsOptions.Defaults()
127
+
128
+ # Target service account requires base credentials so
129
+ # it is not part of the if/else chain above which only
130
+ # handles base credentials.
131
+ if target_service_account:
132
+ options = CGcsOptions.FromImpersonatedServiceAccount(
133
+ options.credentials, tobytes(target_service_account))
134
+
135
+ options.default_bucket_location = tobytes(default_bucket_location)
136
+
137
+ if scheme is not None:
138
+ options.scheme = tobytes(scheme)
139
+ if endpoint_override is not None:
140
+ options.endpoint_override = tobytes(endpoint_override)
141
+ if default_metadata is not None:
142
+ options.default_metadata = pyarrow_unwrap_metadata(
143
+ ensure_metadata(default_metadata))
144
+ if retry_time_limit is not None:
145
+ time_limit_seconds = retry_time_limit.total_seconds()
146
+ options.retry_limit_seconds = time_limit_seconds
147
+ if project_id is not None:
148
+ options.project_id = <c_string>tobytes(project_id)
149
+
150
+ with nogil:
151
+ wrapped = GetResultValue(CGcsFileSystem.Make(options))
152
+
153
+ self.init(<shared_ptr[CFileSystem]> wrapped)
154
+
155
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
156
+ FileSystem.init(self, wrapped)
157
+ self.gcsfs = <CGcsFileSystem*> wrapped.get()
158
+
159
+ def _expiration_datetime_from_options(self):
160
+ expiration_ns = TimePoint_to_ns(
161
+ self.gcsfs.options().credentials.expiration())
162
+ if expiration_ns == 0:
163
+ return None
164
+ return datetime.fromtimestamp(expiration_ns / 1.0e9, timezone.utc)
165
+
166
+ @staticmethod
167
+ @binding(True) # Required for cython < 3
168
+ def _reconstruct(kwargs):
169
+ # __reduce__ doesn't allow passing named arguments directly to the
170
+ # reconstructor, hence this wrapper.
171
+ return GcsFileSystem(**kwargs)
172
+
173
+ def __reduce__(self):
174
+ cdef CGcsOptions opts = self.gcsfs.options()
175
+ service_account = frombytes(opts.credentials.target_service_account())
176
+ expiration_dt = self._expiration_datetime_from_options()
177
+ retry_time_limit = None
178
+ if opts.retry_limit_seconds.has_value():
179
+ retry_time_limit = timedelta(
180
+ seconds=opts.retry_limit_seconds.value())
181
+ project_id = None
182
+ if opts.project_id.has_value():
183
+ project_id = frombytes(opts.project_id.value())
184
+ return (
185
+ GcsFileSystem._reconstruct, (dict(
186
+ access_token=frombytes(opts.credentials.access_token()),
187
+ anonymous=opts.credentials.anonymous(),
188
+ credential_token_expiration=expiration_dt,
189
+ target_service_account=service_account,
190
+ scheme=frombytes(opts.scheme),
191
+ endpoint_override=frombytes(opts.endpoint_override),
192
+ default_bucket_location=frombytes(
193
+ opts.default_bucket_location),
194
+ default_metadata=pyarrow_wrap_metadata(opts.default_metadata),
195
+ retry_time_limit=retry_time_limit,
196
+ project_id=project_id
197
+ ),))
198
+
199
+ @property
200
+ def default_bucket_location(self):
201
+ """
202
+ The GCP location this filesystem will write to.
203
+ """
204
+ return frombytes(self.gcsfs.options().default_bucket_location)
205
+
206
+ @property
207
+ def project_id(self):
208
+ """
209
+ The GCP project id this filesystem will use.
210
+ """
211
+ if self.gcsfs.options().project_id.has_value():
212
+ return frombytes(self.gcsfs.options().project_id.value())
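A hedged construction sketch: the endpoint below assumes a local GCS emulator such as the storage testbench, which is not part of pyarrow; against real GCS you would usually rely on the default-credentials path described above and omit these arguments.

```python
from datetime import timedelta
from pyarrow import fs

# Anonymous access against a hypothetical local emulator endpoint.
gcs = fs.GcsFileSystem(
    anonymous=True,
    scheme='http',
    endpoint_override='localhost:9001',
    retry_time_limit=timedelta(seconds=15),
)

# The generic FileSystem API then applies, e.g.:
# gcs.get_file_info(fs.FileSelector('my-bucket/', recursive=True))
```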
llmeval-env/lib/python3.10/site-packages/pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (130 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_hdfs.pyx ADDED
@@ -0,0 +1,160 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from cython cimport binding
21
+
22
+ from pyarrow.includes.common cimport *
23
+ from pyarrow.includes.libarrow cimport *
24
+ from pyarrow.includes.libarrow_fs cimport *
25
+ from pyarrow._fs cimport FileSystem
26
+
27
+ from pyarrow.lib import frombytes, tobytes
28
+ from pyarrow.util import _stringify_path
29
+
30
+
31
+ cdef class HadoopFileSystem(FileSystem):
32
+ """
33
+ HDFS backed FileSystem implementation
34
+
35
+ Parameters
36
+ ----------
37
+ host : str
38
+ HDFS host to connect to. Set to "default" for fs.defaultFS from
39
+ core-site.xml.
40
+ port : int, default 8020
41
+ HDFS port to connect to. Set to 0 for default or logical (HA) nodes.
42
+ user : str, default None
43
+ Username when connecting to HDFS; None implies login user.
44
+ replication : int, default 3
45
+ Number of copies each block will have.
46
+ buffer_size : int, default 0
47
+ If 0, no buffering will happen otherwise the size of the temporary read
48
+ and write buffer.
49
+ default_block_size : int, default None
50
+ None means the default configuration for HDFS, a typical block size is
51
+ 128 MB.
52
+ kerb_ticket : string or path, default None
53
+ If not None, the path to the Kerberos ticket cache.
54
+ extra_conf : dict, default None
55
+ Extra key/value pairs for configuration; will override any
56
+ hdfs-site.xml properties.
57
+
58
+ Examples
59
+ --------
60
+ >>> from pyarrow import fs
61
+ >>> hdfs = fs.HadoopFileSystem(host, port, user=user, kerb_ticket=ticket_cache_path) # doctest: +SKIP
62
+
63
+ For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
64
+ """
65
+
66
+ cdef:
67
+ CHadoopFileSystem* hdfs
68
+
69
+ def __init__(self, str host, int port=8020, *, str user=None,
70
+ int replication=3, int buffer_size=0,
71
+ default_block_size=None, kerb_ticket=None,
72
+ extra_conf=None):
73
+ cdef:
74
+ CHdfsOptions options
75
+ shared_ptr[CHadoopFileSystem] wrapped
76
+
77
+ if not host.startswith(('hdfs://', 'viewfs://')) and host != "default":
78
+ # TODO(kszucs): do more sanitization
79
+ host = 'hdfs://{}'.format(host)
80
+
81
+ options.ConfigureEndPoint(tobytes(host), int(port))
82
+ options.ConfigureReplication(replication)
83
+ options.ConfigureBufferSize(buffer_size)
84
+
85
+ if user is not None:
86
+ options.ConfigureUser(tobytes(user))
87
+ if default_block_size is not None:
88
+ options.ConfigureBlockSize(default_block_size)
89
+ if kerb_ticket is not None:
90
+ options.ConfigureKerberosTicketCachePath(
91
+ tobytes(_stringify_path(kerb_ticket)))
92
+ if extra_conf is not None:
93
+ for k, v in extra_conf.items():
94
+ options.ConfigureExtraConf(tobytes(k), tobytes(v))
95
+
96
+ with nogil:
97
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
98
+ self.init(<shared_ptr[CFileSystem]> wrapped)
99
+
100
+ cdef init(self, const shared_ptr[CFileSystem]& wrapped):
101
+ FileSystem.init(self, wrapped)
102
+ self.hdfs = <CHadoopFileSystem*> wrapped.get()
103
+
104
+ @staticmethod
105
+ def from_uri(uri):
106
+ """
107
+ Instantiate HadoopFileSystem object from a URI string.
108
+
109
+ The following two calls are equivalent
110
+
111
+ * ``HadoopFileSystem.from_uri('hdfs://localhost:8020/?user=test\
112
+ &replication=1')``
113
+ * ``HadoopFileSystem('localhost', port=8020, user='test', \
114
+ replication=1)``
115
+
116
+ Parameters
117
+ ----------
118
+ uri : str
119
+ A string URI describing the connection to HDFS.
120
+ In order to change the user, replication, buffer_size or
121
+ default_block_size pass the values as query parts.
122
+
123
+ Returns
124
+ -------
125
+ HadoopFileSystem
126
+ """
127
+ cdef:
128
+ HadoopFileSystem self = HadoopFileSystem.__new__(HadoopFileSystem)
129
+ shared_ptr[CHadoopFileSystem] wrapped
130
+ CHdfsOptions options
131
+
132
+ options = GetResultValue(CHdfsOptions.FromUriString(tobytes(uri)))
133
+ with nogil:
134
+ wrapped = GetResultValue(CHadoopFileSystem.Make(options))
135
+
136
+ self.init(<shared_ptr[CFileSystem]> wrapped)
137
+ return self
138
+
139
+ @staticmethod
140
+ @binding(True) # Required for cython < 3
141
+ def _reconstruct(kwargs):
142
+ # __reduce__ doesn't allow passing named arguments directly to the
143
+ # reconstructor, hence this wrapper.
144
+ return HadoopFileSystem(**kwargs)
145
+
146
+ def __reduce__(self):
147
+ cdef CHdfsOptions opts = self.hdfs.options()
148
+ return (
149
+ HadoopFileSystem._reconstruct, (dict(
150
+ host=frombytes(opts.connection_config.host),
151
+ port=opts.connection_config.port,
152
+ user=frombytes(opts.connection_config.user),
153
+ replication=opts.replication,
154
+ buffer_size=opts.buffer_size,
155
+ default_block_size=opts.default_block_size,
156
+ kerb_ticket=frombytes(opts.connection_config.kerb_ticket),
157
+ extra_conf={frombytes(k): frombytes(v)
158
+ for k, v in opts.connection_config.extra_conf},
159
+ ),)
160
+ )
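A hedged sketch of the URI form accepted by `from_uri`; it assumes a reachable namenode plus a working libhdfs and `CLASSPATH` setup, which pyarrow itself does not provide:

```python
from pyarrow import fs

# Equivalent to HadoopFileSystem('localhost', 8020, user='test', replication=1);
# requires a running namenode and a configured libhdfs/CLASSPATH environment.
hdfs = fs.HadoopFileSystem.from_uri(
    'hdfs://localhost:8020/?user=test&replication=1')

# The generic FileSystem API then applies, e.g.:
# hdfs.create_dir('/tmp/pyarrow-hdfs-demo')
```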
llmeval-env/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (108 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pxd ADDED
@@ -0,0 +1,36 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+
20
+ from pyarrow.includes.libarrow cimport *
21
+ from pyarrow.lib cimport _Weakrefable
22
+
23
+
24
+ cdef class ParseOptions(_Weakrefable):
25
+ cdef:
26
+ CJSONParseOptions options
27
+
28
+ @staticmethod
29
+ cdef ParseOptions wrap(CJSONParseOptions options)
30
+
31
+ cdef class ReadOptions(_Weakrefable):
32
+ cdef:
33
+ CJSONReadOptions options
34
+
35
+ @staticmethod
36
+ cdef ReadOptions wrap(CJSONReadOptions options)
llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (204 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pyx ADDED
@@ -0,0 +1,445 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False
19
+ # distutils: language = c++
20
+
21
+ from cython.operator cimport dereference as deref
22
+ from libcpp.vector cimport vector as std_vector
23
+ from libcpp.utility cimport move
24
+ from pyarrow.includes.common cimport *
25
+ from pyarrow.includes.libarrow cimport *
26
+ from pyarrow.lib cimport (check_status, _Weakrefable,
27
+ MemoryPool, maybe_unbox_memory_pool,
28
+ pyarrow_wrap_schema,
29
+ pyarrow_wrap_batch,
30
+ Table,
31
+ pyarrow_wrap_table,
32
+ pyarrow_wrap_metadata,
33
+ pyarrow_unwrap_table,
34
+ get_reader,
35
+ get_writer)
36
+ from pyarrow.lib import frombytes, tobytes
37
+ from pyarrow.util import _stringify_path
38
+
39
+
40
+ cdef compression_type_from_enum(CCompressionType compression_type):
41
+ compression_map = {
42
+ CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED',
43
+ CCompressionType_GZIP: 'ZLIB',
44
+ CCompressionType_SNAPPY: 'SNAPPY',
45
+ CCompressionType_LZ4: 'LZ4',
46
+ CCompressionType_ZSTD: 'ZSTD',
47
+ }
48
+ if compression_type in compression_map:
49
+ return compression_map[compression_type]
50
+ raise ValueError('Unsupported compression')
51
+
52
+
53
+ cdef CCompressionType compression_type_from_name(name) except *:
54
+ if not isinstance(name, str):
55
+ raise TypeError('compression must be a string')
56
+ name = name.upper()
57
+ if name == 'ZLIB':
58
+ return CCompressionType_GZIP
59
+ elif name == 'SNAPPY':
60
+ return CCompressionType_SNAPPY
61
+ elif name == 'LZ4':
62
+ return CCompressionType_LZ4
63
+ elif name == 'ZSTD':
64
+ return CCompressionType_ZSTD
65
+ elif name == 'UNCOMPRESSED':
66
+ return CCompressionType_UNCOMPRESSED
67
+ raise ValueError(f'Unknown CompressionKind: {name}')
68
+
69
+
70
+ cdef compression_strategy_from_enum(
71
+ CompressionStrategy compression_strategy
72
+ ):
73
+ compression_strategy_map = {
74
+ _CompressionStrategy_SPEED: 'SPEED',
75
+ _CompressionStrategy_COMPRESSION: 'COMPRESSION',
76
+ }
77
+ if compression_strategy in compression_strategy_map:
78
+ return compression_strategy_map[compression_strategy]
79
+ raise ValueError('Unsupported compression strategy')
80
+
81
+
82
+ cdef CompressionStrategy compression_strategy_from_name(name) except *:
83
+ if not isinstance(name, str):
84
+ raise TypeError('compression strategy must be a string')
85
+ name = name.upper()
86
+ if name == 'COMPRESSION':
87
+ return _CompressionStrategy_COMPRESSION
88
+ elif name == 'SPEED':
89
+ return _CompressionStrategy_SPEED
90
+ raise ValueError(f'Unknown CompressionStrategy: {name}')
91
+
92
+
93
+ cdef file_version_from_class(FileVersion file_version):
94
+ return frombytes(file_version.ToString())
95
+
96
+
97
+ cdef writer_id_from_enum(WriterId writer_id):
98
+ writer_id_map = {
99
+ _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA',
100
+ _WriterId_ORC_CPP_WRITER: 'ORC_CPP',
101
+ _WriterId_PRESTO_WRITER: 'PRESTO',
102
+ _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO',
103
+ _WriterId_TRINO_WRITER: 'TRINO',
104
+ }
105
+ if writer_id in writer_id_map:
106
+ return writer_id_map[writer_id]
107
+ raise ValueError('Unsupported writer ID')
108
+
109
+
110
+ cdef writer_version_from_enum(WriterVersion writer_version):
111
+ writer_version_map = {
112
+ _WriterVersion_ORIGINAL: 'ORIGINAL',
113
+ _WriterVersion_HIVE_8732: 'HIVE_8732',
114
+ _WriterVersion_HIVE_4243: 'HIVE_4243',
115
+ _WriterVersion_HIVE_12055: 'HIVE_12055',
116
+ _WriterVersion_HIVE_13083: 'HIVE_13083',
117
+ _WriterVersion_ORC_101: 'ORC_101',
118
+ _WriterVersion_ORC_135: 'ORC_135',
119
+ _WriterVersion_ORC_517: 'ORC_517',
120
+ _WriterVersion_ORC_203: 'ORC_203',
121
+ _WriterVersion_ORC_14: 'ORC_14',
122
+ }
123
+ if writer_version in writer_version_map:
124
+ return writer_version_map[writer_version]
125
+ raise ValueError('Unsupported writer version')
126
+
127
+
128
+ cdef shared_ptr[WriteOptions] _create_write_options(
129
+ file_version=None,
130
+ batch_size=None,
131
+ stripe_size=None,
132
+ compression=None,
133
+ compression_block_size=None,
134
+ compression_strategy=None,
135
+ row_index_stride=None,
136
+ padding_tolerance=None,
137
+ dictionary_key_size_threshold=None,
138
+ bloom_filter_columns=None,
139
+ bloom_filter_fpp=None
140
+ ) except *:
141
+ """General writer options"""
142
+ cdef:
143
+ shared_ptr[WriteOptions] options
144
+ options = make_shared[WriteOptions]()
145
+ # batch_size
146
+ if batch_size is not None:
147
+ if isinstance(batch_size, int) and batch_size > 0:
148
+ deref(options).batch_size = batch_size
149
+ else:
150
+ raise ValueError(f"Invalid ORC writer batch size: {batch_size}")
151
+ # file_version
152
+ if file_version is not None:
153
+ if file_version == "0.12":
154
+ deref(options).file_version = FileVersion(0, 12)
155
+ elif file_version == "0.11":
156
+ deref(options).file_version = FileVersion(0, 11)
157
+ else:
158
+ raise ValueError(f"Unsupported ORC file version: {file_version}")
159
+ # stripe_size
160
+ if stripe_size is not None:
161
+ if isinstance(stripe_size, int) and stripe_size > 0:
162
+ deref(options).stripe_size = stripe_size
163
+ else:
164
+ raise ValueError(f"Invalid ORC stripe size: {stripe_size}")
165
+ # compression
166
+ if compression is not None:
167
+ if isinstance(compression, str):
168
+ deref(options).compression = compression_type_from_name(
169
+ compression)
170
+ else:
171
+ raise TypeError("Unsupported ORC compression type: "
172
+ f"{compression}")
173
+ # compression_block_size
174
+ if compression_block_size is not None:
175
+ if (isinstance(compression_block_size, int) and
176
+ compression_block_size > 0):
177
+ deref(options).compression_block_size = compression_block_size
178
+ else:
179
+ raise ValueError("Invalid ORC compression block size: "
180
+ f"{compression_block_size}")
181
+ # compression_strategy
182
+ if compression_strategy is not None:
183
+ if isinstance(compression, str):
184
+ deref(options).compression_strategy = \
185
+ compression_strategy_from_name(compression_strategy)
186
+ else:
187
+ raise TypeError("Unsupported ORC compression strategy: "
188
+ f"{compression_strategy}")
189
+ # row_index_stride
190
+ if row_index_stride is not None:
191
+ if isinstance(row_index_stride, int) and row_index_stride > 0:
192
+ deref(options).row_index_stride = row_index_stride
193
+ else:
194
+ raise ValueError("Invalid ORC row index stride: "
195
+ f"{row_index_stride}")
196
+ # padding_tolerance
197
+ if padding_tolerance is not None:
198
+ try:
199
+ padding_tolerance = float(padding_tolerance)
200
+ deref(options).padding_tolerance = padding_tolerance
201
+ except Exception:
202
+ raise ValueError("Invalid ORC padding tolerance: "
203
+ f"{padding_tolerance}")
204
+ # dictionary_key_size_threshold
205
+ if dictionary_key_size_threshold is not None:
206
+ try:
207
+ dictionary_key_size_threshold = float(
208
+ dictionary_key_size_threshold)
209
+ assert 0 <= dictionary_key_size_threshold <= 1
210
+ deref(options).dictionary_key_size_threshold = \
211
+ dictionary_key_size_threshold
212
+ except Exception:
213
+ raise ValueError("Invalid ORC dictionary key size threshold: "
214
+ f"{dictionary_key_size_threshold}")
215
+ # bloom_filter_columns
216
+ if bloom_filter_columns is not None:
217
+ try:
218
+ bloom_filter_columns = list(bloom_filter_columns)
219
+ for col in bloom_filter_columns:
220
+ assert isinstance(col, int) and col >= 0
221
+ deref(options).bloom_filter_columns = bloom_filter_columns
222
+ except Exception:
223
+ raise ValueError("Invalid ORC BloomFilter columns: "
224
+ f"{bloom_filter_columns}")
225
+ # Max false positive rate of the Bloom Filter
226
+ if bloom_filter_fpp is not None:
227
+ try:
228
+ bloom_filter_fpp = float(bloom_filter_fpp)
229
+ assert 0 <= bloom_filter_fpp <= 1
230
+ deref(options).bloom_filter_fpp = bloom_filter_fpp
231
+ except Exception:
232
+ raise ValueError("Invalid ORC BloomFilter false positive rate: "
233
+ f"{bloom_filter_fpp}")
234
+ return options
235
+
236
+
237
+ cdef class ORCReader(_Weakrefable):
238
+ cdef:
239
+ object source
240
+ CMemoryPool* allocator
241
+ unique_ptr[ORCFileReader] reader
242
+
243
+ def __cinit__(self, MemoryPool memory_pool=None):
244
+ self.allocator = maybe_unbox_memory_pool(memory_pool)
245
+
246
+ def open(self, object source, c_bool use_memory_map=True):
247
+ cdef:
248
+ shared_ptr[CRandomAccessFile] rd_handle
249
+
250
+ self.source = source
251
+
252
+ get_reader(source, use_memory_map, &rd_handle)
253
+ with nogil:
254
+ self.reader = move(GetResultValue(
255
+ ORCFileReader.Open(rd_handle, self.allocator)
256
+ ))
257
+
258
+ def metadata(self):
259
+ """
260
+ The arrow metadata for this file.
261
+
262
+ Returns
263
+ -------
264
+ metadata : pyarrow.KeyValueMetadata
265
+ """
266
+ cdef:
267
+ shared_ptr[const CKeyValueMetadata] sp_arrow_metadata
268
+
269
+ with nogil:
270
+ sp_arrow_metadata = GetResultValue(
271
+ deref(self.reader).ReadMetadata()
272
+ )
273
+
274
+ return pyarrow_wrap_metadata(sp_arrow_metadata)
275
+
276
+ def schema(self):
277
+ """
278
+ The arrow schema for this file.
279
+
280
+ Returns
281
+ -------
282
+ schema : pyarrow.Schema
283
+ """
284
+ cdef:
285
+ shared_ptr[CSchema] sp_arrow_schema
286
+
287
+ with nogil:
288
+ sp_arrow_schema = GetResultValue(deref(self.reader).ReadSchema())
289
+
290
+ return pyarrow_wrap_schema(sp_arrow_schema)
291
+
292
+ def nrows(self):
293
+ return deref(self.reader).NumberOfRows()
294
+
295
+ def nstripes(self):
296
+ return deref(self.reader).NumberOfStripes()
297
+
298
+ def file_version(self):
299
+ return file_version_from_class(deref(self.reader).GetFileVersion())
300
+
301
+ def software_version(self):
302
+ return frombytes(deref(self.reader).GetSoftwareVersion())
303
+
304
+ def compression(self):
305
+ return compression_type_from_enum(
306
+ GetResultValue(deref(self.reader).GetCompression()))
307
+
308
+ def compression_size(self):
309
+ return deref(self.reader).GetCompressionSize()
310
+
311
+ def row_index_stride(self):
312
+ return deref(self.reader).GetRowIndexStride()
313
+
314
+ def writer(self):
315
+ writer_name = writer_id_from_enum(deref(self.reader).GetWriterId())
316
+ if writer_name == 'UNKNOWN':
317
+ return deref(self.reader).GetWriterIdValue()
318
+ else:
319
+ return writer_name
320
+
321
+ def writer_version(self):
322
+ return writer_version_from_enum(deref(self.reader).GetWriterVersion())
323
+
324
+ def nstripe_statistics(self):
325
+ return deref(self.reader).GetNumberOfStripeStatistics()
326
+
327
+ def content_length(self):
328
+ return deref(self.reader).GetContentLength()
329
+
330
+ def stripe_statistics_length(self):
331
+ return deref(self.reader).GetStripeStatisticsLength()
332
+
333
+ def file_footer_length(self):
334
+ return deref(self.reader).GetFileFooterLength()
335
+
336
+ def file_postscript_length(self):
337
+ return deref(self.reader).GetFilePostscriptLength()
338
+
339
+ def file_length(self):
340
+ return deref(self.reader).GetFileLength()
341
+
342
+ def serialized_file_tail(self):
343
+ return deref(self.reader).GetSerializedFileTail()
344
+
345
+ def read_stripe(self, n, columns=None):
346
+ cdef:
347
+ shared_ptr[CRecordBatch] sp_record_batch
348
+ int64_t stripe
349
+ std_vector[c_string] c_names
350
+
351
+ stripe = n
352
+
353
+ if columns is None:
354
+ with nogil:
355
+ sp_record_batch = GetResultValue(
356
+ deref(self.reader).ReadStripe(stripe)
357
+ )
358
+ else:
359
+ c_names = [tobytes(name) for name in columns]
360
+ with nogil:
361
+ sp_record_batch = GetResultValue(
362
+ deref(self.reader).ReadStripe(stripe, c_names)
363
+ )
364
+
365
+ return pyarrow_wrap_batch(sp_record_batch)
366
+
367
+ def read(self, columns=None):
368
+ cdef:
369
+ shared_ptr[CTable] sp_table
370
+ std_vector[c_string] c_names
371
+
372
+ if columns is None:
373
+ with nogil:
374
+ sp_table = GetResultValue(deref(self.reader).Read())
375
+ else:
376
+ c_names = [tobytes(name) for name in columns]
377
+ with nogil:
378
+ sp_table = GetResultValue(deref(self.reader).Read(c_names))
379
+
380
+ return pyarrow_wrap_table(sp_table)
381
+
382
+
383
+ cdef class ORCWriter(_Weakrefable):
384
+ cdef:
385
+ unique_ptr[ORCFileWriter] writer
386
+ shared_ptr[COutputStream] sink
387
+ c_bool own_sink
388
+
389
+ def open(self, object where, *,
390
+ file_version=None,
391
+ batch_size=None,
392
+ stripe_size=None,
393
+ compression=None,
394
+ compression_block_size=None,
395
+ compression_strategy=None,
396
+ row_index_stride=None,
397
+ padding_tolerance=None,
398
+ dictionary_key_size_threshold=None,
399
+ bloom_filter_columns=None,
400
+ bloom_filter_fpp=None):
401
+ cdef:
402
+ shared_ptr[WriteOptions] write_options
403
+ c_string c_where
404
+ try:
405
+ where = _stringify_path(where)
406
+ except TypeError:
407
+ get_writer(where, &self.sink)
408
+ self.own_sink = False
409
+ else:
410
+ c_where = tobytes(where)
411
+ with nogil:
412
+ self.sink = GetResultValue(FileOutputStream.Open(c_where))
413
+ self.own_sink = True
414
+
415
+ write_options = _create_write_options(
416
+ file_version=file_version,
417
+ batch_size=batch_size,
418
+ stripe_size=stripe_size,
419
+ compression=compression,
420
+ compression_block_size=compression_block_size,
421
+ compression_strategy=compression_strategy,
422
+ row_index_stride=row_index_stride,
423
+ padding_tolerance=padding_tolerance,
424
+ dictionary_key_size_threshold=dictionary_key_size_threshold,
425
+ bloom_filter_columns=bloom_filter_columns,
426
+ bloom_filter_fpp=bloom_filter_fpp
427
+ )
428
+
429
+ with nogil:
430
+ self.writer = move(GetResultValue(
431
+ ORCFileWriter.Open(self.sink.get(),
432
+ deref(write_options))))
433
+
434
+ def write(self, Table table):
435
+ cdef:
436
+ shared_ptr[CTable] sp_table
437
+ sp_table = pyarrow_unwrap_table(table)
438
+ with nogil:
439
+ check_status(deref(self.writer).Write(deref(sp_table)))
440
+
441
+ def close(self):
442
+ with nogil:
443
+ check_status(deref(self.writer).Close())
444
+ if self.own_sink:
445
+ check_status(deref(self.sink).Close())
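Note: the option validation and the ORCReader/ORCWriter classes above are normally reached through the public pyarrow.orc module rather than used directly. A minimal sketch, assuming a pyarrow build with ORC support (the keyword names mirror the options validated above; the file name is illustrative):

    import pyarrow as pa
    import pyarrow.orc as orc

    table = pa.table({"x": [1, 2, 3]})
    # write_table forwards these keywords to the option-validation code above
    orc.write_table(table, "data.orc", compression="zstd",
                    compression_block_size=64 * 1024, row_index_stride=10000)

    f = orc.ORCFile("data.orc")            # wraps the ORCReader shown above
    print(f.nrows, f.nstripes, f.compression)
    print(f.read(columns=["x"]))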
llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (593 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pxd ADDED
@@ -0,0 +1,674 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # distutils: language = c++
19
+ # cython: language_level = 3
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport (CChunkedArray, CScalar, CSchema, CStatus,
23
+ CTable, CMemoryPool, CBuffer,
24
+ CKeyValueMetadata, CRandomAccessFile,
25
+ COutputStream, CCacheOptions,
26
+ TimeUnit, CRecordBatchReader)
27
+ from pyarrow.lib cimport _Weakrefable
28
+
29
+
30
+ cdef extern from "parquet/api/schema.h" namespace "parquet::schema" nogil:
31
+ cdef cppclass Node:
32
+ pass
33
+
34
+ cdef cppclass GroupNode(Node):
35
+ pass
36
+
37
+ cdef cppclass PrimitiveNode(Node):
38
+ pass
39
+
40
+ cdef cppclass ColumnPath:
41
+ c_string ToDotString()
42
+ vector[c_string] ToDotVector()
43
+
44
+
45
+ cdef extern from "parquet/api/schema.h" namespace "parquet" nogil:
46
+ enum ParquetType" parquet::Type::type":
47
+ ParquetType_BOOLEAN" parquet::Type::BOOLEAN"
48
+ ParquetType_INT32" parquet::Type::INT32"
49
+ ParquetType_INT64" parquet::Type::INT64"
50
+ ParquetType_INT96" parquet::Type::INT96"
51
+ ParquetType_FLOAT" parquet::Type::FLOAT"
52
+ ParquetType_DOUBLE" parquet::Type::DOUBLE"
53
+ ParquetType_BYTE_ARRAY" parquet::Type::BYTE_ARRAY"
54
+ ParquetType_FIXED_LEN_BYTE_ARRAY" parquet::Type::FIXED_LEN_BYTE_ARRAY"
55
+
56
+ enum ParquetLogicalTypeId" parquet::LogicalType::Type::type":
57
+ ParquetLogicalType_UNDEFINED" parquet::LogicalType::Type::UNDEFINED"
58
+ ParquetLogicalType_STRING" parquet::LogicalType::Type::STRING"
59
+ ParquetLogicalType_MAP" parquet::LogicalType::Type::MAP"
60
+ ParquetLogicalType_LIST" parquet::LogicalType::Type::LIST"
61
+ ParquetLogicalType_ENUM" parquet::LogicalType::Type::ENUM"
62
+ ParquetLogicalType_DECIMAL" parquet::LogicalType::Type::DECIMAL"
63
+ ParquetLogicalType_DATE" parquet::LogicalType::Type::DATE"
64
+ ParquetLogicalType_TIME" parquet::LogicalType::Type::TIME"
65
+ ParquetLogicalType_TIMESTAMP" parquet::LogicalType::Type::TIMESTAMP"
66
+ ParquetLogicalType_INT" parquet::LogicalType::Type::INT"
67
+ ParquetLogicalType_JSON" parquet::LogicalType::Type::JSON"
68
+ ParquetLogicalType_BSON" parquet::LogicalType::Type::BSON"
69
+ ParquetLogicalType_UUID" parquet::LogicalType::Type::UUID"
70
+ ParquetLogicalType_NONE" parquet::LogicalType::Type::NONE"
71
+
72
+ enum ParquetTimeUnit" parquet::LogicalType::TimeUnit::unit":
73
+ ParquetTimeUnit_UNKNOWN" parquet::LogicalType::TimeUnit::UNKNOWN"
74
+ ParquetTimeUnit_MILLIS" parquet::LogicalType::TimeUnit::MILLIS"
75
+ ParquetTimeUnit_MICROS" parquet::LogicalType::TimeUnit::MICROS"
76
+ ParquetTimeUnit_NANOS" parquet::LogicalType::TimeUnit::NANOS"
77
+
78
+ enum ParquetConvertedType" parquet::ConvertedType::type":
79
+ ParquetConvertedType_NONE" parquet::ConvertedType::NONE"
80
+ ParquetConvertedType_UTF8" parquet::ConvertedType::UTF8"
81
+ ParquetConvertedType_MAP" parquet::ConvertedType::MAP"
82
+ ParquetConvertedType_MAP_KEY_VALUE \
83
+ " parquet::ConvertedType::MAP_KEY_VALUE"
84
+ ParquetConvertedType_LIST" parquet::ConvertedType::LIST"
85
+ ParquetConvertedType_ENUM" parquet::ConvertedType::ENUM"
86
+ ParquetConvertedType_DECIMAL" parquet::ConvertedType::DECIMAL"
87
+ ParquetConvertedType_DATE" parquet::ConvertedType::DATE"
88
+ ParquetConvertedType_TIME_MILLIS" parquet::ConvertedType::TIME_MILLIS"
89
+ ParquetConvertedType_TIME_MICROS" parquet::ConvertedType::TIME_MICROS"
90
+ ParquetConvertedType_TIMESTAMP_MILLIS \
91
+ " parquet::ConvertedType::TIMESTAMP_MILLIS"
92
+ ParquetConvertedType_TIMESTAMP_MICROS \
93
+ " parquet::ConvertedType::TIMESTAMP_MICROS"
94
+ ParquetConvertedType_UINT_8" parquet::ConvertedType::UINT_8"
95
+ ParquetConvertedType_UINT_16" parquet::ConvertedType::UINT_16"
96
+ ParquetConvertedType_UINT_32" parquet::ConvertedType::UINT_32"
97
+ ParquetConvertedType_UINT_64" parquet::ConvertedType::UINT_64"
98
+ ParquetConvertedType_INT_8" parquet::ConvertedType::INT_8"
99
+ ParquetConvertedType_INT_16" parquet::ConvertedType::INT_16"
100
+ ParquetConvertedType_INT_32" parquet::ConvertedType::INT_32"
101
+ ParquetConvertedType_INT_64" parquet::ConvertedType::INT_64"
102
+ ParquetConvertedType_JSON" parquet::ConvertedType::JSON"
103
+ ParquetConvertedType_BSON" parquet::ConvertedType::BSON"
104
+ ParquetConvertedType_INTERVAL" parquet::ConvertedType::INTERVAL"
105
+
106
+ enum ParquetRepetition" parquet::Repetition::type":
107
+ ParquetRepetition_REQUIRED" parquet::REPETITION::REQUIRED"
108
+ ParquetRepetition_OPTIONAL" parquet::REPETITION::OPTIONAL"
109
+ ParquetRepetition_REPEATED" parquet::REPETITION::REPEATED"
110
+
111
+ enum ParquetEncoding" parquet::Encoding::type":
112
+ ParquetEncoding_PLAIN" parquet::Encoding::PLAIN"
113
+ ParquetEncoding_PLAIN_DICTIONARY" parquet::Encoding::PLAIN_DICTIONARY"
114
+ ParquetEncoding_RLE" parquet::Encoding::RLE"
115
+ ParquetEncoding_BIT_PACKED" parquet::Encoding::BIT_PACKED"
116
+ ParquetEncoding_DELTA_BINARY_PACKED \
117
+ " parquet::Encoding::DELTA_BINARY_PACKED"
118
+ ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY \
119
+ " parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY"
120
+ ParquetEncoding_DELTA_BYTE_ARRAY" parquet::Encoding::DELTA_BYTE_ARRAY"
121
+ ParquetEncoding_RLE_DICTIONARY" parquet::Encoding::RLE_DICTIONARY"
122
+ ParquetEncoding_BYTE_STREAM_SPLIT \
123
+ " parquet::Encoding::BYTE_STREAM_SPLIT"
124
+
125
+ enum ParquetCompression" parquet::Compression::type":
126
+ ParquetCompression_UNCOMPRESSED" parquet::Compression::UNCOMPRESSED"
127
+ ParquetCompression_SNAPPY" parquet::Compression::SNAPPY"
128
+ ParquetCompression_GZIP" parquet::Compression::GZIP"
129
+ ParquetCompression_LZO" parquet::Compression::LZO"
130
+ ParquetCompression_BROTLI" parquet::Compression::BROTLI"
131
+ ParquetCompression_LZ4" parquet::Compression::LZ4"
132
+ ParquetCompression_ZSTD" parquet::Compression::ZSTD"
133
+
134
+ enum ParquetVersion" parquet::ParquetVersion::type":
135
+ ParquetVersion_V1" parquet::ParquetVersion::PARQUET_1_0"
136
+ ParquetVersion_V2_0" parquet::ParquetVersion::PARQUET_2_0"
137
+ ParquetVersion_V2_4" parquet::ParquetVersion::PARQUET_2_4"
138
+ ParquetVersion_V2_6" parquet::ParquetVersion::PARQUET_2_6"
139
+
140
+ enum ParquetSortOrder" parquet::SortOrder::type":
141
+ ParquetSortOrder_SIGNED" parquet::SortOrder::SIGNED"
142
+ ParquetSortOrder_UNSIGNED" parquet::SortOrder::UNSIGNED"
143
+ ParquetSortOrder_UNKNOWN" parquet::SortOrder::UNKNOWN"
144
+
145
+ cdef cppclass CParquetLogicalType" parquet::LogicalType":
146
+ c_string ToString() const
147
+ c_string ToJSON() const
148
+ ParquetLogicalTypeId type() const
149
+
150
+ cdef cppclass CParquetDecimalType \
151
+ " parquet::DecimalLogicalType"(CParquetLogicalType):
152
+ int32_t precision() const
153
+ int32_t scale() const
154
+
155
+ cdef cppclass CParquetIntType \
156
+ " parquet::IntLogicalType"(CParquetLogicalType):
157
+ int bit_width() const
158
+ c_bool is_signed() const
159
+
160
+ cdef cppclass CParquetTimeType \
161
+ " parquet::TimeLogicalType"(CParquetLogicalType):
162
+ c_bool is_adjusted_to_utc() const
163
+ ParquetTimeUnit time_unit() const
164
+
165
+ cdef cppclass CParquetTimestampType \
166
+ " parquet::TimestampLogicalType"(CParquetLogicalType):
167
+ c_bool is_adjusted_to_utc() const
168
+ ParquetTimeUnit time_unit() const
169
+
170
+ cdef cppclass ColumnDescriptor" parquet::ColumnDescriptor":
171
+ c_bool Equals(const ColumnDescriptor& other)
172
+
173
+ shared_ptr[ColumnPath] path()
174
+ int16_t max_definition_level()
175
+ int16_t max_repetition_level()
176
+
177
+ ParquetType physical_type()
178
+ const shared_ptr[const CParquetLogicalType]& logical_type()
179
+ ParquetConvertedType converted_type()
180
+ const c_string& name()
181
+ int type_length()
182
+ int type_precision()
183
+ int type_scale()
184
+
185
+ cdef cppclass SchemaDescriptor:
186
+ const ColumnDescriptor* Column(int i)
187
+ shared_ptr[Node] schema()
188
+ GroupNode* group()
189
+ c_bool Equals(const SchemaDescriptor& other)
190
+ c_string ToString()
191
+ int num_columns()
192
+
193
+ cdef c_string FormatStatValue(ParquetType parquet_type, c_string val)
194
+
195
+ enum ParquetCipher" parquet::ParquetCipher::type":
196
+ ParquetCipher_AES_GCM_V1" parquet::ParquetCipher::AES_GCM_V1"
197
+ ParquetCipher_AES_GCM_CTR_V1" parquet::ParquetCipher::AES_GCM_CTR_V1"
198
+
199
+ struct AadMetadata:
200
+ c_string aad_prefix
201
+ c_string aad_file_unique
202
+ c_bool supply_aad_prefix
203
+
204
+ struct EncryptionAlgorithm:
205
+ ParquetCipher algorithm
206
+ AadMetadata aad
207
+
208
+ cdef extern from "parquet/api/reader.h" namespace "parquet" nogil:
209
+ cdef cppclass ColumnReader:
210
+ pass
211
+
212
+ cdef cppclass BoolReader(ColumnReader):
213
+ pass
214
+
215
+ cdef cppclass Int32Reader(ColumnReader):
216
+ pass
217
+
218
+ cdef cppclass Int64Reader(ColumnReader):
219
+ pass
220
+
221
+ cdef cppclass Int96Reader(ColumnReader):
222
+ pass
223
+
224
+ cdef cppclass FloatReader(ColumnReader):
225
+ pass
226
+
227
+ cdef cppclass DoubleReader(ColumnReader):
228
+ pass
229
+
230
+ cdef cppclass ByteArrayReader(ColumnReader):
231
+ pass
232
+
233
+ cdef cppclass RowGroupReader:
234
+ pass
235
+
236
+ cdef cppclass CEncodedStatistics" parquet::EncodedStatistics":
237
+ const c_string& max() const
238
+ const c_string& min() const
239
+ int64_t null_count
240
+ int64_t distinct_count
241
+ bint has_min
242
+ bint has_max
243
+ bint has_null_count
244
+ bint has_distinct_count
245
+
246
+ cdef cppclass ParquetByteArray" parquet::ByteArray":
247
+ uint32_t len
248
+ const uint8_t* ptr
249
+
250
+ cdef cppclass ParquetFLBA" parquet::FLBA":
251
+ const uint8_t* ptr
252
+
253
+ cdef cppclass CStatistics" parquet::Statistics":
254
+ int64_t null_count() const
255
+ int64_t distinct_count() const
256
+ int64_t num_values() const
257
+ bint HasMinMax()
258
+ bint HasNullCount()
259
+ bint HasDistinctCount()
260
+ c_bool Equals(const CStatistics&) const
261
+ void Reset()
262
+ c_string EncodeMin()
263
+ c_string EncodeMax()
264
+ CEncodedStatistics Encode()
265
+ void SetComparator()
266
+ ParquetType physical_type() const
267
+ const ColumnDescriptor* descr() const
268
+
269
+ cdef cppclass CBoolStatistics" parquet::BoolStatistics"(CStatistics):
270
+ c_bool min()
271
+ c_bool max()
272
+
273
+ cdef cppclass CInt32Statistics" parquet::Int32Statistics"(CStatistics):
274
+ int32_t min()
275
+ int32_t max()
276
+
277
+ cdef cppclass CInt64Statistics" parquet::Int64Statistics"(CStatistics):
278
+ int64_t min()
279
+ int64_t max()
280
+
281
+ cdef cppclass CFloatStatistics" parquet::FloatStatistics"(CStatistics):
282
+ float min()
283
+ float max()
284
+
285
+ cdef cppclass CDoubleStatistics" parquet::DoubleStatistics"(CStatistics):
286
+ double min()
287
+ double max()
288
+
289
+ cdef cppclass CByteArrayStatistics \
290
+ " parquet::ByteArrayStatistics"(CStatistics):
291
+ ParquetByteArray min()
292
+ ParquetByteArray max()
293
+
294
+ cdef cppclass CFLBAStatistics" parquet::FLBAStatistics"(CStatistics):
295
+ ParquetFLBA min()
296
+ ParquetFLBA max()
297
+
298
+ cdef cppclass CColumnCryptoMetaData" parquet::ColumnCryptoMetaData":
299
+ shared_ptr[ColumnPath] path_in_schema() const
300
+ c_bool encrypted_with_footer_key() const
301
+ const c_string& key_metadata() const
302
+
303
+ cdef cppclass ParquetIndexLocation" parquet::IndexLocation":
304
+ int64_t offset
305
+ int32_t length
306
+
307
+ cdef cppclass CColumnChunkMetaData" parquet::ColumnChunkMetaData":
308
+ int64_t file_offset() const
309
+ const c_string& file_path() const
310
+
311
+ c_bool is_metadata_set() const
312
+ ParquetType type() const
313
+ int64_t num_values() const
314
+ shared_ptr[ColumnPath] path_in_schema() const
315
+ bint is_stats_set() const
316
+ shared_ptr[CStatistics] statistics() const
317
+ ParquetCompression compression() const
318
+ const vector[ParquetEncoding]& encodings() const
319
+ c_bool Equals(const CColumnChunkMetaData&) const
320
+
321
+ int64_t has_dictionary_page() const
322
+ int64_t dictionary_page_offset() const
323
+ int64_t data_page_offset() const
324
+ int64_t index_page_offset() const
325
+ int64_t total_compressed_size() const
326
+ int64_t total_uncompressed_size() const
327
+ unique_ptr[CColumnCryptoMetaData] crypto_metadata() const
328
+ optional[ParquetIndexLocation] GetColumnIndexLocation() const
329
+ optional[ParquetIndexLocation] GetOffsetIndexLocation() const
330
+
331
+ struct CSortingColumn" parquet::SortingColumn":
332
+ int column_idx
333
+ c_bool descending
334
+ c_bool nulls_first
335
+
336
+ cdef cppclass CRowGroupMetaData" parquet::RowGroupMetaData":
337
+ c_bool Equals(const CRowGroupMetaData&) const
338
+ int num_columns() const
339
+ int64_t num_rows() const
340
+ int64_t total_byte_size() const
341
+ vector[CSortingColumn] sorting_columns() const
342
+ unique_ptr[CColumnChunkMetaData] ColumnChunk(int i) const
343
+
344
+ cdef cppclass CFileMetaData" parquet::FileMetaData":
345
+ c_bool Equals(const CFileMetaData&) const
346
+ uint32_t size()
347
+ int num_columns()
348
+ int64_t num_rows()
349
+ int num_row_groups()
350
+ ParquetVersion version()
351
+ const c_string created_by()
352
+ int num_schema_elements()
353
+
354
+ void set_file_path(const c_string& path)
355
+ void AppendRowGroups(const CFileMetaData& other) except +
356
+
357
+ unique_ptr[CRowGroupMetaData] RowGroup(int i)
358
+ const SchemaDescriptor* schema()
359
+ shared_ptr[const CKeyValueMetadata] key_value_metadata() const
360
+ void WriteTo(COutputStream* dst) const
361
+
362
+ inline c_bool is_encryption_algorithm_set() const
363
+ inline EncryptionAlgorithm encryption_algorithm() const
364
+ inline const c_string& footer_signing_key_metadata() const
365
+
366
+ cdef shared_ptr[CFileMetaData] CFileMetaData_Make \
367
+ " parquet::FileMetaData::Make"(const void* serialized_metadata,
368
+ uint32_t* metadata_len)
369
+
370
+ cdef cppclass CReaderProperties" parquet::ReaderProperties":
371
+ c_bool is_buffered_stream_enabled() const
372
+ void enable_buffered_stream()
373
+ void disable_buffered_stream()
374
+
375
+ void set_buffer_size(int64_t buf_size)
376
+ int64_t buffer_size() const
377
+
378
+ void set_thrift_string_size_limit(int32_t size)
379
+ int32_t thrift_string_size_limit() const
380
+
381
+ void set_thrift_container_size_limit(int32_t size)
382
+ int32_t thrift_container_size_limit() const
383
+
384
+ void file_decryption_properties(shared_ptr[CFileDecryptionProperties]
385
+ decryption)
386
+ shared_ptr[CFileDecryptionProperties] file_decryption_properties() \
387
+ const
388
+
389
+ c_bool page_checksum_verification() const
390
+ void set_page_checksum_verification(c_bool check_crc)
391
+
392
+ CReaderProperties default_reader_properties()
393
+
394
+ cdef cppclass ArrowReaderProperties:
395
+ ArrowReaderProperties()
396
+ void set_read_dictionary(int column_index, c_bool read_dict)
397
+ c_bool read_dictionary()
398
+ void set_batch_size(int64_t batch_size)
399
+ int64_t batch_size()
400
+ void set_pre_buffer(c_bool pre_buffer)
401
+ c_bool pre_buffer() const
402
+ void set_cache_options(CCacheOptions options)
403
+ CCacheOptions cache_options() const
404
+ void set_coerce_int96_timestamp_unit(TimeUnit unit)
405
+ TimeUnit coerce_int96_timestamp_unit() const
406
+
407
+ ArrowReaderProperties default_arrow_reader_properties()
408
+
409
+ cdef cppclass ParquetFileReader:
410
+ shared_ptr[CFileMetaData] metadata()
411
+
412
+
413
+ cdef extern from "parquet/api/writer.h" namespace "parquet" nogil:
414
+ cdef cppclass WriterProperties:
415
+ cppclass Builder:
416
+ Builder* data_page_version(ParquetDataPageVersion version)
417
+ Builder* version(ParquetVersion version)
418
+ Builder* compression(ParquetCompression codec)
419
+ Builder* compression(const c_string& path,
420
+ ParquetCompression codec)
421
+ Builder* compression_level(int compression_level)
422
+ Builder* compression_level(const c_string& path,
423
+ int compression_level)
424
+ Builder* encryption(
425
+ shared_ptr[CFileEncryptionProperties]
426
+ file_encryption_properties)
427
+ Builder* disable_dictionary()
428
+ Builder* enable_dictionary()
429
+ Builder* enable_dictionary(const c_string& path)
430
+ Builder* set_sorting_columns(vector[CSortingColumn] sorting_columns)
431
+ Builder* disable_statistics()
432
+ Builder* enable_statistics()
433
+ Builder* enable_statistics(const c_string& path)
434
+ Builder* data_pagesize(int64_t size)
435
+ Builder* encoding(ParquetEncoding encoding)
436
+ Builder* encoding(const c_string& path,
437
+ ParquetEncoding encoding)
438
+ Builder* max_row_group_length(int64_t size)
439
+ Builder* write_batch_size(int64_t batch_size)
440
+ Builder* dictionary_pagesize_limit(int64_t dictionary_pagesize_limit)
441
+ Builder* enable_write_page_index()
442
+ Builder* disable_write_page_index()
443
+ Builder* enable_page_checksum()
444
+ Builder* disable_page_checksum()
445
+ shared_ptr[WriterProperties] build()
446
+
447
+ cdef cppclass ArrowWriterProperties:
448
+ cppclass Builder:
449
+ Builder()
450
+ Builder* disable_deprecated_int96_timestamps()
451
+ Builder* enable_deprecated_int96_timestamps()
452
+ Builder* coerce_timestamps(TimeUnit unit)
453
+ Builder* allow_truncated_timestamps()
454
+ Builder* disallow_truncated_timestamps()
455
+ Builder* store_schema()
456
+ Builder* enable_compliant_nested_types()
457
+ Builder* disable_compliant_nested_types()
458
+ Builder* set_engine_version(ArrowWriterEngineVersion version)
459
+ shared_ptr[ArrowWriterProperties] build()
460
+ c_bool support_deprecated_int96_timestamps()
461
+
462
+
463
+ cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil:
464
+ cdef cppclass FileReader:
465
+ FileReader(CMemoryPool* pool, unique_ptr[ParquetFileReader] reader)
466
+
467
+ CStatus GetSchema(shared_ptr[CSchema]* out)
468
+
469
+ CStatus ReadColumn(int i, shared_ptr[CChunkedArray]* out)
470
+ CStatus ReadSchemaField(int i, shared_ptr[CChunkedArray]* out)
471
+
472
+ int num_row_groups()
473
+ CStatus ReadRowGroup(int i, shared_ptr[CTable]* out)
474
+ CStatus ReadRowGroup(int i, const vector[int]& column_indices,
475
+ shared_ptr[CTable]* out)
476
+
477
+ CStatus ReadRowGroups(const vector[int]& row_groups,
478
+ shared_ptr[CTable]* out)
479
+ CStatus ReadRowGroups(const vector[int]& row_groups,
480
+ const vector[int]& column_indices,
481
+ shared_ptr[CTable]* out)
482
+
483
+ CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
484
+ const vector[int]& column_indices,
485
+ unique_ptr[CRecordBatchReader]* out)
486
+ CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
487
+ unique_ptr[CRecordBatchReader]* out)
488
+
489
+ CStatus ReadTable(shared_ptr[CTable]* out)
490
+ CStatus ReadTable(const vector[int]& column_indices,
491
+ shared_ptr[CTable]* out)
492
+
493
+ CStatus ScanContents(vector[int] columns, int32_t column_batch_size,
494
+ int64_t* num_rows)
495
+
496
+ const ParquetFileReader* parquet_reader()
497
+
498
+ void set_use_threads(c_bool use_threads)
499
+
500
+ void set_batch_size(int64_t batch_size)
501
+
502
+ cdef cppclass FileReaderBuilder:
503
+ FileReaderBuilder()
504
+ CStatus Open(const shared_ptr[CRandomAccessFile]& file,
505
+ const CReaderProperties& properties,
506
+ const shared_ptr[CFileMetaData]& metadata)
507
+
508
+ ParquetFileReader* raw_reader()
509
+ FileReaderBuilder* memory_pool(CMemoryPool*)
510
+ FileReaderBuilder* properties(const ArrowReaderProperties&)
511
+ CStatus Build(unique_ptr[FileReader]* out)
512
+
513
+ CStatus FromParquetSchema(
514
+ const SchemaDescriptor* parquet_schema,
515
+ const ArrowReaderProperties& properties,
516
+ const shared_ptr[const CKeyValueMetadata]& key_value_metadata,
517
+ shared_ptr[CSchema]* out)
518
+
519
+ CStatus StatisticsAsScalars(const CStatistics& Statistics,
520
+ shared_ptr[CScalar]* min,
521
+ shared_ptr[CScalar]* max)
522
+
523
+ cdef extern from "parquet/arrow/schema.h" namespace "parquet::arrow" nogil:
524
+
525
+ CStatus ToParquetSchema(
526
+ const CSchema* arrow_schema,
527
+ const WriterProperties& properties,
528
+ const ArrowWriterProperties& arrow_properties,
529
+ shared_ptr[SchemaDescriptor]* out)
530
+
531
+
532
+ cdef extern from "parquet/properties.h" namespace "parquet" nogil:
533
+ cdef enum ArrowWriterEngineVersion:
534
+ V1 "parquet::ArrowWriterProperties::V1",
535
+ V2 "parquet::ArrowWriterProperties::V2"
536
+
537
+ cdef cppclass ParquetDataPageVersion:
538
+ pass
539
+
540
+ cdef ParquetDataPageVersion ParquetDataPageVersion_V1 \
541
+ " parquet::ParquetDataPageVersion::V1"
542
+ cdef ParquetDataPageVersion ParquetDataPageVersion_V2 \
543
+ " parquet::ParquetDataPageVersion::V2"
544
+
545
+ cdef extern from "parquet/arrow/writer.h" namespace "parquet::arrow" nogil:
546
+ cdef cppclass FileWriter:
547
+
548
+ @staticmethod
549
+ CResult[unique_ptr[FileWriter]] Open(const CSchema& schema, CMemoryPool* pool,
550
+ const shared_ptr[COutputStream]& sink,
551
+ const shared_ptr[WriterProperties]& properties,
552
+ const shared_ptr[ArrowWriterProperties]& arrow_properties)
553
+
554
+ CStatus WriteTable(const CTable& table, int64_t chunk_size)
555
+ CStatus NewRowGroup(int64_t chunk_size)
556
+ CStatus Close()
557
+
558
+ const shared_ptr[CFileMetaData] metadata() const
559
+
560
+ CStatus WriteMetaDataFile(
561
+ const CFileMetaData& file_metadata,
562
+ const COutputStream* sink)
563
+
564
+ cdef class FileEncryptionProperties:
565
+ """File-level encryption properties for the low-level API"""
566
+ cdef:
567
+ shared_ptr[CFileEncryptionProperties] properties
568
+
569
+ @staticmethod
570
+ cdef inline FileEncryptionProperties wrap(
571
+ shared_ptr[CFileEncryptionProperties] properties):
572
+
573
+ result = FileEncryptionProperties()
574
+ result.properties = properties
575
+ return result
576
+
577
+ cdef inline shared_ptr[CFileEncryptionProperties] unwrap(self):
578
+ return self.properties
579
+
580
+ cdef shared_ptr[WriterProperties] _create_writer_properties(
581
+ use_dictionary=*,
582
+ compression=*,
583
+ version=*,
584
+ write_statistics=*,
585
+ data_page_size=*,
586
+ compression_level=*,
587
+ use_byte_stream_split=*,
588
+ column_encoding=*,
589
+ data_page_version=*,
590
+ FileEncryptionProperties encryption_properties=*,
591
+ write_batch_size=*,
592
+ dictionary_pagesize_limit=*,
593
+ write_page_index=*,
594
+ write_page_checksum=*,
595
+ sorting_columns=*,
596
+ ) except *
597
+
598
+
599
+ cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
600
+ use_deprecated_int96_timestamps=*,
601
+ coerce_timestamps=*,
602
+ allow_truncated_timestamps=*,
603
+ writer_engine_version=*,
604
+ use_compliant_nested_type=*,
605
+ store_schema=*,
606
+ ) except *
607
+
608
+ cdef class ParquetSchema(_Weakrefable):
609
+ cdef:
610
+ FileMetaData parent # the FileMetaData owning the SchemaDescriptor
611
+ const SchemaDescriptor* schema
612
+
613
+ cdef class FileMetaData(_Weakrefable):
614
+ cdef:
615
+ shared_ptr[CFileMetaData] sp_metadata
616
+ CFileMetaData* _metadata
617
+ ParquetSchema _schema
618
+
619
+ cdef inline init(self, const shared_ptr[CFileMetaData]& metadata):
620
+ self.sp_metadata = metadata
621
+ self._metadata = metadata.get()
622
+
623
+ cdef class RowGroupMetaData(_Weakrefable):
624
+ cdef:
625
+ int index # for pickling support
626
+ unique_ptr[CRowGroupMetaData] up_metadata
627
+ CRowGroupMetaData* metadata
628
+ FileMetaData parent
629
+
630
+ cdef class ColumnChunkMetaData(_Weakrefable):
631
+ cdef:
632
+ unique_ptr[CColumnChunkMetaData] up_metadata
633
+ CColumnChunkMetaData* metadata
634
+ RowGroupMetaData parent
635
+
636
+ cdef inline init(self, RowGroupMetaData parent, int i):
637
+ self.up_metadata = parent.metadata.ColumnChunk(i)
638
+ self.metadata = self.up_metadata.get()
639
+ self.parent = parent
640
+
641
+ cdef class Statistics(_Weakrefable):
642
+ cdef:
643
+ shared_ptr[CStatistics] statistics
644
+ ColumnChunkMetaData parent
645
+
646
+ cdef inline init(self, const shared_ptr[CStatistics]& statistics,
647
+ ColumnChunkMetaData parent):
648
+ self.statistics = statistics
649
+ self.parent = parent
650
+
651
+ cdef extern from "parquet/encryption/encryption.h" namespace "parquet" nogil:
652
+ cdef cppclass CFileDecryptionProperties\
653
+ " parquet::FileDecryptionProperties":
654
+ pass
655
+
656
+ cdef cppclass CFileEncryptionProperties\
657
+ " parquet::FileEncryptionProperties":
658
+ pass
659
+
660
+ cdef class FileDecryptionProperties:
661
+ """File-level decryption properties for the low-level API"""
662
+ cdef:
663
+ shared_ptr[CFileDecryptionProperties] properties
664
+
665
+ @staticmethod
666
+ cdef inline FileDecryptionProperties wrap(
667
+ shared_ptr[CFileDecryptionProperties] properties):
668
+
669
+ result = FileDecryptionProperties()
670
+ result.properties = properties
671
+ return result
672
+
673
+ cdef inline shared_ptr[CFileDecryptionProperties] unwrap(self):
674
+ return self.properties
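Note: these declarations back the keyword arguments of the high-level Parquet writer. A hedged sketch of how the WriterProperties/ArrowWriterProperties builders above surface through pyarrow.parquet (file name is illustrative):

    import pyarrow as pa
    import pyarrow.parquet as pq

    table = pa.table({"x": [1, 2, 3], "s": ["a", "b", "c"]})
    # Each keyword maps onto a Builder call declared above, e.g.
    # compression -> Builder.compression, version -> Builder.version,
    # data_page_size -> Builder.data_pagesize, write_statistics -> enable_statistics
    pq.write_table(table, "data.parquet",
                   compression="zstd", compression_level=5,
                   version="2.6", data_page_size=1 << 20,
                   write_statistics=True)

    meta = pq.read_metadata("data.parquet")   # wraps CFileMetaData above
    print(meta.num_rows, meta.num_row_groups, meta.created_by)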
llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (280 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx ADDED
@@ -0,0 +1,62 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: profile=False, binding=True
19
+ # distutils: language = c++
20
+
21
+ from pyarrow.includes.common cimport *
22
+ from pyarrow.includes.libarrow cimport *
23
+ from pyarrow.lib cimport check_status
24
+
25
+ from pyarrow.lib import frombytes
26
+
27
+
28
+ cdef class CppTestCase:
29
+ """
30
+ A simple wrapper for a C++ test case.
31
+ """
32
+ cdef:
33
+ CTestCase c_case
34
+
35
+ @staticmethod
36
+ cdef wrap(CTestCase c_case):
37
+ cdef:
38
+ CppTestCase obj
39
+ obj = CppTestCase.__new__(CppTestCase)
40
+ obj.c_case = c_case
41
+ return obj
42
+
43
+ @property
44
+ def name(self):
45
+ return frombytes(self.c_case.name)
46
+
47
+ def __repr__(self):
48
+ return f"<{self.__class__.__name__} {self.name!r}>"
49
+
50
+ def __call__(self):
51
+ check_status(self.c_case.func())
52
+
53
+
54
+ def get_cpp_tests():
55
+ """
56
+ Get a list of C++ test cases.
57
+ """
58
+ cases = []
59
+ c_cases = GetCppTestCases()
60
+ for c_case in c_cases:
61
+ cases.append(CppTestCase.wrap(c_case))
62
+ return cases
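Note: a short sketch of how this helper is typically driven from Python test code, assuming the module path installed above; calling a case raises if the underlying C++ check fails:

    from pyarrow._pyarrow_cpp_tests import get_cpp_tests

    for case in get_cpp_tests():
        print("running", case.name)
        case()   # check_status raises an exception on C++ test failure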
llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (226 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (185 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pyarrow/_substrait.pyx ADDED
@@ -0,0 +1,349 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # cython: language_level = 3
19
+ from cython.operator cimport dereference as deref
20
+ from libcpp.vector cimport vector as std_vector
21
+
22
+ from pyarrow import Buffer, py_buffer
23
+ from pyarrow._compute cimport Expression
24
+ from pyarrow.lib import frombytes, tobytes
25
+ from pyarrow.lib cimport *
26
+ from pyarrow.includes.libarrow cimport *
27
+ from pyarrow.includes.libarrow_substrait cimport *
28
+
29
+
30
+ # TODO GH-37235: Fix exception handling
31
+ cdef CDeclaration _create_named_table_provider(
32
+ dict named_args, const std_vector[c_string]& names, const CSchema& schema
33
+ ) noexcept:
34
+ cdef:
35
+ c_string c_name
36
+ shared_ptr[CTable] c_in_table
37
+ shared_ptr[CTableSourceNodeOptions] c_tablesourceopts
38
+ shared_ptr[CExecNodeOptions] c_input_node_opts
39
+ vector[CDeclaration.Input] no_c_inputs
40
+
41
+ py_names = []
42
+ for i in range(names.size()):
43
+ c_name = names[i]
44
+ py_names.append(frombytes(c_name))
45
+ py_schema = pyarrow_wrap_schema(make_shared[CSchema](schema))
46
+
47
+ py_table = named_args["provider"](py_names, py_schema)
48
+ c_in_table = pyarrow_unwrap_table(py_table)
49
+ c_tablesourceopts = make_shared[CTableSourceNodeOptions](c_in_table)
50
+ c_input_node_opts = static_pointer_cast[CExecNodeOptions, CTableSourceNodeOptions](
51
+ c_tablesourceopts)
52
+ return CDeclaration(tobytes("table_source"),
53
+ no_c_inputs, c_input_node_opts)
54
+
55
+
56
+ def run_query(plan, *, table_provider=None, use_threads=True):
57
+ """
58
+ Execute a Substrait plan and read the results as a RecordBatchReader.
59
+
60
+ Parameters
61
+ ----------
62
+ plan : Union[Buffer, bytes]
63
+ The serialized Substrait plan to execute.
64
+ table_provider : object (optional)
65
+ A function to resolve any NamedTable relation to a table.
66
+ The function will receive two arguments which will be a list
67
+ of strings representing the table name and a pyarrow.Schema representing
68
+ the expected schema and should return a pyarrow.Table.
69
+ use_threads : bool, default True
70
+ If True then multiple threads will be used to run the query. If False then
71
+ all CPU intensive work will be done on the calling thread.
72
+
73
+ Returns
74
+ -------
75
+ RecordBatchReader
76
+ A reader containing the result of the executed query
77
+
78
+ Examples
79
+ --------
80
+ >>> import pyarrow as pa
81
+ >>> from pyarrow.lib import tobytes
82
+ >>> import pyarrow.substrait as substrait
83
+ >>> test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]})
84
+ >>> test_table_2 = pa.Table.from_pydict({"x": [4, 5, 6]})
85
+ >>> def table_provider(names, schema):
86
+ ... if not names:
87
+ ... raise Exception("No names provided")
88
+ ... elif names[0] == "t1":
89
+ ... return test_table_1
90
+ ... elif names[1] == "t2":
91
+ ... return test_table_2
92
+ ... else:
93
+ ... raise Exception("Unrecognized table name")
94
+ ...
95
+ >>> substrait_query = '''
96
+ ... {
97
+ ... "relations": [
98
+ ... {"rel": {
99
+ ... "read": {
100
+ ... "base_schema": {
101
+ ... "struct": {
102
+ ... "types": [
103
+ ... {"i64": {}}
104
+ ... ]
105
+ ... },
106
+ ... "names": [
107
+ ... "x"
108
+ ... ]
109
+ ... },
110
+ ... "namedTable": {
111
+ ... "names": ["t1"]
112
+ ... }
113
+ ... }
114
+ ... }}
115
+ ... ]
116
+ ... }
117
+ ... '''
118
+ >>> buf = pa._substrait._parse_json_plan(tobytes(substrait_query))
119
+ >>> reader = pa.substrait.run_query(buf, table_provider=table_provider)
120
+ >>> reader.read_all()
121
+ pyarrow.Table
122
+ x: int64
123
+ ----
124
+ x: [[1,2,3]]
125
+ """
126
+
127
+ cdef:
128
+ CResult[shared_ptr[CRecordBatchReader]] c_res_reader
129
+ shared_ptr[CRecordBatchReader] c_reader
130
+ RecordBatchReader reader
131
+ shared_ptr[CBuffer] c_buf_plan
132
+ CConversionOptions c_conversion_options
133
+ c_bool c_use_threads
134
+
135
+ c_use_threads = use_threads
136
+ if isinstance(plan, bytes):
137
+ c_buf_plan = pyarrow_unwrap_buffer(py_buffer(plan))
138
+ elif isinstance(plan, Buffer):
139
+ c_buf_plan = pyarrow_unwrap_buffer(plan)
140
+ else:
141
+ raise TypeError(
142
+ f"Expected 'pyarrow.Buffer' or bytes, got '{type(plan)}'")
143
+
144
+ if table_provider is not None:
145
+ named_table_args = {
146
+ "provider": table_provider
147
+ }
148
+ c_conversion_options.named_table_provider = BindFunction[CNamedTableProvider](
149
+ &_create_named_table_provider, named_table_args)
150
+
151
+ with nogil:
152
+ c_res_reader = ExecuteSerializedPlan(
153
+ deref(c_buf_plan), default_extension_id_registry(),
154
+ GetFunctionRegistry(), c_conversion_options, c_use_threads)
155
+
156
+ c_reader = GetResultValue(c_res_reader)
157
+
158
+ reader = RecordBatchReader.__new__(RecordBatchReader)
159
+ reader.reader = c_reader
160
+ return reader
161
+
162
+
163
+ def _parse_json_plan(plan):
164
+ """
165
+ Parse a JSON plan into equivalent serialized Protobuf.
166
+
167
+ Parameters
168
+ ----------
169
+ plan : bytes
170
+ Substrait plan in JSON.
171
+
172
+ Returns
173
+ -------
174
+ Buffer
175
+ A buffer containing the serialized Protobuf plan.
176
+ """
177
+
178
+ cdef:
179
+ CResult[shared_ptr[CBuffer]] c_res_buffer
180
+ c_string c_str_plan
181
+ shared_ptr[CBuffer] c_buf_plan
182
+
183
+ c_str_plan = plan
184
+ c_res_buffer = SerializeJsonPlan(c_str_plan)
185
+ with nogil:
186
+ c_buf_plan = GetResultValue(c_res_buffer)
187
+ return pyarrow_wrap_buffer(c_buf_plan)
188
+
189
+
190
+ def serialize_expressions(exprs, names, schema, *, allow_arrow_extensions=False):
191
+ """
192
+ Serialize a collection of expressions into Substrait
193
+
194
+ Substrait expressions must be bound to a schema. For example,
195
+ the Substrait expression ``a:i32 + b:i32`` is different from the
196
+ Substrait expression ``a:i64 + b:i64``. Pyarrow expressions are
197
+ typically unbound. For example, both of the above expressions
198
+ would be represented as ``a + b`` in pyarrow.
199
+
200
+ This means a schema must be provided when serializing an expression.
201
+ It also means that the serialization may fail if a matching function
202
+ call cannot be found for the expression.
203
+
204
+ Parameters
205
+ ----------
206
+ exprs : list of Expression
207
+ The expressions to serialize
208
+ names : list of str
209
+ Names for the expressions
210
+ schema : Schema
211
+ The schema the expressions will be bound to
212
+ allow_arrow_extensions : bool, default False
213
+ If False then only functions that are part of the core Substrait function
214
+ definitions will be allowed. Set this to True to allow pyarrow-specific functions
215
+ and user defined functions but the result may not be accepted by other
216
+ compute libraries.
217
+
218
+ Returns
219
+ -------
220
+ Buffer
221
+ An ExtendedExpression message containing the serialized expressions
222
+ """
223
+ cdef:
224
+ CResult[shared_ptr[CBuffer]] c_res_buffer
225
+ shared_ptr[CBuffer] c_buffer
226
+ CNamedExpression c_named_expr
227
+ CBoundExpressions c_bound_exprs
228
+ CConversionOptions c_conversion_options
229
+
230
+ if len(exprs) != len(names):
231
+ raise ValueError("exprs and names need to have the same length")
232
+ for expr, name in zip(exprs, names):
233
+ if not isinstance(expr, Expression):
234
+ raise TypeError(f"Expected Expression, got '{type(expr)}' in exprs")
235
+ if not isinstance(name, str):
236
+ raise TypeError(f"Expected str, got '{type(name)}' in names")
237
+ c_named_expr.expression = (<Expression> expr).unwrap()
238
+ c_named_expr.name = tobytes(<str> name)
239
+ c_bound_exprs.named_expressions.push_back(c_named_expr)
240
+
241
+ c_bound_exprs.schema = (<Schema> schema).sp_schema
242
+
243
+ c_conversion_options.allow_arrow_extensions = allow_arrow_extensions
244
+
245
+ with nogil:
246
+ c_res_buffer = SerializeExpressions(c_bound_exprs, c_conversion_options)
247
+ c_buffer = GetResultValue(c_res_buffer)
248
+ return pyarrow_wrap_buffer(c_buffer)
249
+
250
+
251
+ cdef class BoundExpressions(_Weakrefable):
252
+ """
253
+ A collection of named expressions and the schema they are bound to
254
+
255
+ This is equivalent to the Substrait ExtendedExpression message
256
+ """
257
+
258
+ cdef:
259
+ CBoundExpressions c_bound_exprs
260
+
261
+ def __init__(self):
262
+ msg = 'BoundExpressions is an abstract class thus cannot be initialized.'
263
+ raise TypeError(msg)
264
+
265
+ cdef void init(self, CBoundExpressions bound_expressions):
266
+ self.c_bound_exprs = bound_expressions
267
+
268
+ @property
269
+ def schema(self):
270
+ """
271
+ The common schema that all expressions are bound to
272
+ """
273
+ return pyarrow_wrap_schema(self.c_bound_exprs.schema)
274
+
275
+ @property
276
+ def expressions(self):
277
+ """
278
+ A dict from expression name to expression
279
+ """
280
+ expr_dict = {}
281
+ for named_expr in self.c_bound_exprs.named_expressions:
282
+ name = frombytes(named_expr.name)
283
+ expr = Expression.wrap(named_expr.expression)
284
+ expr_dict[name] = expr
285
+ return expr_dict
286
+
287
+ @staticmethod
288
+ cdef wrap(const CBoundExpressions& bound_expressions):
289
+ cdef BoundExpressions self = BoundExpressions.__new__(BoundExpressions)
290
+ self.init(bound_expressions)
291
+ return self
292
+
293
+
294
+ def deserialize_expressions(buf):
295
+ """
296
+ Deserialize an ExtendedExpression Substrait message into a BoundExpressions object
297
+
298
+ Parameters
299
+ ----------
300
+ buf : Buffer or bytes
301
+ The message to deserialize
302
+
303
+ Returns
304
+ -------
305
+ BoundExpressions
306
+ The deserialized expressions, their names, and the bound schema
307
+ """
308
+ cdef:
309
+ shared_ptr[CBuffer] c_buffer
310
+ CResult[CBoundExpressions] c_res_bound_exprs
311
+ CBoundExpressions c_bound_exprs
312
+
313
+ if isinstance(buf, bytes):
314
+ c_buffer = pyarrow_unwrap_buffer(py_buffer(buf))
315
+ elif isinstance(buf, Buffer):
316
+ c_buffer = pyarrow_unwrap_buffer(buf)
317
+ else:
318
+ raise TypeError(
319
+ f"Expected 'pyarrow.Buffer' or bytes, got '{type(buf)}'")
320
+
321
+ with nogil:
322
+ c_res_bound_exprs = DeserializeExpressions(deref(c_buffer))
323
+ c_bound_exprs = GetResultValue(c_res_bound_exprs)
324
+
325
+ return BoundExpressions.wrap(c_bound_exprs)
326
+
327
+
328
+ def get_supported_functions():
329
+ """
330
+ Get a list of Substrait functions that the underlying
331
+ engine currently supports.
332
+
333
+ Returns
334
+ -------
335
+ list[str]
336
+ A list of function ids encoded as '{uri}#{name}'
337
+ """
338
+
339
+ cdef:
340
+ ExtensionIdRegistry* c_id_registry
341
+ std_vector[c_string] c_ids
342
+
343
+ c_id_registry = default_extension_id_registry()
344
+ c_ids = c_id_registry.GetSupportedSubstraitFunctions()
345
+
346
+ functions_list = []
347
+ for c_id in c_ids:
348
+ functions_list.append(frombytes(c_id))
349
+ return functions_list
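Note: a minimal round-trip sketch for the expression APIs above, assuming a pyarrow build with Substrait enabled and that these functions are re-exported from pyarrow.substrait:

    import pyarrow as pa
    import pyarrow.compute as pc
    import pyarrow.substrait as substrait

    schema = pa.schema([("a", pa.int64()), ("b", pa.int64())])
    expr = pc.field("a") + pc.field("b")            # unbound pyarrow expression

    buf = substrait.serialize_expressions([expr], ["a_plus_b"], schema)
    bound = substrait.deserialize_expressions(buf)  # BoundExpressions, see above
    print(bound.schema)
    print(bound.expressions["a_plus_b"])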
llmeval-env/lib/python3.10/site-packages/pyarrow/array.pxi ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/pyarrow/benchmark.pxi ADDED
@@ -0,0 +1,20 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ def benchmark_PandasObjectIsNull(list obj):
20
+ Benchmark_PandasObjectIsNull(obj)
llmeval-env/lib/python3.10/site-packages/pyarrow/benchmark.py ADDED
@@ -0,0 +1,21 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # flake8: noqa
19
+
20
+
21
+ from pyarrow.lib import benchmark_PandasObjectIsNull
llmeval-env/lib/python3.10/site-packages/pyarrow/builder.pxi ADDED
@@ -0,0 +1,148 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ cdef class StringBuilder(_Weakrefable):
20
+ """
21
+ Builder class for UTF8 strings.
22
+
23
+ This class exposes facilities for incrementally adding string values and
24
+ building the null bitmap for a pyarrow.Array (type='string').
25
+ """
26
+ cdef:
27
+ unique_ptr[CStringBuilder] builder
28
+
29
+ def __cinit__(self, MemoryPool memory_pool=None):
30
+ cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
31
+ self.builder.reset(new CStringBuilder(pool))
32
+
33
+ def append(self, value):
34
+ """
35
+ Append a single value to the builder.
36
+
37
+ The value can either be a string/bytes object or a null value
38
+ (np.nan or None).
39
+
40
+ Parameters
41
+ ----------
42
+ value : string/bytes or np.nan/None
43
+ The value to append to the string array builder.
44
+ """
45
+ if value is None or value is np.nan:
46
+ self.builder.get().AppendNull()
47
+ elif isinstance(value, (bytes, str)):
48
+ self.builder.get().Append(tobytes(value))
49
+ else:
50
+ raise TypeError('StringBuilder only accepts string objects')
51
+
52
+ def append_values(self, values):
53
+ """
54
+ Append all the values from an iterable.
55
+
56
+ Parameters
57
+ ----------
58
+ values : iterable of string/bytes or np.nan/None values
59
+ The values to append to the string array builder.
60
+ """
61
+ for value in values:
62
+ self.append(value)
63
+
64
+ def finish(self):
65
+ """
66
+ Return result of builder as an Array object; also resets the builder.
67
+
68
+ Returns
69
+ -------
70
+ array : pyarrow.Array
71
+ """
72
+ cdef shared_ptr[CArray] out
73
+ with nogil:
74
+ self.builder.get().Finish(&out)
75
+ return pyarrow_wrap_array(out)
76
+
77
+ @property
78
+ def null_count(self):
79
+ return self.builder.get().null_count()
80
+
81
+ def __len__(self):
82
+ return self.builder.get().length()
83
+
84
+
85
+ cdef class StringViewBuilder(_Weakrefable):
86
+ """
87
+ Builder class for UTF8 string views.
88
+
89
+ This class exposes facilities for incrementally adding string values and
90
+ building the null bitmap for a pyarrow.Array (type='string_view').
91
+ """
92
+ cdef:
93
+ unique_ptr[CStringViewBuilder] builder
94
+
95
+ def __cinit__(self, MemoryPool memory_pool=None):
96
+ cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool)
97
+ self.builder.reset(new CStringViewBuilder(pool))
98
+
99
+ def append(self, value):
100
+ """
101
+ Append a single value to the builder.
102
+
103
+ The value can either be a string/bytes object or a null value
104
+ (np.nan or None).
105
+
106
+ Parameters
107
+ ----------
108
+ value : string/bytes or np.nan/None
109
+ The value to append to the string array builder.
110
+ """
111
+ if value is None or value is np.nan:
112
+ self.builder.get().AppendNull()
113
+ elif isinstance(value, (bytes, str)):
114
+ self.builder.get().Append(tobytes(value))
115
+ else:
116
+ raise TypeError('StringViewBuilder only accepts string objects')
117
+
118
+ def append_values(self, values):
119
+ """
120
+ Append all the values from an iterable.
121
+
122
+ Parameters
123
+ ----------
124
+ values : iterable of string/bytes or np.nan/None values
125
+ The values to append to the string array builder.
126
+ """
127
+ for value in values:
128
+ self.append(value)
129
+
130
+ def finish(self):
131
+ """
132
+ Return result of builder as an Array object; also resets the builder.
133
+
134
+ Returns
135
+ -------
136
+ array : pyarrow.Array
137
+ """
138
+ cdef shared_ptr[CArray] out
139
+ with nogil:
140
+ self.builder.get().Finish(&out)
141
+ return pyarrow_wrap_array(out)
142
+
143
+ @property
144
+ def null_count(self):
145
+ return self.builder.get().null_count()
146
+
147
+ def __len__(self):
148
+ return self.builder.get().length()
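Note: a small usage sketch; the builders are compiled into pyarrow.lib, so they are reachable as shown below (not part of the documented public API):

    from pyarrow.lib import StringBuilder

    b = StringBuilder()
    b.append("foo")
    b.append(None)                    # recorded in the null bitmap
    b.append_values(["bar", "baz"])
    print(len(b), b.null_count)       # 4 1
    arr = b.finish()                  # pyarrow.Array of type string; builder is reset
    print(arr)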
llmeval-env/lib/python3.10/site-packages/pyarrow/cffi.py ADDED
@@ -0,0 +1,81 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import absolute_import
19
+
20
+ import cffi
21
+
22
+ c_source = """
23
+ struct ArrowSchema {
24
+ // Array type description
25
+ const char* format;
26
+ const char* name;
27
+ const char* metadata;
28
+ int64_t flags;
29
+ int64_t n_children;
30
+ struct ArrowSchema** children;
31
+ struct ArrowSchema* dictionary;
32
+
33
+ // Release callback
34
+ void (*release)(struct ArrowSchema*);
35
+ // Opaque producer-specific data
36
+ void* private_data;
37
+ };
38
+
39
+ struct ArrowArray {
40
+ // Array data description
41
+ int64_t length;
42
+ int64_t null_count;
43
+ int64_t offset;
44
+ int64_t n_buffers;
45
+ int64_t n_children;
46
+ const void** buffers;
47
+ struct ArrowArray** children;
48
+ struct ArrowArray* dictionary;
49
+
50
+ // Release callback
51
+ void (*release)(struct ArrowArray*);
52
+ // Opaque producer-specific data
53
+ void* private_data;
54
+ };
55
+
56
+ struct ArrowArrayStream {
57
+ int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);
58
+ int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);
59
+
60
+ const char* (*get_last_error)(struct ArrowArrayStream*);
61
+
62
+ // Release callback
63
+ void (*release)(struct ArrowArrayStream*);
64
+ // Opaque producer-specific data
65
+ void* private_data;
66
+ };
67
+
68
+ typedef int32_t ArrowDeviceType;
69
+
70
+ struct ArrowDeviceArray {
71
+ struct ArrowArray array;
72
+ int64_t device_id;
73
+ ArrowDeviceType device_type;
74
+ void* sync_event;
75
+ int64_t reserved[3];
76
+ };
77
+ """
78
+
79
+ # TODO use out-of-line mode for faster import and avoid C parsing
80
+ ffi = cffi.FFI()
81
+ ffi.cdef(c_source)
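Note: these cdef declarations mirror the Arrow C data interface, which pyarrow itself consumes through the private (but documented) _export_to_c/_import_from_c hooks. A hedged round-trip sketch:

    import pyarrow as pa
    from pyarrow.cffi import ffi

    c_schema = ffi.new("struct ArrowSchema*")
    c_array = ffi.new("struct ArrowArray*")

    arr = pa.array([1, 2, 3])
    arr._export_to_c(int(ffi.cast("uintptr_t", c_array)),
                     int(ffi.cast("uintptr_t", c_schema)))

    same = pa.Array._import_from_c(int(ffi.cast("uintptr_t", c_array)),
                                   int(ffi.cast("uintptr_t", c_schema)))
    assert same.equals(arr)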
llmeval-env/lib/python3.10/site-packages/pyarrow/config.pxi ADDED
@@ -0,0 +1,95 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from pyarrow.includes.libarrow cimport GetBuildInfo
19
+
20
+ from collections import namedtuple
21
+ import os
22
+
23
+
24
+ VersionInfo = namedtuple('VersionInfo', ('major', 'minor', 'patch'))
25
+
26
+ BuildInfo = namedtuple(
27
+ 'BuildInfo',
28
+ ('version', 'version_info', 'so_version', 'full_so_version',
29
+ 'compiler_id', 'compiler_version', 'compiler_flags',
30
+ 'git_id', 'git_description', 'package_kind', 'build_type'))
31
+
32
+ RuntimeInfo = namedtuple('RuntimeInfo',
33
+ ('simd_level', 'detected_simd_level'))
34
+
35
+ cdef _build_info():
36
+ cdef:
37
+ const CBuildInfo* c_info
38
+
39
+ c_info = &GetBuildInfo()
40
+
41
+ return BuildInfo(version=frombytes(c_info.version_string),
42
+ version_info=VersionInfo(c_info.version_major,
43
+ c_info.version_minor,
44
+ c_info.version_patch),
45
+ so_version=frombytes(c_info.so_version),
46
+ full_so_version=frombytes(c_info.full_so_version),
47
+ compiler_id=frombytes(c_info.compiler_id),
48
+ compiler_version=frombytes(c_info.compiler_version),
49
+ compiler_flags=frombytes(c_info.compiler_flags),
50
+ git_id=frombytes(c_info.git_id),
51
+ git_description=frombytes(c_info.git_description),
52
+ package_kind=frombytes(c_info.package_kind),
53
+ build_type=frombytes(c_info.build_type).lower(),
54
+ )
55
+
56
+
57
+ cpp_build_info = _build_info()
58
+ cpp_version = cpp_build_info.version
59
+ cpp_version_info = cpp_build_info.version_info
60
+
61
+
62
+ def runtime_info():
63
+ """
64
+ Get runtime information.
65
+
66
+ Returns
67
+ -------
68
+ info : pyarrow.RuntimeInfo
69
+ """
70
+ cdef:
71
+ CRuntimeInfo c_info
72
+
73
+ c_info = GetRuntimeInfo()
74
+
75
+ return RuntimeInfo(
76
+ simd_level=frombytes(c_info.simd_level),
77
+ detected_simd_level=frombytes(c_info.detected_simd_level))
78
+
79
+
80
+ def set_timezone_db_path(path):
81
+ """
82
+ Configure the path to text timezone database on Windows.
83
+
84
+ Parameters
85
+ ----------
86
+ path : str
87
+ Path to text timezone database.
88
+ """
89
+ cdef:
90
+ CGlobalOptions options
91
+
92
+ if path is not None:
93
+ options.timezone_db_path = <c_string>tobytes(path)
94
+
95
+ check_status(Initialize(options))
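Note: the values assembled here are exposed at the top level of the package; a quick sketch:

    import pyarrow as pa

    print(pa.cpp_build_info.version)        # C++ library version string
    print(pa.cpp_version_info.major)        # VersionInfo namedtuple field
    print(pa.runtime_info().simd_level)     # e.g. "avx2", per RuntimeInfo above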