Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/_multiprocess/__init__.py +8 -0
- env-llmeval/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/INSTALLER +1 -0
- env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/LICENSE.rst +28 -0
- env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/METADATA +103 -0
- env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/RECORD +39 -0
- env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/WHEEL +5 -0
- env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt +1 -0
- env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/__init__.py +20 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/audio.py +277 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/features.py +2167 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/image.py +376 -0
- env-llmeval/lib/python3.10/site-packages/datasets/features/translation.py +129 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/_datasets_server.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/_filelock.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/beam_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/download_manager.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/info_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/sharding.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/_multiprocess/__init__.py
ADDED
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2022-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE
+
+from _multiprocessing import *
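
This one-line shim re-exports the stdlib `_multiprocessing` C extension under the `_multiprocess` name. A quick interactive check (a sketch; assumes a CPython build that ships `_multiprocessing`):

```py
>>> import _multiprocess, _multiprocessing
>>> # every public name of the stdlib extension is re-exported by the shim
>>> all(hasattr(_multiprocess, name) for name in dir(_multiprocessing) if not name.startswith("_"))
True
```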
env-llmeval/lib/python3.10/site-packages/_multiprocess/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (211 Bytes).
env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/LICENSE.rst
ADDED
@@ -0,0 +1,28 @@
+Copyright 2014 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/METADATA
ADDED
@@ -0,0 +1,103 @@
+Metadata-Version: 2.1
+Name: click
+Version: 8.1.7
+Summary: Composable command line interface toolkit
+Home-page: https://palletsprojects.com/p/click/
+Maintainer: Pallets
+Maintainer-email: [email protected]
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://click.palletsprojects.com/
+Project-URL: Changes, https://click.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/click/
+Project-URL: Issue Tracker, https://github.com/pallets/click/issues/
+Project-URL: Chat, https://discord.gg/pallets
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE.rst
+Requires-Dist: colorama ; platform_system == "Windows"
+Requires-Dist: importlib-metadata ; python_version < "3.8"
+
+\$ click\_
+==========
+
+Click is a Python package for creating beautiful command line interfaces
+in a composable way with as little code as necessary. It's the "Command
+Line Interface Creation Kit". It's highly configurable but comes with
+sensible defaults out of the box.
+
+It aims to make the process of writing command line tools quick and fun
+while also preventing any frustration caused by the inability to
+implement an intended CLI API.
+
+Click in three points:
+
+-   Arbitrary nesting of commands
+-   Automatic help page generation
+-   Supports lazy loading of subcommands at runtime
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    $ pip install -U click
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+A Simple Example
+----------------
+
+.. code-block:: python
+
+    import click
+
+    @click.command()
+    @click.option("--count", default=1, help="Number of greetings.")
+    @click.option("--name", prompt="Your name", help="The person to greet.")
+    def hello(count, name):
+        """Simple program that greets NAME for a total of COUNT times."""
+        for _ in range(count):
+            click.echo(f"Hello, {name}!")
+
+    if __name__ == '__main__':
+        hello()
+
+.. code-block:: text
+
+    $ python hello.py --count=3
+    Your name: Click
+    Hello, Click!
+    Hello, Click!
+    Hello, Click!
+
+
+Donate
+------
+
+The Pallets organization develops and supports Click and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+-   Documentation: https://click.palletsprojects.com/
+-   Changes: https://click.palletsprojects.com/changes/
+-   PyPI Releases: https://pypi.org/project/click/
+-   Source Code: https://github.com/pallets/click
+-   Issue Tracker: https://github.com/pallets/click/issues
+-   Chat: https://discord.gg/pallets
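
The "arbitrary nesting of commands" point in the README above isn't covered by its simple example; a minimal sketch using `click.group` (the group and command names here are invented for illustration):

```py
import click

@click.group()
def cli():
    """Top-level group; commands and further groups can nest under it."""

@cli.command()
def sync():
    # invoked as: python app.py sync
    click.echo("Syncing")

if __name__ == "__main__":
    cli()
```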
env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/RECORD
ADDED
@@ -0,0 +1,39 @@
+click-8.1.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+click-8.1.7.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475
+click-8.1.7.dist-info/METADATA,sha256=qIMevCxGA9yEmJOM_4WHuUJCwWpsIEVbCPOhs45YPN4,3014
+click-8.1.7.dist-info/RECORD,,
+click-8.1.7.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92
+click-8.1.7.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
+click/__init__.py,sha256=YDDbjm406dTOA0V8bTtdGnhN7zj5j-_dFRewZF_pLvw,3138
+click/__pycache__/__init__.cpython-310.pyc,,
+click/__pycache__/_compat.cpython-310.pyc,,
+click/__pycache__/_termui_impl.cpython-310.pyc,,
+click/__pycache__/_textwrap.cpython-310.pyc,,
+click/__pycache__/_winconsole.cpython-310.pyc,,
+click/__pycache__/core.cpython-310.pyc,,
+click/__pycache__/decorators.cpython-310.pyc,,
+click/__pycache__/exceptions.cpython-310.pyc,,
+click/__pycache__/formatting.cpython-310.pyc,,
+click/__pycache__/globals.cpython-310.pyc,,
+click/__pycache__/parser.cpython-310.pyc,,
+click/__pycache__/shell_completion.cpython-310.pyc,,
+click/__pycache__/termui.cpython-310.pyc,,
+click/__pycache__/testing.cpython-310.pyc,,
+click/__pycache__/types.cpython-310.pyc,,
+click/__pycache__/utils.cpython-310.pyc,,
+click/_compat.py,sha256=5318agQpbt4kroKsbqDOYpTSWzL_YCZVUQiTT04yXmc,18744
+click/_termui_impl.py,sha256=3dFYv4445Nw-rFvZOTBMBPYwB1bxnmNk9Du6Dm_oBSU,24069
+click/_textwrap.py,sha256=10fQ64OcBUMuK7mFvh8363_uoOxPlRItZBmKzRJDgoY,1353
+click/_winconsole.py,sha256=5ju3jQkcZD0W27WEMGqmEP4y_crUVzPCqsX_FYb7BO0,7860
+click/core.py,sha256=j6oEWtGgGna8JarD6WxhXmNnxLnfRjwXglbBc-8jr7U,114086
+click/decorators.py,sha256=-ZlbGYgV-oI8jr_oH4RpuL1PFS-5QmeuEAsLDAYgxtw,18719
+click/exceptions.py,sha256=fyROO-47HWFDjt2qupo7A3J32VlpM-ovJnfowu92K3s,9273
+click/formatting.py,sha256=Frf0-5W33-loyY_i9qrwXR8-STnW3m5gvyxLVUdyxyk,9706
+click/globals.py,sha256=TP-qM88STzc7f127h35TD_v920FgfOD2EwzqA0oE8XU,1961
+click/parser.py,sha256=LKyYQE9ZLj5KgIDXkrcTHQRXIggfoivX14_UVIn56YA,19067
+click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+click/shell_completion.py,sha256=Ty3VM_ts0sQhj6u7eFTiLwHPoTgcXTGEAUg2OpLqYKw,18460
+click/termui.py,sha256=H7Q8FpmPelhJ2ovOhfCRhjMtCpNyjFXryAMLZODqsdc,28324
+click/testing.py,sha256=1Qd4kS5bucn1hsNIRryd0WtTMuCpkA93grkWxT8POsU,16084
+click/types.py,sha256=TZvz3hKvBztf-Hpa2enOmP4eznSPLzijjig5b_0XMxE,36391
+click/utils.py,sha256=1476UduUNY6UePGU4m18uzVHLt1sKM2PP3yWsQhbItM,20298
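
Each RECORD row above is `path,hash,size`: the hash field, when present, is `sha256=` followed by the unpadded urlsafe-base64 SHA-256 digest of the installed file (the wheel RECORD convention; `.pyc` files and RECORD itself are listed without hashes). A minimal sketch for recomputing that field, given the path of any installed file:

```py
import base64
import hashlib

def record_hash(path: str) -> str:
    # Recompute the "sha256=<urlsafe-b64, unpadded>" hash field of a RECORD row
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
```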
env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
env-llmeval/lib/python3.10/site-packages/click-8.1.7.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+click
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (809 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc
ADDED
Binary file (6.07 kB).
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc
ADDED
Binary file (1.6 kB).
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc
ADDED
Binary file (1.86 kB).
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc
ADDED
Binary file (4.9 kB).
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc
ADDED
Binary file (5.62 kB).
env-llmeval/lib/python3.10/site-packages/datasets/features/__init__.py
ADDED
@@ -0,0 +1,20 @@
+# ruff: noqa
+
+__all__ = [
+    "Audio",
+    "Array2D",
+    "Array3D",
+    "Array4D",
+    "Array5D",
+    "ClassLabel",
+    "Features",
+    "Sequence",
+    "Value",
+    "Image",
+    "Translation",
+    "TranslationVariableLanguages",
+]
+from .audio import Audio
+from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
+from .image import Image
+from .translation import Translation, TranslationVariableLanguages
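
This `__init__.py` only re-exports the feature types named in `__all__`. A minimal usage sketch for a few of them (the column names and label strings are invented):

```py
>>> from datasets.features import ClassLabel, Features, Value
>>> features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
>>> features["label"].int2str(1)
'pos'
```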
env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (588 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc
ADDED
Binary file (10.3 kB).
env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc
ADDED
Binary file (75.1 kB).
env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc
ADDED
Binary file (12.2 kB).
env-llmeval/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc
ADDED
Binary file (5.19 kB).
env-llmeval/lib/python3.10/site-packages/datasets/features/audio.py
ADDED
@@ -0,0 +1,277 @@
+import os
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..download.streaming_download_manager import xopen, xsplitext
+from ..table import array_cast
+from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+    from .features import FeatureType
+
+
+@dataclass
+class Audio:
+    """Audio [`Feature`] to extract audio data from an audio file.
+
+    Input: The Audio feature accepts as input:
+    - A `str`: Absolute path to the audio file (i.e. random access is allowed).
+    - A `dict` with the keys:
+
+        - `path`: String with relative path of the audio file to the archive file.
+        - `bytes`: Bytes content of the audio file.
+
+      This is useful for archived files with sequential access.
+
+    - A `dict` with the keys:
+
+        - `path`: String with relative path of the audio file to the archive file.
+        - `array`: Array containing the audio sample
+        - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
+
+      This is useful for archived files with sequential access.
+
+    Args:
+        sampling_rate (`int`, *optional*):
+            Target sampling rate. If `None`, the native sampling rate is used.
+        mono (`bool`, defaults to `True`):
+            Whether to convert the audio signal to mono by averaging samples across
+            channels.
+        decode (`bool`, defaults to `True`):
+            Whether to decode the audio data. If `False`,
+            returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
+
+    Example:
+
+    ```py
+    >>> from datasets import load_dataset, Audio
+    >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
+    >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+    >>> ds[0]["audio"]
+    {'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
+        3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+     'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+     'sampling_rate': 16000}
+    ```
+    """
+
+    sampling_rate: Optional[int] = None
+    mono: bool = True
+    decode: bool = True
+    id: Optional[str] = None
+    # Automatically constructed
+    dtype: ClassVar[str] = "dict"
+    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+    _type: str = field(default="Audio", init=False, repr=False)
+
+    def __call__(self):
+        return self.pa_type
+
+    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
+        """Encode example into a format for Arrow.
+
+        Args:
+            value (`str` or `dict`):
+                Data passed as input to Audio feature.
+
+        Returns:
+            `dict`
+        """
+        try:
+            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
+        except ImportError as err:
+            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
+        if isinstance(value, str):
+            return {"bytes": None, "path": value}
+        elif isinstance(value, bytes):
+            return {"bytes": value, "path": None}
+        elif "array" in value:
+            # convert the audio array to wav bytes
+            buffer = BytesIO()
+            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
+            return {"bytes": buffer.getvalue(), "path": None}
+        elif value.get("path") is not None and os.path.isfile(value["path"]):
+            # we set "bytes": None to not duplicate the data if they're already available locally
+            if value["path"].endswith("pcm"):
+                # "PCM" only has raw audio bytes
+                if value.get("sampling_rate") is None:
+                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
+                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
+                if value.get("bytes"):
+                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
+                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
+                else:
+                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
+
+                buffer = BytesIO(bytes())
+                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
+                return {"bytes": buffer.getvalue(), "path": None}
+            else:
+                return {"bytes": None, "path": value.get("path")}
+        elif value.get("bytes") is not None or value.get("path") is not None:
+            # store the audio bytes, and path is used to infer the audio format using the file extension
+            return {"bytes": value.get("bytes"), "path": value.get("path")}
+        else:
+            raise ValueError(
+                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+            )
+
+    def decode_example(
+        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
+    ) -> dict:
+        """Decode example audio file into audio data.
+
+        Args:
+            value (`dict`):
+                A dictionary with keys:
+
+                - `path`: String with relative audio file path.
+                - `bytes`: Bytes of the audio file.
+            token_per_repo_id (`dict`, *optional*):
+                To access and decode
+                audio files from private repositories on the Hub, you can pass
+                a dictionary repo_id (`str`) -> token (`bool` or `str`)
+
+        Returns:
+            `dict`
+        """
+        if not self.decode:
+            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
+
+        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
+        if path is None and file is None:
+            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
+
+        try:
+            import librosa
+            import soundfile as sf
+        except ImportError as err:
+            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
+
+        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
+        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
+            raise RuntimeError(
+                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
+                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+            )
+        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
+            raise RuntimeError(
+                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
+                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+            )
+
+        if file is None:
+            token_per_repo_id = token_per_repo_id or {}
+            source_url = path.split("::")[-1]
+            pattern = (
+                config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
+            )
+            try:
+                repo_id = string_to_dict(source_url, pattern)["repo_id"]
+                token = token_per_repo_id[repo_id]
+            except (ValueError, KeyError):
+                token = None
+
+            download_config = DownloadConfig(token=token)
+            with xopen(path, "rb", download_config=download_config) as f:
+                array, sampling_rate = sf.read(f)
+
+        else:
+            array, sampling_rate = sf.read(file)
+
+        array = array.T
+        if self.mono:
+            array = librosa.to_mono(array)
+        if self.sampling_rate and self.sampling_rate != sampling_rate:
+            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+            sampling_rate = self.sampling_rate
+
+        return {"path": path, "array": array, "sampling_rate": sampling_rate}
+
+    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
+        from .features import Value
+
+        if self.decode:
+            raise ValueError("Cannot flatten a decoded Audio feature.")
+        return {
+            "bytes": Value("binary"),
+            "path": Value("string"),
+        }
+
+    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
+        """Cast an Arrow array to the Audio arrow storage type.
+        The Arrow types that can be converted to the Audio pyarrow storage type are:
+
+        - `pa.string()` - it must contain the "path" data
+        - `pa.binary()` - it must contain the audio bytes
+        - `pa.struct({"bytes": pa.binary()})`
+        - `pa.struct({"path": pa.string()})`
+        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})`  - order doesn't matter
+
+        Args:
+            storage (`Union[pa.StringArray, pa.StructArray]`):
+                PyArrow array to cast.
+
+        Returns:
+            `pa.StructArray`: Array in the Audio arrow storage type, that is
+                `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
+        """
+        if pa.types.is_string(storage.type):
+            bytes_array = pa.array([None] * len(storage), type=pa.binary())
+            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+        elif pa.types.is_binary(storage.type):
+            path_array = pa.array([None] * len(storage), type=pa.string())
+            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
+            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
+        elif pa.types.is_struct(storage.type):
+            if storage.type.get_field_index("bytes") >= 0:
+                bytes_array = storage.field("bytes")
+            else:
+                bytes_array = pa.array([None] * len(storage), type=pa.binary())
+            if storage.type.get_field_index("path") >= 0:
+                path_array = storage.field("path")
+            else:
+                path_array = pa.array([None] * len(storage), type=pa.string())
+            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+        return array_cast(storage, self.pa_type)
+
+    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+        """Embed audio files into the Arrow array.
+
+        Args:
+            storage (`pa.StructArray`):
+                PyArrow array to embed.
+
+        Returns:
+            `pa.StructArray`: Array in the Audio arrow storage type, that is
+                `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+        """
+
+        @no_op_if_value_is_null
+        def path_to_bytes(path):
+            with xopen(path, "rb") as f:
+                bytes_ = f.read()
+            return bytes_
+
+        bytes_array = pa.array(
+            [
+                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+                for x in storage.to_pylist()
+            ],
+            type=pa.binary(),
+        )
+        path_array = pa.array(
+            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+            type=pa.string(),
+        )
+        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+        return array_cast(storage, self.pa_type)
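
For context, the two decode modes of this feature as seen from the public `datasets` API; this mirrors the class docstring example above, including its dataset name:

```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> # decode=True (the default): decode_example returns {"path", "array", "sampling_rate"},
>>> # here resampled to 16 kHz and averaged to mono (mono=True is also the default)
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds[0]["audio"]["sampling_rate"]
16000
>>> # decode=False: keep the raw storage dict {"path": ..., "bytes": ...} without decoding
>>> ds = ds.cast_column("audio", Audio(decode=False))
```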
env-llmeval/lib/python3.10/site-packages/datasets/features/features.py
ADDED
@@ -0,0 +1,2167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
# Lint as: python3
|
16 |
+
"""This class handle features definition in datasets and some utilities to display table type."""
|
17 |
+
|
18 |
+
import copy
|
19 |
+
import json
|
20 |
+
import re
|
21 |
+
import sys
|
22 |
+
from collections.abc import Iterable, Mapping
|
23 |
+
from collections.abc import Sequence as SequenceABC
|
24 |
+
from dataclasses import InitVar, dataclass, field, fields
|
25 |
+
from functools import reduce, wraps
|
26 |
+
from operator import mul
|
27 |
+
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
|
28 |
+
from typing import Sequence as Sequence_
|
29 |
+
|
30 |
+
import numpy as np
|
31 |
+
import pandas as pd
|
32 |
+
import pyarrow as pa
|
33 |
+
import pyarrow.compute as pc
|
34 |
+
import pyarrow.types
|
35 |
+
import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
|
36 |
+
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
|
37 |
+
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
|
38 |
+
|
39 |
+
from .. import config
|
40 |
+
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
|
41 |
+
from ..table import array_cast
|
42 |
+
from ..utils import logging
|
43 |
+
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
|
44 |
+
from .audio import Audio
|
45 |
+
from .image import Image, encode_pil_image
|
46 |
+
from .translation import Translation, TranslationVariableLanguages
|
47 |
+
|
48 |
+
|
49 |
+
logger = logging.get_logger(__name__)
|
50 |
+
|
51 |
+
|
52 |
+
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
|
53 |
+
"""
|
54 |
+
_arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
|
55 |
+
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
|
56 |
+
"""
|
57 |
+
if pyarrow.types.is_null(arrow_type):
|
58 |
+
return "null"
|
59 |
+
elif pyarrow.types.is_boolean(arrow_type):
|
60 |
+
return "bool"
|
61 |
+
elif pyarrow.types.is_int8(arrow_type):
|
62 |
+
return "int8"
|
63 |
+
elif pyarrow.types.is_int16(arrow_type):
|
64 |
+
return "int16"
|
65 |
+
elif pyarrow.types.is_int32(arrow_type):
|
66 |
+
return "int32"
|
67 |
+
elif pyarrow.types.is_int64(arrow_type):
|
68 |
+
return "int64"
|
69 |
+
elif pyarrow.types.is_uint8(arrow_type):
|
70 |
+
return "uint8"
|
71 |
+
elif pyarrow.types.is_uint16(arrow_type):
|
72 |
+
return "uint16"
|
73 |
+
elif pyarrow.types.is_uint32(arrow_type):
|
74 |
+
return "uint32"
|
75 |
+
elif pyarrow.types.is_uint64(arrow_type):
|
76 |
+
return "uint64"
|
77 |
+
elif pyarrow.types.is_float16(arrow_type):
|
78 |
+
return "float16" # pyarrow dtype is "halffloat"
|
79 |
+
elif pyarrow.types.is_float32(arrow_type):
|
80 |
+
return "float32" # pyarrow dtype is "float"
|
81 |
+
elif pyarrow.types.is_float64(arrow_type):
|
82 |
+
return "float64" # pyarrow dtype is "double"
|
83 |
+
elif pyarrow.types.is_time32(arrow_type):
|
84 |
+
return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
|
85 |
+
elif pyarrow.types.is_time64(arrow_type):
|
86 |
+
return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
|
87 |
+
elif pyarrow.types.is_timestamp(arrow_type):
|
88 |
+
if arrow_type.tz is None:
|
89 |
+
return f"timestamp[{arrow_type.unit}]"
|
90 |
+
elif arrow_type.tz:
|
91 |
+
return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
|
92 |
+
else:
|
93 |
+
raise ValueError(f"Unexpected timestamp object {arrow_type}.")
|
94 |
+
elif pyarrow.types.is_date32(arrow_type):
|
95 |
+
return "date32" # pyarrow dtype is "date32[day]"
|
96 |
+
elif pyarrow.types.is_date64(arrow_type):
|
97 |
+
return "date64" # pyarrow dtype is "date64[ms]"
|
98 |
+
elif pyarrow.types.is_duration(arrow_type):
|
99 |
+
return f"duration[{arrow_type.unit}]"
|
100 |
+
elif pyarrow.types.is_decimal128(arrow_type):
|
101 |
+
return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
|
102 |
+
elif pyarrow.types.is_decimal256(arrow_type):
|
103 |
+
return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
|
104 |
+
elif pyarrow.types.is_binary(arrow_type):
|
105 |
+
return "binary"
|
106 |
+
elif pyarrow.types.is_large_binary(arrow_type):
|
107 |
+
return "large_binary"
|
108 |
+
elif pyarrow.types.is_string(arrow_type):
|
109 |
+
return "string"
|
110 |
+
elif pyarrow.types.is_large_string(arrow_type):
|
111 |
+
return "large_string"
|
112 |
+
else:
|
113 |
+
raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
|
114 |
+
|
115 |
+
|
116 |
+
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
|
117 |
+
"""
|
118 |
+
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
|
119 |
+
|
120 |
+
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
|
121 |
+
|
122 |
+
This is necessary because the datasets.Value() primitive type is constructed using a string dtype
|
123 |
+
|
124 |
+
Value(dtype=str)
|
125 |
+
|
126 |
+
But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema,
|
127 |
+
which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
|
128 |
+
purpose of this function.
|
129 |
+
"""
|
130 |
+
|
131 |
+
def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
|
132 |
+
msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
|
133 |
+
if examples:
|
134 |
+
examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
|
135 |
+
msg += f"\nValid examples include: {examples}."
|
136 |
+
if urls:
|
137 |
+
urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
|
138 |
+
msg += f"\nFor more insformation, see: {urls}."
|
139 |
+
return msg
|
140 |
+
|
141 |
+
if datasets_dtype in pa.__dict__:
|
142 |
+
return pa.__dict__[datasets_dtype]()
|
143 |
+
|
144 |
+
if (datasets_dtype + "_") in pa.__dict__:
|
145 |
+
return pa.__dict__[datasets_dtype + "_"]()
|
146 |
+
|
147 |
+
timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
|
148 |
+
if timestamp_matches:
|
149 |
+
timestamp_internals = timestamp_matches.group(1)
|
150 |
+
internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
|
151 |
+
if timestamp_internals in ["s", "ms", "us", "ns"]:
|
152 |
+
return pa.timestamp(timestamp_internals)
|
153 |
+
elif internals_matches:
|
154 |
+
return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
|
155 |
+
else:
|
156 |
+
raise ValueError(
|
157 |
+
_dtype_error_msg(
|
158 |
+
datasets_dtype,
|
159 |
+
"timestamp",
|
160 |
+
examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
|
161 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
|
162 |
+
)
|
163 |
+
)
|
164 |
+
|
165 |
+
duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
|
166 |
+
if duration_matches:
|
167 |
+
duration_internals = duration_matches.group(1)
|
168 |
+
if duration_internals in ["s", "ms", "us", "ns"]:
|
169 |
+
return pa.duration(duration_internals)
|
170 |
+
else:
|
171 |
+
raise ValueError(
|
172 |
+
_dtype_error_msg(
|
173 |
+
datasets_dtype,
|
174 |
+
"duration",
|
175 |
+
examples=["duration[s]", "duration[us]"],
|
176 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
|
177 |
+
)
|
178 |
+
)
|
179 |
+
|
180 |
+
time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
|
181 |
+
if time_matches:
|
182 |
+
time_internals_bits = time_matches.group(1)
|
183 |
+
if time_internals_bits == "32":
|
184 |
+
time_internals_unit = time_matches.group(2)
|
185 |
+
if time_internals_unit in ["s", "ms"]:
|
186 |
+
return pa.time32(time_internals_unit)
|
187 |
+
else:
|
188 |
+
raise ValueError(
|
189 |
+
f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
|
190 |
+
)
|
191 |
+
elif time_internals_bits == "64":
|
192 |
+
time_internals_unit = time_matches.group(2)
|
193 |
+
if time_internals_unit in ["us", "ns"]:
|
194 |
+
return pa.time64(time_internals_unit)
|
195 |
+
else:
|
196 |
+
raise ValueError(
|
197 |
+
f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
|
198 |
+
)
|
199 |
+
else:
|
200 |
+
raise ValueError(
|
201 |
+
_dtype_error_msg(
|
202 |
+
datasets_dtype,
|
203 |
+
"time",
|
204 |
+
examples=["time32[s]", "time64[us]"],
|
205 |
+
urls=[
|
206 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
|
207 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
|
208 |
+
],
|
209 |
+
)
|
210 |
+
)
|
211 |
+
|
212 |
+
decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
|
213 |
+
if decimal_matches:
|
214 |
+
decimal_internals_bits = decimal_matches.group(1)
|
215 |
+
if decimal_internals_bits == "128":
|
216 |
+
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
|
217 |
+
if decimal_internals_precision_and_scale:
|
218 |
+
precision = decimal_internals_precision_and_scale.group(1)
|
219 |
+
scale = decimal_internals_precision_and_scale.group(2)
|
220 |
+
return pa.decimal128(int(precision), int(scale))
|
221 |
+
else:
|
222 |
+
raise ValueError(
|
223 |
+
_dtype_error_msg(
|
224 |
+
datasets_dtype,
|
225 |
+
"decimal128",
|
226 |
+
examples=["decimal128(10, 2)", "decimal128(4, -2)"],
|
227 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
|
228 |
+
)
|
229 |
+
)
|
230 |
+
elif decimal_internals_bits == "256":
|
231 |
+
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
|
232 |
+
if decimal_internals_precision_and_scale:
|
233 |
+
precision = decimal_internals_precision_and_scale.group(1)
|
234 |
+
scale = decimal_internals_precision_and_scale.group(2)
|
235 |
+
return pa.decimal256(int(precision), int(scale))
|
236 |
+
else:
|
237 |
+
raise ValueError(
|
238 |
+
_dtype_error_msg(
|
239 |
+
datasets_dtype,
|
240 |
+
"decimal256",
|
241 |
+
examples=["decimal256(30, 2)", "decimal256(38, -4)"],
|
242 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
|
243 |
+
)
|
244 |
+
)
|
245 |
+
else:
|
246 |
+
raise ValueError(
|
247 |
+
_dtype_error_msg(
|
248 |
+
datasets_dtype,
|
249 |
+
"decimal",
|
250 |
+
examples=["decimal128(12, 3)", "decimal256(40, 6)"],
|
251 |
+
urls=[
|
252 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
|
253 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
|
254 |
+
],
|
255 |
+
)
|
256 |
+
)
|
257 |
+
|
258 |
+
raise ValueError(
|
259 |
+
f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
|
260 |
+
f"Please make sure to use a correct data type, see: "
|
261 |
+
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
|
262 |
+
)
|
263 |
+
|
264 |
+
|
265 |
+
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
|
266 |
+
"""
|
267 |
+
Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
|
268 |
+
It works recursively.
|
269 |
+
|
270 |
+
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
|
271 |
+
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
|
272 |
+
This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.
|
273 |
+
|
274 |
+
Args:
|
275 |
+
obj: the object (nested struct) to cast.
|
276 |
+
only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
|
277 |
+
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
|
278 |
+
Indeed Arrow only support converting 1-dimensional array values.
|
279 |
+
optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
|
280 |
+
and if it doesn't, not checking the rest of the list elements.
|
281 |
+
|
282 |
+
    Returns:
        casted_obj: the casted object
        has_changed (bool): True if the object has been changed, False if it is identical
    """

    if config.TF_AVAILABLE and "tensorflow" in sys.modules:
        import tensorflow as tf

    if config.TORCH_AVAILABLE and "torch" in sys.modules:
        import torch

    if config.JAX_AVAILABLE and "jax" in sys.modules:
        import jax.numpy as jnp

    if config.PIL_AVAILABLE and "PIL" in sys.modules:
        import PIL.Image

    if isinstance(obj, np.ndarray):
        if obj.ndim == 0:
            return obj[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj, False
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj
                ],
                True,
            )
    elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
        if obj.ndim == 0:
            return obj.detach().cpu().numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.detach().cpu().numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.detach().cpu().numpy()
                ],
                True,
            )
    elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
        if obj.ndim == 0:
            return obj.numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.numpy()
                ],
                True,
            )
    elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
        if obj.ndim == 0:
            return np.asarray(obj)[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return np.asarray(obj), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in np.asarray(obj)
                ],
                True,
            )
    elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
        return encode_pil_image(obj), True
    elif isinstance(obj, pd.Series):
        return (
            _cast_to_python_objects(
                obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, pd.DataFrame):
        return (
            {
                key: _cast_to_python_objects(
                    value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                )[0]
                for key, value in obj.to_dict("series").items()
            },
            True,
        )
    elif isinstance(obj, pd.Timestamp):
        return obj.to_pydatetime(), True
    elif isinstance(obj, pd.Timedelta):
        return obj.to_pytimedelta(), True
    elif isinstance(obj, Mapping):
        has_changed = not isinstance(obj, dict)
        output = {}
        for k, v in obj.items():
            casted_v, has_changed_v = _cast_to_python_objects(
                v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            has_changed |= has_changed_v
            output[k] = casted_v
        return output if has_changed else obj, has_changed
    elif hasattr(obj, "__array__"):
        return (
            _cast_to_python_objects(
                obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, (list, tuple)):
        if len(obj) > 0:
            for first_elmt in obj:
                if _check_non_null_non_empty_recursive(first_elmt):
                    break
            casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
                first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            if has_changed_first_elmt or not optimize_list_casting:
                return (
                    [
                        _cast_to_python_objects(
                            elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                        )[0]
                        for elmt in obj
                    ],
                    True,
                )
            else:
                if isinstance(obj, (list, tuple)):
                    return obj, False
                else:
                    return list(obj), True
        else:
            return obj, False
    else:
        return obj, False


def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
    """
    Cast numpy/pytorch/tensorflow/pandas objects to python lists.
    It works recursively.

    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick allows casting objects that contain tokenizer outputs without iterating over every single token, for example.

    Args:
        obj: the object (nested struct) to cast
        only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only supports converting 1-dimensional array values.
        optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
    """
    return _cast_to_python_objects(
        obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
    )[0]


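# Usage sketch (illustrative only, not part of the upstream module; assumes numpy/pandas
# as imported above):
#   cast_to_python_objects(np.zeros((2, 2)), only_1d_for_numpy=True)
#   # -> [array([0., 0.]), array([0., 0.])]   (a 2-d array is split into a list of 1-d rows)
#   cast_to_python_objects(pd.Series([1, 2]))
#   # -> [1, 2]                               (a Series is cast via .tolist())

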
@dataclass
class Value:
    """
    The `Value` dtypes are as follows:

    - `null`
    - `bool`
    - `int8`
    - `int16`
    - `int32`
    - `int64`
    - `uint8`
    - `uint16`
    - `uint32`
    - `uint64`
    - `float16`
    - `float32` (alias float)
    - `float64` (alias double)
    - `time32[(s|ms)]`
    - `time64[(us|ns)]`
    - `timestamp[(s|ms|us|ns)]`
    - `timestamp[(s|ms|us|ns), tz=(tzstring)]`
    - `date32`
    - `date64`
    - `duration[(s|ms|us|ns)]`
    - `decimal128(precision, scale)`
    - `decimal256(precision, scale)`
    - `binary`
    - `large_binary`
    - `string`
    - `large_string`

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'stars': Value(dtype='int32')})
    >>> features
    {'stars': Value(dtype='int32', id=None)}
    ```
    """

    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Value", init=False, repr=False)

    def __post_init__(self):
        if self.dtype == "double":  # fix inferred type
            self.dtype = "float64"
        if self.dtype == "float":  # fix inferred type
            self.dtype = "float32"
        self.pa_type = string_to_arrow(self.dtype)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value):
        if pa.types.is_boolean(self.pa_type):
            return bool(value)
        elif pa.types.is_integer(self.pa_type):
            return int(value)
        elif pa.types.is_floating(self.pa_type):
            return float(value)
        elif pa.types.is_string(self.pa_type):
            return str(value)
        else:
            return value


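# Usage sketch (illustrative only, not part of the upstream module): `__post_init__`
# normalizes the dtype aliases, and `encode_example` coerces values to the matching type:
#   Value("double").dtype                 # -> 'float64'
#   Value("string").encode_example(42)    # -> '42'

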
class _ArrayXD:
    def __post_init__(self):
        self.shape = tuple(self.shape)

    def __call__(self):
        pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
        return pa_type

    def encode_example(self, value):
        return value


@dataclass
class Array2D(_ArrayXD):
    """Create a two-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array2D", init=False, repr=False)


@dataclass
class Array3D(_ArrayXD):
    """Create a three-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array3D", init=False, repr=False)


@dataclass
class Array4D(_ArrayXD):
    """Create a four-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array4D", init=False, repr=False)


@dataclass
class Array5D(_ArrayXD):
    """Create a five-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array5D", init=False, repr=False)


class _ArrayXDExtensionType(pa.ExtensionType):
    ndims: Optional[int] = None

    def __init__(self, shape: tuple, dtype: str):
        if self.ndims is None or self.ndims <= 1:
            raise ValueError("You must instantiate an array type with a number of dimensions that is > 1")
        if len(shape) != self.ndims:
            raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
        for dim in range(1, self.ndims):
            if shape[dim] is None:
                raise ValueError(f"Dynamic size is only supported on the first dimension. Got: {shape}")
        self.shape = tuple(shape)
        self.value_type = dtype
        self.storage_dtype = self._generate_dtype(self.value_type)
        pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")

    def __arrow_ext_serialize__(self):
        return json.dumps((self.shape, self.value_type)).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        args = json.loads(serialized)
        return cls(*args)

    # This was added to pa.ExtensionType in pyarrow >= 13.0.0
    def __reduce__(self):
        return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())

    def __hash__(self):
        return hash((self.__class__, self.shape, self.value_type))

    def __arrow_ext_class__(self):
        return ArrayExtensionArray

    def _generate_dtype(self, dtype):
        dtype = string_to_arrow(dtype)
        for d in reversed(self.shape):
            dtype = pa.list_(dtype)
            # Don't specify the size of the list, since fixed length list arrays have issues
            # being validated after slicing in pyarrow 0.17.1
        return dtype

    def to_pandas_dtype(self):
        return PandasArrayExtensionDtype(self.value_type)


class Array2DExtensionType(_ArrayXDExtensionType):
    ndims = 2


class Array3DExtensionType(_ArrayXDExtensionType):
    ndims = 3


class Array4DExtensionType(_ArrayXDExtensionType):
    ndims = 4


class Array5DExtensionType(_ArrayXDExtensionType):
    ndims = 5


# Register the extension types for deserialization
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))


def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
    """
    When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
    This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.

    # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
    # primitive types are types for which the physical representation in arrow and in numpy is the same
    # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
    # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
    # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
    """

    def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
        if pa.types.is_list(pa_type):
            return _unnest_pa_type(pa_type.value_type)
        return pa_type

    if unnest:
        pa_type = _unnest_pa_type(pa_type)
    return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))


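# Usage sketch (illustrative only, not part of the upstream module):
#   _is_zero_copy_only(pa.int32())                            # -> True
#   _is_zero_copy_only(pa.bool_())                            # -> False (bit-packed in Arrow)
#   _is_zero_copy_only(pa.list_(pa.float64()), unnest=True)   # -> True (checks the leaf type)

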
class ArrayExtensionArray(pa.ExtensionArray):
    def __array__(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        return self.to_numpy(zero_copy_only=zero_copy_only)

    def __getitem__(self, i):
        return self.storage[i]

    def to_numpy(self, zero_copy_only=True):
        storage: pa.ListArray = self.storage
        null_mask = storage.is_null().to_numpy(zero_copy_only=False)

        if self.type.shape[0] is not None:
            size = 1
            null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))

            for i in range(self.type.ndims):
                size *= self.type.shape[i]
                storage = storage.flatten()
            numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
            numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)

            if len(null_indices):
                numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)

        else:
            shape = self.type.shape
            ndims = self.type.ndims
            arrays = []
            first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
            for i, is_null in enumerate(null_mask):
                if is_null:
                    arrays.append(np.nan)
                else:
                    storage_el = storage[i : i + 1]
                    first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
                    # flatten storage
                    for _ in range(ndims):
                        storage_el = storage_el.flatten()

                    numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
                    arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))

            if len(np.unique(np.diff(first_dim_offsets))) > 1:
                # ragged
                numpy_arr = np.empty(len(arrays), dtype=object)
                numpy_arr[:] = arrays
            else:
                numpy_arr = np.array(arrays)

        return numpy_arr

    def to_pylist(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
        if self.type.shape[0] is None and numpy_arr.dtype == object:
            return [arr.tolist() for arr in numpy_arr.tolist()]
        else:
            return numpy_arr.tolist()


class PandasArrayExtensionDtype(PandasExtensionDtype):
    _metadata = "value_type"

    def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
        self._value_type = value_type

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        if isinstance(array, pa.ChunkedArray):
            array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
        zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
        numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
        return PandasArrayExtensionArray(numpy_arr)

    @classmethod
    def construct_array_type(cls):
        return PandasArrayExtensionArray

    @property
    def type(self) -> type:
        return np.ndarray

    @property
    def kind(self) -> str:
        return "O"

    @property
    def name(self) -> str:
        return f"array[{self.value_type}]"

    @property
    def value_type(self) -> np.dtype:
        return self._value_type


class PandasArrayExtensionArray(PandasExtensionArray):
    def __init__(self, data: np.ndarray, copy: bool = False):
        self._data = data if not copy else np.array(data)
        self._dtype = PandasArrayExtensionDtype(data.dtype)

    def __array__(self, dtype=None):
        """
        Convert to NumPy Array.
        Note that Pandas expects a 1D array when dtype is set to object.
        But for other dtypes, the returned shape is the same as the one of ``data``.

        More info about pandas 1D requirement for PandasExtensionArray here:
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
        """
        if dtype == object:
            out = np.empty(len(self._data), dtype=object)
            for i in range(len(self._data)):
                out[i] = self._data[i]
            return out
        if dtype is None:
            return self._data
        else:
            return self._data.astype(dtype)

    def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
        return PandasArrayExtensionArray(self._data, copy=True)

    @classmethod
    def _from_sequence(
        cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
    ) -> "PandasArrayExtensionArray":
        if len(scalars) > 1 and all(
            isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
        ):
            data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
        else:
            data = np.empty(len(scalars), dtype=object)
            data[:] = scalars
        return cls(data, copy=copy)

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
        if len(to_concat) > 1 and all(
            va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
            for va in to_concat
        ):
            data = np.vstack([va._data for va in to_concat])
        else:
            data = np.empty(len(to_concat), dtype=object)
            data[:] = [va._data for va in to_concat]
        return cls(data, copy=False)

    @property
    def dtype(self) -> PandasArrayExtensionDtype:
        return self._dtype

    @property
    def nbytes(self) -> int:
        return self._data.nbytes

    def isna(self) -> np.ndarray:
        return np.array([pd.isna(arr).any() for arr in self._data])

    def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
        raise NotImplementedError()

    def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
        if isinstance(item, int):
            return self._data[item]
        return PandasArrayExtensionArray(self._data[item], copy=False)

    def take(
        self, indices: Sequence_[int], allow_fill: bool = False, fill_value: Any = None
    ) -> "PandasArrayExtensionArray":
        indices: np.ndarray = np.asarray(indices, dtype=int)
        if allow_fill:
            fill_value = (
                self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
            )
            mask = indices == -1
            if (indices < -1).any():
                raise ValueError("Invalid value in `indices`, must all be >= -1 when `allow_fill` is True")
            elif len(self) > 0:
                pass
            elif not np.all(mask):
                raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
            else:
                data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
                return PandasArrayExtensionArray(data, copy=False)
        took = self._data.take(indices, axis=0)
        if allow_fill and mask.any():
            took[mask] = [fill_value] * np.sum(mask)
        return PandasArrayExtensionArray(took, copy=False)

    def __len__(self) -> int:
        return len(self._data)

    def __eq__(self, other) -> np.ndarray:
        if not isinstance(other, PandasArrayExtensionArray):
            raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
        return (self._data == other._data).all()


def pandas_types_mapper(dtype):
    if isinstance(dtype, _ArrayXDExtensionType):
        return PandasArrayExtensionDtype(dtype.value_type)


@dataclass
class ClassLabel:
    """Feature type for integer class labels.

    There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:

    * `num_classes`: Create 0 to (num_classes-1) labels.
    * `names`: List of label strings.
    * `names_file`: File containing the list of labels.

    Under the hood the labels are stored as integers.
    You can use negative integers to represent unknown/missing labels.

    Args:
        num_classes (`int`, *optional*):
            Number of classes. All labels must be < `num_classes`.
        names (`list` of `str`, *optional*):
            String names for the integer classes.
            The order in which the names are provided is kept.
        names_file (`str`, *optional*):
            Path to a file with names for the integer classes, one per line.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
    >>> features
    {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
    ```
    """

    num_classes: InitVar[Optional[int]] = None  # Pseudo-field: ignored by asdict/fields when converting to/from dict
    names: List[str] = None
    names_file: InitVar[Optional[str]] = None  # Pseudo-field: ignored by asdict/fields when converting to/from dict
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "int64"
    pa_type: ClassVar[Any] = pa.int64()
    _str2int: ClassVar[Dict[str, int]] = None
    _int2str: ClassVar[List[str]] = None
    _type: str = field(default="ClassLabel", init=False, repr=False)

    def __post_init__(self, num_classes, names_file):
        self.num_classes = num_classes
        self.names_file = names_file
        if self.names_file is not None and self.names is not None:
            raise ValueError("Please provide either names or names_file but not both.")
        # Set self.names
        if self.names is None:
            if self.names_file is not None:
                self.names = self._load_names_from_file(self.names_file)
            elif self.num_classes is not None:
                self.names = [str(i) for i in range(self.num_classes)]
            else:
                raise ValueError("Please provide either num_classes, names or names_file.")
        elif not isinstance(self.names, SequenceABC):
            raise TypeError(f"Please provide names as a list, is {type(self.names)}")
        # Set self.num_classes
        if self.num_classes is None:
            self.num_classes = len(self.names)
        elif self.num_classes != len(self.names):
            raise ValueError(
                "ClassLabel number of names does not match the defined num_classes. "
                f"Got {len(self.names)} names vs {self.num_classes} num_classes"
            )
        # Prepare mappings
        self._int2str = [str(name) for name in self.names]
        self._str2int = {name: i for i, name in enumerate(self._int2str)}
        if len(self._int2str) != len(self._str2int):
            raise ValueError("Some label names are duplicated. Each label name should be unique.")

    def __call__(self):
        return self.pa_type

    def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
        """Conversion class name `string` => `integer`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train")
        >>> ds.features["label"].str2int('neg')
        0
        ```
        """
        if not isinstance(values, str) and not isinstance(values, Iterable):
            raise ValueError(
                f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
            )
        return_list = True
        if isinstance(values, str):
            values = [values]
            return_list = False

        output = [self._strval2int(value) for value in values]
        return output if return_list else output[0]

    def _strval2int(self, value: str) -> int:
        failed_parse = False
        value = str(value)
        # first attempt - raw string value
        int_value = self._str2int.get(value)
        if int_value is None:
            # second attempt - strip whitespace
            int_value = self._str2int.get(value.strip())
            if int_value is None:
                # third attempt - convert str to int
                try:
                    int_value = int(value)
                except ValueError:
                    failed_parse = True
                else:
                    if int_value < -1 or int_value >= self.num_classes:
                        failed_parse = True
        if failed_parse:
            raise ValueError(f"Invalid string class label {value}")
        return int_value

    def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
        """Conversion `integer` => class name `string`.

        Regarding unknown/missing labels: passing negative integers raises `ValueError`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train")
        >>> ds.features["label"].int2str(0)
        'neg'
        ```
        """
        if not isinstance(values, int) and not isinstance(values, Iterable):
            raise ValueError(
                f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
            )
        return_list = True
        if isinstance(values, int):
            values = [values]
            return_list = False

        for v in values:
            if not 0 <= v < self.num_classes:
                raise ValueError(f"Invalid integer class label {v:d}")

        output = [self._int2str[int(v)] for v in values]
        return output if return_list else output[0]

    def encode_example(self, example_data):
        if self.num_classes is None:
            raise ValueError(
                "Trying to use ClassLabel feature with undefined number of classes. "
                "Please set ClassLabel.names or num_classes."
            )

        # If a string is given, convert to associated integer
        if isinstance(example_data, str):
            example_data = self.str2int(example_data)

        # Allowing -1 to mean no label.
        if not -1 <= example_data < self.num_classes:
            raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
        return example_data

    def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
        """Cast an Arrow array to the `ClassLabel` arrow storage type.
        The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:

        - `pa.string()`
        - integer types (e.g. `pa.int64()`)

        Args:
            storage (`Union[pa.StringArray, pa.IntegerArray]`):
                PyArrow array to cast.

        Returns:
            `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
        """
        if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
            min_max = pc.min_max(storage).as_py()
            if min_max["max"] is not None and min_max["max"] >= self.num_classes:
                raise ValueError(
                    f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
                )
        elif isinstance(storage, pa.StringArray):
            storage = pa.array(
                [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
            )
        return array_cast(storage, self.pa_type)

    @staticmethod
    def _load_names_from_file(names_filepath):
        with open(names_filepath, encoding="utf-8") as f:
            return [name.strip() for name in f.read().split("\n") if name.strip()]  # Filter empty names


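# Usage sketch (illustrative only, not part of the upstream module):
#   cl = ClassLabel(names=["neg", "pos"])
#   cl.str2int("pos")                                  # -> 1
#   cl.cast_storage(pa.array(["pos", "neg", None]))    # -> Int64Array [1, 0, null]

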
@dataclass
class Sequence:
    """Construct a list of features from a single type or a dict of types.
    Mostly here for compatibility with tfds.

    Args:
        feature:
            A list of features of a single type or a dictionary of types.
        length (`int`):
            Length of the sequence.

    Example:

    ```py
    >>> from datasets import Features, Sequence, Value, ClassLabel
    >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
    >>> features
    {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
    ```
    """

    feature: Any
    length: int = -1
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "list"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Sequence", init=False, repr=False)


FeatureType = Union[
    dict,
    list,
    tuple,
    Value,
    ClassLabel,
    Translation,
    TranslationVariableLanguages,
    Sequence,
    Array2D,
    Array3D,
    Array4D,
    Array5D,
    Audio,
    Image,
]


def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
    """
    Check if the object is not None.
    If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
    """
    if obj is None:
        return False
    elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
        if len(obj) > 0:
            if schema is None:
                pass
            elif isinstance(schema, (list, tuple)):
                schema = schema[0]
            else:
                schema = schema.feature
            return _check_non_null_non_empty_recursive(obj[0], schema)
        else:
            return False
    else:
        return True


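# Usage sketch (illustrative only, not part of the upstream module):
#   _check_non_null_non_empty_recursive(None)      # -> False
#   _check_non_null_non_empty_recursive([[]])      # -> False (first leaf is an empty sequence)
#   _check_non_null_non_empty_recursive([[1]])     # -> True

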
def get_nested_type(schema: FeatureType) -> pa.DataType:
    """
    get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
    generate_from_arrow_type().

    It performs double-duty as the implementation of Features.type and handles the conversion of
    datasets.Feature->pa.struct
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, Features):
        return pa.struct(
            {key: get_nested_type(schema[key]) for key in schema}
        )  # Features is subclass of dict, and dict order is deterministic since Python 3.6
    elif isinstance(schema, dict):
        return pa.struct(
            {key: get_nested_type(schema[key]) for key in schema}
        )  # however don't sort on struct types since the order matters
    elif isinstance(schema, (list, tuple)):
        if len(schema) != 1:
            raise ValueError("When defining list feature, you should just provide one example of the inner type")
        value_type = get_nested_type(schema[0])
        return pa.list_(value_type)
    elif isinstance(schema, Sequence):
        value_type = get_nested_type(schema.feature)
        # We allow reversing a list of dict => dict of list, for compatibility with tfds
        if isinstance(schema.feature, dict):
            return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
        return pa.list_(value_type, schema.length)

    # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
    return schema()


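# Usage sketch (illustrative only, not part of the upstream module):
#   get_nested_type({"a": Value("int32"), "b": [Value("string")]})
#   # -> struct<a: int32, b: list<item: string>>

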
def encode_nested_example(schema, obj, level=0):
    """Encode a nested example.
    This is used since some features (in particular ClassLabel) have some logic during encoding.

    To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
    If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        if level == 0 and obj is None:
            raise ValueError("Got None but expected a dictionary instead")
        return (
            {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
            if obj is not None
            else None
        )

    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
                    return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        if obj is None:
            return None
        # We allow reversing a list of dict => dict of list, for compatibility with tfds
        if isinstance(schema.feature, dict):
            # dict of list to fill
            list_dict = {}
            if isinstance(obj, (list, tuple)):
                # obj is a list of dict
                for k in schema.feature:
                    list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
                return list_dict
            else:
                # obj is a single dict
                for k in schema.feature:
                    list_dict[k] = (
                        [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
                        if k in obj
                        else None
                    )
                return list_dict
        # schema.feature is not a dict
        if isinstance(obj, str):  # don't interpret a string as a list
            raise ValueError(f"Got a string but expected a list instead: '{obj}'")
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
                        break
                # be careful when comparing tensors here
                if (
                    not isinstance(first_elmt, list)
                    or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
                ):
                    return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
            return list(obj)
    # Object with special encoding:
    # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
    elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
        return schema.encode_example(obj) if obj is not None else None
    # Other objects should be directly convertible to a native Arrow type (like Translation)
    return obj


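# Usage sketch (illustrative only, not part of the upstream module):
#   schema = {"label": ClassLabel(names=["neg", "pos"]), "text": Value("string")}
#   encode_nested_example(schema, {"label": "pos", "text": "good movie"})
#   # -> {"label": 1, "text": "good movie"}

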
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
    """Decode a nested example.
    This is used since some features (in particular Audio and Image) have some logic during decoding.

    To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
    If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        return (
            {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
            if obj is not None
            else None
        )
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if decode_nested_example(sub_schema, first_elmt) != first_elmt:
                    return [decode_nested_example(sub_schema, o) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        # We allow reversing a list of dict => dict of list, for compatibility with tfds
        if isinstance(schema.feature, dict):
            return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
        else:
            return decode_nested_example([schema.feature], obj)
    # Object with special decoding:
    elif isinstance(schema, (Audio, Image)):
        # we pass the token to read and decode files from private repositories in streaming mode
        if obj is not None and schema.decode:
            return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
    return obj


def generate_from_dict(obj: Any):
    """Regenerate the nested feature object from a deserialized dict.
    We use the '_type' field to get the dataclass name to load.

    generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
    to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
    a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
    :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
    mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
    that :class:`Value` automatically performs.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(obj, list):
        return [generate_from_dict(value) for value in obj]
    # Otherwise we have a dict or a dataclass
    if "_type" not in obj or isinstance(obj["_type"], dict):
        return {key: generate_from_dict(value) for key, value in obj.items()}
    obj = dict(obj)
    class_type = globals()[obj.pop("_type")]

    if class_type == Sequence:
        return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))

    field_names = {f.name for f in fields(class_type)}
    return class_type(**{k: v for k, v in obj.items() if k in field_names})


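# Usage sketch (illustrative only, not part of the upstream module):
#   generate_from_dict({"dtype": "string", "_type": "Value"})
#   # -> Value(dtype='string', id=None)

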
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
    """
    generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
    a single field.

    This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().

    This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
    full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
    """
    if isinstance(pa_type, pa.StructType):
        return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
    elif isinstance(pa_type, pa.FixedSizeListType):
        return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
    elif isinstance(pa_type, pa.ListType):
        feature = generate_from_arrow_type(pa_type.value_type)
        if isinstance(feature, (dict, tuple, list)):
            return [feature]
        return Sequence(feature=feature)
    elif isinstance(pa_type, _ArrayXDExtensionType):
        array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
        return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
    elif isinstance(pa_type, pa.DictionaryType):
        raise NotImplementedError  # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
    elif isinstance(pa_type, pa.DataType):
        return Value(dtype=_arrow_to_datasets_dtype(pa_type))
    else:
        raise ValueError(f"Cannot convert {pa_type} to a Feature type.")


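# Usage sketch (illustrative only, not part of the upstream module):
#   generate_from_arrow_type(pa.list_(pa.int32()))
#   # -> Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None)

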
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a multidimensional NumPy array"""
    arr = np.array(arr)
    values = pa.array(arr.flatten(), type=type)
    for i in range(arr.ndim - 1):
        n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
        step_offsets = arr.shape[arr.ndim - i - 1]
        offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
        values = pa.ListArray.from_arrays(offsets, values)
    return values


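# Usage sketch (illustrative only, not part of the upstream module):
#   numpy_to_pyarrow_listarray(np.arange(6).reshape(2, 3)).to_pylist()
#   # -> [[0, 1, 2], [3, 4, 5]]

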
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
    null_mask = np.array([arr is None for arr in l_arr])
    null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
    l_arr = [arr for arr in l_arr if arr is not None]
    offsets = np.cumsum(
        [0] + [len(arr) for arr in l_arr], dtype=object
    )  # convert to dtype object to allow None insertion
    offsets = np.insert(offsets, null_indices, None)
    offsets = pa.array(offsets, type=pa.int32())
    values = pa.concat_arrays(l_arr)
    return pa.ListArray.from_arrays(offsets, values)


def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
    if len(l_arr) > 0:
        return list_of_pa_arrays_to_pyarrow_listarray(
            [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
        )
    else:
        return pa.array([], type=type)


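# Usage sketch (illustrative only, not part of the upstream module): None entries become
# nulls in the resulting ListArray:
#   list_of_np_array_to_pyarrow_listarray([np.array([1, 2]), None, np.array([3])]).to_pylist()
#   # -> [[1, 2], None, [3]]

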
def contains_any_np_array(data: Any):
    """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.

    Args:
        data (Any): Data.

    Returns:
        bool
    """
    if isinstance(data, np.ndarray):
        return True
    elif isinstance(data, list):
        return contains_any_np_array(first_non_null_value(data)[1])
    else:
        return False


def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
    """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.

    Args:
        data (Union[np.ndarray, List]): Data.
        type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.

    Returns:
        pa.ListArray
    """
    if isinstance(data, np.ndarray):
        return numpy_to_pyarrow_listarray(data, type=type)
    elif isinstance(data, list):
        return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])


def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
    """Convert to PyArrow ListArray.

    Args:
        data (Any): Sequence, iterable, np.ndarray or pd.Series.
        pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.

    Returns:
        pyarrow.Array
    """
    if contains_any_np_array(data):
        return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
    else:
        return pa.array(data, pa_type.storage_dtype)


def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
    """Visit a (possibly nested) feature.

    Args:
        feature (FeatureType): the feature type to be checked
        func (Callable[[FeatureType], Optional[FeatureType]]): the function applied to each visited feature;
            it may return `None` to leave the visited feature unchanged
    Returns:
        visited feature (FeatureType)
    """
    if isinstance(feature, dict):
        out = func({k: _visit(f, func) for k, f in feature.items()})
    elif isinstance(feature, (list, tuple)):
        out = func([_visit(feature[0], func)])
    elif isinstance(feature, Sequence):
        out = func(Sequence(_visit(feature.feature, func), length=feature.length))
    else:
        out = func(feature)
    return feature if out is None else out


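# Usage sketch (illustrative only, not part of the upstream module): collect every Value
# dtype in a schema via a side-effecting visitor:
#   dtypes = []
#   _visit(Features({"a": Value("int32"), "b": [Value("string")]}),
#          lambda f: dtypes.append(f.dtype) if isinstance(f, Value) else None)
#   # dtypes -> ['int32', 'string']

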
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
    """Check if a (possibly nested) feature requires decoding.

    Args:
        feature (FeatureType): the feature type to be checked
        ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
            of the `decode` attribute of the decodable feature types.
    Returns:
        :obj:`bool`
    """
    if isinstance(feature, dict):
        return any(require_decoding(f) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_decoding(feature[0])
    elif isinstance(feature, Sequence):
        return require_decoding(feature.feature)
    else:
        return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)


def require_storage_cast(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires storage casting.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        :obj:`bool`
    """
    if isinstance(feature, dict):
        return any(require_storage_cast(f) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_storage_cast(feature[0])
    elif isinstance(feature, Sequence):
        return require_storage_cast(feature.feature)
    else:
        return hasattr(feature, "cast_storage")


def require_storage_embed(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires embedding data into storage.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        :obj:`bool`
    """
    if isinstance(feature, dict):
        return any(require_storage_embed(f) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_storage_embed(feature[0])
    elif isinstance(feature, Sequence):
        return require_storage_embed(feature.feature)
    else:
        return hasattr(feature, "embed_storage")


def keep_features_dicts_synced(func):
    """
    Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object
    in sync with the main dictionary.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        if args:
            self: "Features" = args[0]
            args = args[1:]
        else:
            self: "Features" = kwargs.pop("self")
        out = func(self, *args, **kwargs)
        assert hasattr(self, "_column_requires_decoding")
        self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
        return out

    wrapper._decorator_name_ = "_keep_dicts_synced"
    return wrapper


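# Usage sketch (illustrative only, not part of the upstream module; assumes the Image
# feature from this library): any wrapped dict mutation refreshes the decoding map:
#   feats = Features({"img": Image()})
#   feats["txt"] = Value("string")   # __setitem__ is wrapped in the class below
#   feats._column_requires_decoding  # -> {'img': True, 'txt': False}

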
class Features(dict):
    """A special dictionary that defines the internal structure of a dataset.

    Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
    and values are the type of that column.

    `FieldType` can be one of the following:
    - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
    - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
      associated to them and will be stored as integers in the dataset.
    - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
      features. It's possible to have nested fields of nested fields in an arbitrary manner.
    - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
      `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
      type hosted in this list.

      <Tip>

      A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
      lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
      unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
      [`~datasets.Sequence`].

      </Tip>

    - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
    - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
      to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
    - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
      or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
    - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
    """

    def __init__(*args, **kwargs):
        # self not in the signature to allow passing self as a kwarg
        if not args:
            raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
        self, *args = args
        super(Features, self).__init__(*args, **kwargs)
        self._column_requires_decoding: Dict[str, bool] = {
            col: require_decoding(feature) for col, feature in self.items()
        }

    __setitem__ = keep_features_dicts_synced(dict.__setitem__)
    __delitem__ = keep_features_dicts_synced(dict.__delitem__)
    update = keep_features_dicts_synced(dict.update)
    setdefault = keep_features_dicts_synced(dict.setdefault)
    pop = keep_features_dicts_synced(dict.pop)
    popitem = keep_features_dicts_synced(dict.popitem)
    clear = keep_features_dicts_synced(dict.clear)

    def __reduce__(self):
        return Features, (dict(self),)

    @property
    def type(self):
        """
        Features field types.

        Returns:
            :obj:`pyarrow.DataType`
        """
        return get_nested_type(self)

    @property
    def arrow_schema(self):
        """
        Features schema.

        Returns:
            :obj:`pyarrow.Schema`
        """
        hf_metadata = {"info": {"features": self.to_dict()}}
        return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})

    @classmethod
    def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
        """
        Construct [`Features`] from Arrow Schema.
        It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and set to nullable.

        Args:
            pa_schema (`pyarrow.Schema`):
                Arrow Schema.

        Returns:
            [`Features`]
        """
        # try to load features from the arrow schema metadata
        metadata_features = Features()
        if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
            metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
            if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
                metadata_features = Features.from_dict(metadata["info"]["features"])
        metadata_features_schema = metadata_features.arrow_schema
        obj = {
            field.name: (
                metadata_features[field.name]
                if field.name in metadata_features and metadata_features_schema.field(field.name) == field
                else generate_from_arrow_type(field.type)
            )
            for field in pa_schema
        }
        return cls(**obj)

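    # Usage sketch (illustrative only, not part of the upstream module):
    #   schema = pa.schema({"text": pa.string(), "label": pa.int64()})
    #   Features.from_arrow_schema(schema)
    #   # -> {'text': Value(dtype='string', id=None), 'label': Value(dtype='int64', id=None)}
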
    @classmethod
    def from_dict(cls, dic) -> "Features":
        """
        Construct [`Features`] from dict.

        Regenerate the nested feature object from a deserialized dict.
        We use the `_type` key to infer the dataclass name of the feature `FieldType`.

        It allows for a convenient constructor syntax
        to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
        a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
        [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
        any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
        dtypes that [`Value`] automatically performs.

        Args:
            dic (`dict[str, Any]`):
                Python dictionary.

        Returns:
            `Features`

        Example::
            >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
            {'_type': Value(dtype='string', id=None)}
        """
        obj = generate_from_dict(dic)
        return cls(**obj)

    def to_dict(self):
        return asdict(self)

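    # Usage sketch (illustrative, not part of the library source): `to_dict`
    # produces a JSON-serializable dict, so feature definitions survive a JSON
    # dump/load cycle via `from_dict`.
    #
    #     feats = Features({"text": Value("string")})
    #     restored = Features.from_dict(json.loads(json.dumps(feats.to_dict())))
    #     assert restored == feats
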
    def _to_yaml_list(self) -> list:
        # we compute the YAML list from the dict representation that is used for JSON dump
        yaml_data = self.to_dict()

        def simplify(feature: dict) -> dict:
            if not isinstance(feature, dict):
                raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")

            #
            # sequence:                 -> sequence: int32
            #   dtype: int32            ->
            #
            if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
                feature["sequence"] = feature["sequence"]["dtype"]

            #
            # sequence:                 -> sequence:
            #   struct:                 -> - name: foo
            #   - name: foo             ->   dtype: int32
            #     dtype: int32          ->
            #
            if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
                feature["sequence"] = feature["sequence"]["struct"]

            #
            # list:                     -> list: int32
            #   dtype: int32            ->
            #
            if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
                feature["list"] = feature["list"]["dtype"]

            #
            # list:                     -> list:
            #   struct:                 -> - name: foo
            #   - name: foo             ->   dtype: int32
            #     dtype: int32          ->
            #
            if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
                feature["list"] = feature["list"]["struct"]

            #
            # class_label:              -> class_label:
            #   names:                  ->   names:
            #   - negative              ->     '0': negative
            #   - positive              ->     '1': positive
            #
            if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
                # server-side requirement: keys must be strings
                feature["class_label"]["names"] = {
                    str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
                }
            return feature

        def to_yaml_inner(obj: Union[dict, list]) -> dict:
            if isinstance(obj, dict):
                _type = obj.pop("_type", None)
                if _type == "Sequence":
                    _feature = obj.pop("feature")
                    return simplify({"sequence": to_yaml_inner(_feature), **obj})
                elif _type == "Value":
                    return obj
                elif _type and not obj:
                    return {"dtype": camelcase_to_snakecase(_type)}
                elif _type:
                    return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
                else:
                    return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
            elif isinstance(obj, list):
                return simplify({"list": simplify(to_yaml_inner(obj[0]))})
            elif isinstance(obj, tuple):
                return to_yaml_inner(list(obj))
            else:
                raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")

        def to_yaml_types(obj: dict) -> dict:
            if isinstance(obj, dict):
                return {k: to_yaml_types(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [to_yaml_types(v) for v in obj]
            elif isinstance(obj, tuple):
                return to_yaml_types(list(obj))
            else:
                return obj

        return to_yaml_types(to_yaml_inner(yaml_data)["struct"])

    @classmethod
    def _from_yaml_list(cls, yaml_data: list) -> "Features":
        yaml_data = copy.deepcopy(yaml_data)

        # we convert the list obtained from YAML data into the dict representation that is used for JSON dump

        def unsimplify(feature: dict) -> dict:
            if not isinstance(feature, dict):
                raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
            #
            # sequence: int32           -> sequence:
            #                           ->   dtype: int32
            #
            if isinstance(feature.get("sequence"), str):
                feature["sequence"] = {"dtype": feature["sequence"]}
            #
            # list: int32               -> list:
            #                           ->   dtype: int32
            #
            if isinstance(feature.get("list"), str):
                feature["list"] = {"dtype": feature["list"]}

            #
            # class_label:              -> class_label:
            #   names:                  ->   names:
            #     '0': negative         ->   - negative
            #     '1': positive         ->   - positive
            #
            if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
                label_ids = sorted(feature["class_label"]["names"], key=int)
                if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
                    raise ValueError(
                        f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
                    )
                feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
            return feature

        def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
            if isinstance(obj, dict):
                if not obj:
                    return {}
                _type = next(iter(obj))
                if _type == "sequence":
                    _feature = unsimplify(obj).pop(_type)
                    return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
                if _type == "list":
                    return [from_yaml_inner(unsimplify(obj)[_type])]
                if _type == "struct":
                    return from_yaml_inner(obj["struct"])
                elif _type == "dtype":
                    if isinstance(obj["dtype"], str):
                        # e.g. int32, float64, string, audio, image
                        try:
                            Value(obj["dtype"])
                            return {**obj, "_type": "Value"}
                        except ValueError:
                            # e.g. Audio, Image, ArrayXD
                            return {"_type": snakecase_to_camelcase(obj["dtype"])}
                    else:
                        return from_yaml_inner(obj["dtype"])
                else:
                    return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
            elif isinstance(obj, list):
                names = [_feature.pop("name") for _feature in obj]
                return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
            else:
                raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")

        return cls.from_dict(from_yaml_inner(yaml_data))

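    # Usage sketch (illustrative, not part of the library source): the two
    # private YAML helpers are inverses of each other; the simplified YAML list
    # is the form that ends up in the dataset card metadata.
    #
    #     feats = Features({"tokens": Sequence(Value("string"))})
    #     assert Features._from_yaml_list(feats._to_yaml_list()) == feats
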
    def encode_example(self, example):
        """
        Encode example into a format for Arrow.

        Args:
            example (`dict[str, Any]`):
                Data in a Dataset row.

        Returns:
            `dict[str, Any]`
        """
        example = cast_to_python_objects(example)
        return encode_nested_example(self, example)

    def encode_column(self, column, column_name: str):
        """
        Encode column into a format for Arrow.

        Args:
            column (`list[Any]`):
                Data in a Dataset column.
            column_name (`str`):
                Dataset column name.

        Returns:
            `list[Any]`
        """
        column = cast_to_python_objects(column)
        return [encode_nested_example(self[column_name], obj) for obj in column]

    def encode_batch(self, batch):
        """
        Encode batch into a format for Arrow.

        Args:
            batch (`dict[str, list[Any]]`):
                Data in a Dataset batch.

        Returns:
            `dict[str, list[Any]]`
        """
        encoded_batch = {}
        if set(batch) != set(self):
            raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
        for key, column in batch.items():
            column = cast_to_python_objects(column)
            encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column]
        return encoded_batch

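    # Usage sketch (illustrative, not part of the library source): a batch must
    # provide exactly the feature columns, and encoding applies per value, e.g.
    # a ClassLabel maps its string names to integer ids.
    #
    #     feats = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
    #     feats.encode_batch({"text": ["good movie"], "label": ["pos"]})
    #     # -> {'text': ['good movie'], 'label': [1]}
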
    def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
        """Decode example with custom feature decoding.

        Args:
            example (`dict[str, Any]`):
                Dataset row data.
            token_per_repo_id (`dict`, *optional*):
                To access and decode audio or image files from private repositories on the Hub, you can pass
                a dictionary `repo_id (str) -> token (bool or str)`.

        Returns:
            `dict[str, Any]`
        """

        return {
            column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
            if self._column_requires_decoding[column_name]
            else value
            for column_name, (feature, value) in zip_dict(
                {key: value for key, value in self.items() if key in example}, example
            )
        }

    def decode_column(self, column: list, column_name: str):
        """Decode column with custom feature decoding.

        Args:
            column (`list[Any]`):
                Dataset column data.
            column_name (`str`):
                Dataset column name.

        Returns:
            `list[Any]`
        """
        return (
            [decode_nested_example(self[column_name], value) if value is not None else None for value in column]
            if self._column_requires_decoding[column_name]
            else column
        )

    def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
        """Decode batch with custom feature decoding.

        Args:
            batch (`dict[str, list[Any]]`):
                Dataset batch data.
            token_per_repo_id (`dict`, *optional*):
                To access and decode audio or image files from private repositories on the Hub, you can pass
                a dictionary `repo_id (str) -> token (bool or str)`.

        Returns:
            `dict[str, list[Any]]`
        """
        decoded_batch = {}
        for column_name, column in batch.items():
            decoded_batch[column_name] = (
                [
                    decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
                    if value is not None
                    else None
                    for value in column
                ]
                if self._column_requires_decoding[column_name]
                else column
            )
        return decoded_batch

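    # Usage sketch (illustrative, not part of the library source): only columns
    # whose feature requires decoding (e.g. Image, Audio) are decoded; plain
    # values pass through unchanged. Assumes "img.png" exists on disk.
    #
    #     feats = Features({"img": Image(), "label": Value("int64")})
    #     row = feats.decode_example({"img": {"path": "img.png", "bytes": None}, "label": 0})
    #     # row["img"] is a PIL.Image.Image, row["label"] is still 0
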
    def copy(self) -> "Features":
        """
        Make a deep copy of [`Features`].

        Returns:
            [`Features`]

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train")
        >>> copy_of_features = ds.features.copy()
        >>> copy_of_features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
         'text': Value(dtype='string', id=None)}
        ```
        """
        return copy.deepcopy(self)

    def reorder_fields_as(self, other: "Features") -> "Features":
        """
        Reorder Features fields to match the field order of other [`Features`].

        The order of the fields is important since it matters for the underlying arrow data.
        Re-ordering the fields makes the underlying arrow data types match.

        Args:
            other ([`Features`]):
                The other [`Features`] to align with.

        Returns:
            [`Features`]

        Example::

            >>> from datasets import Features, Sequence, Value
            >>> # let's say we have two features with a different order of nested fields (for a and b for example)
            >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
            >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
            >>> assert f1.type != f2.type
            >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
            >>> f1.reorder_fields_as(f2)
            {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
            >>> assert f1.reorder_fields_as(f2).type == f2.type
        """

        def recursive_reorder(source, target, stack=""):
            stack_position = " at " + stack[1:] if stack else ""
            if isinstance(target, Sequence):
                target = target.feature
                if isinstance(target, dict):
                    target = {k: [v] for k, v in target.items()}
                else:
                    target = [target]
            if isinstance(source, Sequence):
                source, id_, length = source.feature, source.id, source.length
                if isinstance(source, dict):
                    source = {k: [v] for k, v in source.items()}
                    reordered = recursive_reorder(source, target, stack)
                    return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
                else:
                    source = [source]
                    reordered = recursive_reorder(source, target, stack)
                    return Sequence(reordered[0], id=id_, length=length)
            elif isinstance(source, dict):
                if not isinstance(target, dict):
                    raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
                if sorted(source) != sorted(target):
                    message = (
                        f"Keys mismatch: between {source} (source) and {target} (target).\n"
                        f"{source.keys()-target.keys()} are missing from target "
                        f"and {target.keys()-source.keys()} are missing from source" + stack_position
                    )
                    raise ValueError(message)
                return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
            elif isinstance(source, list):
                if not isinstance(target, list):
                    raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
                if len(source) != len(target):
                    raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
                return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
            else:
                return source

        return Features(recursive_reorder(self, other))

    def flatten(self, max_depth=16) -> "Features":
        """Flatten the features. Every dictionary column is removed and is replaced by
        all the subfields it contains. The new fields are named by concatenating the
        name of the original column and the subfield name like this: `<original>.<subfield>`.

        If a column contains nested dictionaries, then all the lower-level subfield names are
        also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.

        Returns:
            [`Features`]:
                The flattened features.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("squad", split="train")
        >>> ds.features.flatten()
        {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
         'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
         'context': Value(dtype='string', id=None),
         'id': Value(dtype='string', id=None),
         'question': Value(dtype='string', id=None),
         'title': Value(dtype='string', id=None)}
        ```
        """
        for depth in range(1, max_depth):
            no_change = True
            flattened = self.copy()
            for column_name, subfeature in self.items():
                if isinstance(subfeature, dict):
                    no_change = False
                    flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
                    del flattened[column_name]
                elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
                    no_change = False
                    flattened.update(
                        {
                            f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
                            for k, v in subfeature.feature.items()
                        }
                    )
                    del flattened[column_name]
                elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
                    no_change = False
                    flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
                    del flattened[column_name]
            self = flattened
            if no_change:
                break
        return self


def _align_features(features_list: List[Features]) -> List[Features]:
    """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
    name2feature = {}
    for features in features_list:
        for k, v in features.items():
            if k in name2feature and isinstance(v, dict):
                # Recursively align features.
                name2feature[k] = _align_features([name2feature[k], v])[0]
            elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
                name2feature[k] = v

    return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]


def _check_if_features_can_be_aligned(features_list: List[Features]):
    """Check if the dictionaries of features can be aligned.

    Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of type `Value("null")`.
    """
    name2feature = {}
    for features in features_list:
        for k, v in features.items():
            if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
                name2feature[k] = v

    for features in features_list:
        for k, v in features.items():
            if isinstance(v, dict) and isinstance(name2feature[k], dict):
                # Deep checks for structure.
                _check_if_features_can_be_aligned([name2feature[k], v])
            elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
                raise ValueError(
                    f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
                )
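
# Usage sketch (illustrative, not part of the library source): alignment lets a
# `Value("null")` placeholder adopt the concrete type found in another split.
#
#     f1 = Features({"a": Value("null")})
#     f2 = Features({"a": Value("int64")})
#     _check_if_features_can_be_aligned([f1, f2])   # passes without raising
#     _align_features([f1, f2])
#     # -> both returned Features map "a" to Value(dtype='int64', id=None)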
env-llmeval/lib/python3.10/site-packages/datasets/features/image.py
ADDED
@@ -0,0 +1,376 @@
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.download_config import DownloadConfig
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image [`Feature`] to read image data from an image file.

    Input: The Image feature accepts as input:
    - A `str`: Absolute path to the image file (i.e. random access is allowed).
    - A `dict` with the keys:

        - `path`: String with relative path of the image file to the archive file.
        - `bytes`: Bytes of the image file.

      This is useful for archived files with sequential access.

    - An `np.ndarray`: NumPy array representing an image.
    - A `PIL.Image.Image`: PIL image object.

    Args:
        decode (`bool`, defaults to `True`):
            Whether to decode the image data. If `False`,
            returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.

    Examples:

    ```py
    >>> from datasets import load_dataset, Image
    >>> ds = load_dataset("beans", split="train")
    >>> ds.features["image"]
    Image(decode=True, id=None)
    >>> ds[0]["image"]
    <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
    >>> ds = ds.cast_column('image', Image(decode=False))
    >>> ds[0]["image"]
    {'bytes': None,
     'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
    ```
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow.

        Args:
            value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
                Data passed as input to Image feature.

        Returns:
            `dict` with "path" and "bytes" fields
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data.

        Args:
            value (`str` or `dict`):
                A string with the absolute image file path, or a dictionary with
                keys:

                - `path`: String with absolute or relative image file path.
                - `bytes`: The bytes of the image file.
            token_per_repo_id (`dict`, *optional*):
                To access and decode
                image files from private repositories on the Hub, you can pass
                a dictionary repo_id (`str`) -> token (`bool` or `str`).

        Returns:
            `PIL.Image.Image`
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    pattern = (
                        config.HUB_DATASETS_URL
                        if source_url.startswith(config.HF_ENDPOINT)
                        else config.HUB_DATASETS_HFFS_URL
                    )
                    try:
                        repo_id = string_to_dict(source_url, pattern)["repo_id"]
                        token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        token = None
                    download_config = DownloadConfig(token=token)
                    with xopen(path, "rb", download_config=download_config) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

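    # Usage sketch (illustrative, not part of the library source):
    # `encode_example` normalizes any supported input to the {"bytes", "path"}
    # storage dict, and `decode_example` turns it back into a PIL image.
    # Assumes Pillow is installed and "cat.png" exists.
    #
    #     feat = Image()
    #     enc = feat.encode_example("cat.png")   # {'path': 'cat.png', 'bytes': None}
    #     pil = feat.decode_example(enc)         # PIL.Image.Image instance
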
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type.
        The Arrow types that can be converted to the Image pyarrow storage type are:

        - `pa.string()` - it must contain the "path" data
        - `pa.binary()` - it must contain the image bytes
        - `pa.struct({"bytes": pa.binary()})`
        - `pa.struct({"path": pa.string()})`
        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
        - `pa.list(*)` - it must contain the image array data

        Args:
            storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
                PyArrow array to cast.

        Returns:
            `pa.StructArray`: Array in the Image arrow storage type, that is
                `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
        """
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array.

        Args:
            storage (`pa.StructArray`):
                PyArrow array to embed.

        Returns:
            `pa.StructArray`: Array in the Image arrow storage type, that is
                `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
        """

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

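# Usage sketch (illustrative, not part of the library source): `cast_storage`
# lifts a plain string array of paths into the {"bytes", "path"} struct storage.
#
#     arr = pa.array(["a.png", None], type=pa.string())
#     Image().cast_storage(arr)
#     # -> StructArray of {bytes: binary, path: string}, with all bytes null
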
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only np.dtype('|u1') is supported for multi-channel arrays."
            )
        dest_dtype = np.dtype("|u1")
        if dtype != dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
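
# Usage sketch (illustrative, not part of the library source): array dtypes
# without native Pillow support are downcast within their kind, with a warning;
# e.g. int64 grayscale data is encoded as int32.
#
#     arr = np.arange(64, dtype=np.int64).reshape(8, 8)
#     encode_np_array(arr)  # warns: "Downcasting array dtype int64 to int32 ..."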
env-llmeval/lib/python3.10/site-packages/datasets/features/translation.py
ADDED
@@ -0,0 +1,129 @@
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`list` of `str`):
            The language codes; each example maps every one of these string language codes to a string translation.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...     'en': 'the cat',
    ...     'fr': 'le chat',
    ...     'de': 'die katze'
    ... }
    ```
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}

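# Usage sketch (illustrative, not part of the library source): flattening a
# Translation feature yields one string column per language code.
#
#     Translation(languages=["en", "fr"]).flatten()
#     # -> {'en': Value(dtype='string', id=None), 'fr': Value(dtype='string', id=None)}
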
@dataclass
class TranslationVariableLanguages:
    """`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`list` of `str`, *optional*):
            The full set of allowed language codes; the languages present may vary from example to example.

    Returns:
        - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
            Language codes sorted in ascending order or plain text translations, sorted to align with language codes.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...     'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
    ...     'de': 'die katze'
    ... }
    >>> # Tensor returned:
    >>> {
    ...     'language': ['en', 'de', 'fr', 'fr'],
    ...     'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
    ... }
    ```
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if set(translation_dict) == {"language", "translation"}:
            return translation_dict
        elif self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
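
# Usage sketch (illustrative, not part of the library source): `encode_example`
# splits multi-valued entries and sorts the pairs by language code.
#
#     tvl = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     tvl.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # -> {'language': ('en', 'fr', 'fr'),
#     #     'translation': ('the cat', 'la chatte', 'le chat')}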
env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (577 Bytes)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/_datasets_server.cpython-310.pyc
ADDED
Binary file (3.22 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc
ADDED
Binary file (8.43 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/_filelock.cpython-310.pyc
ADDED
Binary file (1.69 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/beam_utils.cpython-310.pyc
ADDED
Binary file (2.11 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc
ADDED
Binary file (3.62 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/doc_utils.cpython-310.pyc
ADDED
Binary file (689 Bytes)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/download_manager.cpython-310.pyc
ADDED
Binary file (187 Bytes)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc
ADDED
Binary file (1.43 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc
ADDED
Binary file (13.7 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc
ADDED
Binary file (20.1 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc
ADDED
Binary file (361 Bytes)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/hub.cpython-310.pyc
ADDED
Binary file (1.84 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/info_utils.cpython-310.pyc
ADDED
Binary file (5.72 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/logging.cpython-310.pyc
ADDED
Binary file (5.14 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc
ADDED
Binary file (12.3 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc
ADDED
Binary file (3.49 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc
ADDED
Binary file (21.8 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc
ADDED
Binary file (8.95 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/sharding.cpython-310.pyc
ADDED
Binary file (4.53 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc
ADDED
Binary file (3.17 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc
ADDED
Binary file (17.7 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc
ADDED
Binary file (4.02 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc
ADDED
Binary file (2.36 kB)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc
ADDED
Binary file (439 Bytes)

env-llmeval/lib/python3.10/site-packages/datasets/utils/__pycache__/version.cpython-310.pyc
ADDED
Binary file (4.02 kB)