Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/aqua-rat.yaml +24 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-biology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chemistry.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chinese.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-english.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-history.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathqa.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-physics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-ca.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-kd.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-en.yaml +7 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/lsat-ar.yaml +7 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/lsat-rc.yaml +7 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/sat-en-without-passage.yaml +7 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/sat-math.yaml +7 -0
- lm-evaluation-harness/lm_eval/tasks/agieval/utils.py +274 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_as.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_gu.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_kn.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ml.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ne.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sa.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sd.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ur.yaml +33 -0
- lm-evaluation-harness/lm_eval/tasks/indiccopa/utils.py +136 -0
- venv/lib/python3.10/site-packages/datasets/combine.py +215 -0
- venv/lib/python3.10/site-packages/datasets/dataset_dict.py +0 -0
- venv/lib/python3.10/site-packages/datasets/exceptions.py +85 -0
- venv/lib/python3.10/site-packages/datasets/inspect.py +582 -0
- venv/lib/python3.10/site-packages/datasets/streaming.py +142 -0
- venv/lib/python3.10/site-packages/datasets/table.py +2415 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Jan_Mayen +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Reykjavik +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/South_Georgia +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Stanley +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaNorte +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaSur +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/General +0 -0
- venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Apia +0 -0
ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d64b94bf893224ff1789b692a88a7052307ab1a947134c2713501d2b119513fd
+size 33555612
ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5ba02121cdc9ced8147ef6bc5084f2ea1e7e9ddcb3bb75514896fcdf78d0dd1
+size 33555533
ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb5380630c6eb51214baa0c22f53c8663bdabbeee5ec91aa1bddf01b366fc063
+size 9387
ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f91ef94428e9dcc46ab62e13fa34130086a40dfa1388cb432205029a078fc0
+size 9372
ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8dcb79d73925597af47f3730c3fbebbab533108fb86d4e7741e572d1699a7ae
+size 9387
ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f97da87e4a3cd5212cf743dd0ade277cf4c1c6847eb0415ec868ebe426b46dc3
+size 33555627
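The six `ckpts/...` entries above are Git LFS pointer files, not the optimizer tensors themselves: each stores only the spec version, a `sha256` object id, and the byte size of the real blob. A minimal sketch of reading such a pointer (the field layout follows the git-lfs v1 spec; `parse_lfs_pointer` is an illustrative helper of ours, not part of the git-lfs tooling):

```python
# Minimal sketch: reading a Git LFS pointer file. The three "key value"
# lines follow https://git-lfs.github.com/spec/v1; parse_lfs_pointer is
# our own helper name, not a git-lfs API.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:d64b94bf893224ff1789b692a88a7052307ab1a947134c2713501d2b119513fd
size 33555612
"""
assert parse_lfs_pointer(pointer)["size_bytes"] == 33555612
```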
lm-evaluation-harness/lm_eval/tasks/agieval/aqua-rat.yaml
ADDED
@@ -0,0 +1,24 @@
+group:
+  - agieval
+  - agieval_en
+  - agieval_nous
+task: agieval_aqua_rat
+dataset_path: hails/agieval-aqua-rat
+dataset_name: null
+output_type: multiple_choice
+training_split: null
+validation_split: null
+test_split: test
+doc_to_text: "{{query}}"
+doc_to_target: "{{gold}}"
+doc_to_choice: "{{choices}}"
+process_results: !function utils.process_results_mcqa
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
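Every AGIEval task file that follows reuses this base config via `include: aqua-rat.yaml`, overriding only `group`, `task`, and `dataset_path`. A rough sketch of how such include-then-override resolution behaves, assuming a shallow merge in which the including file's keys win (`load_task_config` is a hypothetical helper; the real lm-evaluation-harness loader is more involved):

```python
import os
import yaml

# Let `!function utils.process_results_mcqa` load as a plain string so
# yaml.safe_load doesn't reject the custom tag.
yaml.SafeLoader.add_constructor("!function", lambda loader, node: node.value)

def load_task_config(path: str) -> dict:
    with open(path) as f:
        config = yaml.safe_load(f)
    base_name = config.pop("include", None)
    if base_name is not None:
        base = load_task_config(os.path.join(os.path.dirname(path), base_name))
        base.update(config)  # the including file's keys override the base's
        config = base
    return config

# e.g. load_task_config("gaokao-biology.yaml") would yield the aqua-rat
# settings with group/task/dataset_path replaced.
```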
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-biology.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_gaokao_biology
+dataset_path: hails/agieval-gaokao-biology
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chemistry.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_gaokao_chemistry
+dataset_path: hails/agieval-gaokao-chemistry
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chinese.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_gaokao_chinese
+dataset_path: hails/agieval-gaokao-chinese
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-english.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_en # categorizing as EN because the AGIEval codebase lists this as in `english_qa_tasks`
+task: agieval_gaokao_english
+dataset_path: hails/agieval-gaokao-english
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-history.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_gaokao_history
+dataset_path: hails/agieval-gaokao-history
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathqa.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_gaokao_mathqa
+dataset_path: hails/agieval-gaokao-mathqa
lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-physics.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_gaokao_physics
+dataset_path: hails/agieval-gaokao-physics
lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-ca.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_jec_qa_ca
+dataset_path: hails/agieval-jec-qa-ca
lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-kd.yaml
ADDED
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_cn
+task: agieval_jec_qa_kd
+dataset_path: hails/agieval-jec-qa-kd
lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-en.yaml
ADDED
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_nous
+  - agieval_en
+task: agieval_logiqa_en
+dataset_path: hails/agieval-logiqa-en
lm-evaluation-harness/lm_eval/tasks/agieval/lsat-ar.yaml
ADDED
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_nous
+  - agieval_en
+task: agieval_lsat_ar
+dataset_path: hails/agieval-lsat-ar
lm-evaluation-harness/lm_eval/tasks/agieval/lsat-rc.yaml
ADDED
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_nous
+  - agieval_en
+task: agieval_lsat_rc
+dataset_path: hails/agieval-lsat-rc
lm-evaluation-harness/lm_eval/tasks/agieval/sat-en-without-passage.yaml
ADDED
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_nous
+  - agieval_en
+task: agieval_sat_en_without_passage
+dataset_path: hails/agieval-sat-en-without-passage
lm-evaluation-harness/lm_eval/tasks/agieval/sat-math.yaml
ADDED
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+  - agieval
+  - agieval_nous
+  - agieval_en
+task: agieval_sat_math
+dataset_path: hails/agieval-sat-math
lm-evaluation-harness/lm_eval/tasks/agieval/utils.py
ADDED
@@ -0,0 +1,274 @@
+# Answer parsing and normalization code, from
+# https://github.com/ruixiangcui/AGIEval/blob/main/src/
+# math_equivalence.py and post_process.py
+import re
+from typing import Dict, List
+
+import numpy as np
+
+
+def parse_math_answer(raw_string):
+    def remove_boxed(s):
+        left = "\\boxed{"
+        try:
+            assert s[: len(left)] == left
+            assert s[-1] == "}"
+            answer = s[len(left) : -1]
+            if "=" in answer:
+                answer = answer.split("=")[-1].lstrip(" ")
+            return answer
+        except Exception:
+            return None
+
+    def last_boxed_only_string(string):
+        idx = string.rfind("\\boxed")
+        if idx < 0:
+            idx = string.rfind("\\fbox")
+            if idx < 0:
+                return None
+        i = idx
+        right_brace_idx = None
+        num_left_braces_open = 0
+        while i < len(string):
+            if string[i] == "{":
+                num_left_braces_open += 1
+            if string[i] == "}":
+                num_left_braces_open -= 1
+                if num_left_braces_open == 0:
+                    right_brace_idx = i
+                    break
+            i += 1
+
+        if right_brace_idx is None:
+            retval = None
+        else:
+            retval = string[idx : right_brace_idx + 1]
+
+        return retval
+
+    def get_answer_with_dollar_sign(s):
+        first_pattern = "\$(.*)\$"
+        last_match = None
+        matches = re.findall(first_pattern, s)
+        if matches:
+            last_match = matches[-1]
+            if "=" in last_match:
+                last_match = last_match.split("=")[-1].lstrip(" ")
+        return last_match
+
+    def get_answer_without_dollar_sign(s):
+        last_match = None
+        if "=" in s:
+            last_match = s.split("=")[-1].lstrip(" ").rstrip(".")
+            if "\\n" in last_match:
+                last_match = last_match.split("\\n")[0]
+        else:
+            pattern = "(?:\\$)?\d+(?:\.\d+)?(?![\w\d])"
+            matches = re.findall(pattern, s)
+            if matches:
+                last_match = matches[-1]
+        return last_match
+
+    if "\\boxed" in raw_string:
+        answer = remove_boxed(last_boxed_only_string(raw_string))
+    else:
+        answer = get_answer_with_dollar_sign(raw_string)
+        if not answer:
+            answer = get_answer_without_dollar_sign(raw_string)
+    return answer
+
+
+# code from https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py
+def _fix_fracs(string):
+    substrs = string.split("\\frac")
+    new_str = substrs[0]
+    if len(substrs) > 1:
+        substrs = substrs[1:]
+        for substr in substrs:
+            new_str += "\\frac"
+            if substr[0] == "{":
+                new_str += substr
+            else:
+                try:
+                    assert len(substr) >= 2
+                except Exception:
+                    return string
+                a = substr[0]
+                b = substr[1]
+                if b != "{":
+                    if len(substr) > 2:
+                        post_substr = substr[2:]
+                        new_str += "{" + a + "}{" + b + "}" + post_substr
+                    else:
+                        new_str += "{" + a + "}{" + b + "}"
+                else:
+                    if len(substr) > 2:
+                        post_substr = substr[2:]
+                        new_str += "{" + a + "}" + b + post_substr
+                    else:
+                        new_str += "{" + a + "}" + b
+    string = new_str
+    return string
+
+
+def _fix_a_slash_b(string):
+    if len(string.split("/")) != 2:
+        return string
+    a = string.split("/")[0]
+    b = string.split("/")[1]
+    try:
+        a = int(a)
+        b = int(b)
+        assert string == "{}/{}".format(a, b)
+        new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
+        return new_string
+    except Exception:
+        return string
+
+
+def _remove_right_units(string):
+    # "\\text{ " only ever occurs (at least in the val set) when describing units
+    if "\\text{ " in string:
+        splits = string.split("\\text{ ")
+        assert len(splits) == 2
+        return splits[0]
+    else:
+        return string
+
+
+def _fix_sqrt(string):
+    if "\\sqrt" not in string:
+        return string
+    splits = string.split("\\sqrt")
+    new_string = splits[0]
+    for split in splits[1:]:
+        if split[0] != "{":
+            a = split[0]
+            new_substr = "\\sqrt{" + a + "}" + split[1:]
+        else:
+            new_substr = "\\sqrt" + split
+        new_string += new_substr
+    return new_string
+
+
+def _strip_string(string):
+    # linebreaks
+    string = string.replace("\n", "")
+    # print(string)
+
+    # remove inverse spaces
+    string = string.replace("\\!", "")
+    # print(string)
+
+    # replace \\ with \
+    string = string.replace("\\\\", "\\")
+    # print(string)
+
+    # replace tfrac and dfrac with frac
+    string = string.replace("tfrac", "frac")
+    string = string.replace("dfrac", "frac")
+    # print(string)
+
+    # remove \left and \right
+    string = string.replace("\\left", "")
+    string = string.replace("\\right", "")
+    # print(string)
+
+    # Remove circ (degrees)
+    string = string.replace("^{\\circ}", "")
+    string = string.replace("^\\circ", "")
+
+    # remove dollar signs
+    string = string.replace("\\$", "")
+
+    # remove units (on the right)
+    string = _remove_right_units(string)
+
+    # remove percentage
+    string = string.replace("\\%", "")
+    string = string.replace("\%", "")
+
+    # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
+    string = string.replace(" .", " 0.")
+    string = string.replace("{.", "{0.")
+    # if empty, return empty string
+    if len(string) == 0:
+        return string
+    if string[0] == ".":
+        string = "0" + string
+
+    # to consider: get rid of e.g. "k = " or "q = " at beginning
+    if len(string.split("=")) == 2:
+        if len(string.split("=")[0]) <= 2:
+            string = string.split("=")[1]
+
+    # fix sqrt3 --> sqrt{3}
+    string = _fix_sqrt(string)
+
+    # remove spaces
+    string = string.replace(" ", "")
+
+    # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
+    string = _fix_fracs(string)
+
+    # manually change 0.5 --> \frac{1}{2}
+    if string == "0.5":
+        string = "\\frac{1}{2}"
+
+    # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
+    string = _fix_a_slash_b(string)
+
+    return string
+
+
+def is_equiv(str1, str2, verbose=False):
+    if str1 is None and str2 is None:
+        print("WARNING: Both None")
+        return True
+    if str1 is None or str2 is None:
+        return False
+
+    str1, str2 = parse_math_answer(str1), parse_math_answer(str2)
+
+    try:
+        ss1 = _strip_string(str1)
+        ss2 = _strip_string(str2)
+        if verbose:
+            print(ss1, ss2)
+        return ss1 == ss2
+    except Exception:
+        return str1 == str2
+
+
+def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
+    candidate = results[0]
+
+    gold = doc["answer"]
+
+    if not gold:
+        print(doc, candidate, gold)
+    if is_equiv(candidate, gold):
+        retval = 1
+    else:
+        retval = 0
+
+    results = {
+        "acc": retval,
+    }
+    return results
+
+
+# use a custom process_results() function, because AGIEval can have multiple valid answers
+def process_results_mcqa(doc, results):
+    results = [result[0] for result in results]
+
+    gold = doc["gold"]
+
+    acc = 1.0 if int(np.argmax(results)) in gold else 0.0
+    completion_len = np.array([float(len(i)) for i in doc["choices"]])
+    acc_norm = 1.0 if int(np.argmax(results / completion_len)) in gold else 0.0
+
+    return {
+        "acc": acc,
+        "acc_norm": acc_norm,
+    }
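A few illustrative calls against the helpers above (inputs are invented; `utils` stands for this module, assuming it is importable):

```python
import utils  # the utils.py above, assuming it is on the import path

# parse_math_answer pulls the final answer out of a free-form solution,
# preferring \boxed{...}, then $...$, then a bare trailing number.
assert utils.parse_math_answer("so we get $\\boxed{42}$") == "42"

# is_equiv compares answers after LaTeX normalization, so a/b and
# \frac{a}{b} forms of the same fraction match.
assert utils.is_equiv("$1/2$", "$\\frac{1}{2}$")

# process_results_mcqa scores one multiple-choice doc from per-choice
# loglikelihoods; gold is a list because several answers can be valid.
doc = {"choices": ["yes", "certainly"], "gold": [1]}
print(utils.process_results_mcqa(doc, [(-3.4,), (-1.2,)]))
# -> {'acc': 1.0, 'acc_norm': 1.0}
```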
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_as.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-as
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_as
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-as
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_gu.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-gu
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_gu
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-gu
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_kn.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-kn
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_kn
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-kn
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ml.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-ml
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_ml
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-ml
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ne.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-ne
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_ne
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-ne
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sa.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-sa
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_sa
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-sa
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sd.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-sd
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_sd
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-sd
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ur.yaml
ADDED
@@ -0,0 +1,33 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-ur
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+#   - metric: acc
+#     aggregation: mean
+#     higher_is_better: true
+# metadata:
+#   version: 1.0
+
+doc_to_text: !function utils.doc_to_text_ur
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+  - metric: acc
+metadata:
+  version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+#   सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-ur
lm-evaluation-harness/lm_eval/tasks/indiccopa/utils.py
ADDED
@@ -0,0 +1,136 @@
+from functools import partial
+
+
+def convert_choice(choice):
+    return choice
+
+
+def doc_to_text(doc, connector):
+    # Drop the period
+    conn = connector[doc["question"]]
+    return doc["premise"].strip()[:-1] + f" {conn}"
+
+
+def doc_to_choice(doc):
+    return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])]
+
+
+doc_to_text_hi = partial(
+    doc_to_text,
+    connector={
+        "cause": "कारण",
+        "effect": "परिणाम",
+    },
+)
+
+doc_to_text_mr = partial(
+    doc_to_text,
+    connector={
+        "cause": "कारण",
+        "effect": "परिणाम",
+    },
+)
+
+doc_to_text_as = partial(
+    doc_to_text,
+    connector={
+        "cause": "কাৰণ",
+        "effect": "প্ৰভাৱ",
+    },
+)
+
+doc_to_text_bn = partial(
+    doc_to_text,
+    connector={
+        "cause": "কারণ",
+        "effect": "প্রভাব",
+    },
+)
+
+doc_to_text_gu = partial(
+    doc_to_text,
+    connector={
+        "cause": "કારણ",
+        "effect": "અસર",
+    },
+)
+
+doc_to_text_kn = partial(
+    doc_to_text,
+    connector={
+        "cause": "ಕಾರಣ",
+        "effect": "ಪರಿಣಾಮ",
+    },
+)
+
+doc_to_text_mai = partial(
+    doc_to_text,
+    connector={
+        "cause": "कारण",
+        "effect": "प्रभाव",
+    },
+)
+
+doc_to_text_ml = partial(
+    doc_to_text,
+    connector={
+        "cause": "കാരണമാകുന്നു",
+        "effect": "ഫലം",
+    },
+)
+
+doc_to_text_ne = partial(
+    doc_to_text,
+    connector={
+        "cause": "कारण",
+        "effect": "असर",
+    },
+)
+
+doc_to_text_or = partial(
+    doc_to_text,
+    connector={
+        "cause": "କାରଣ",
+        "effect": "ପ୍ରଭାବ",
+    },
+)
+
+doc_to_text_sa = partial(
+    doc_to_text,
+    connector={
+        "cause": "निमित्तम्",
+        "effect": "परिणाम",
+    },
+)
+
+doc_to_text_sd = partial(
+    doc_to_text,
+    connector={
+        "cause": "سبب",
+        "effect": "اثر",
+    },
+)
+
+doc_to_text_ta = partial(
+    doc_to_text,
+    connector={
+        "cause": "காரணம்",
+        "effect": "விளைவு",
+    },
+)
+
+doc_to_text_te = partial(
+    doc_to_text,
+    connector={
+        "cause": "కారణం",
+        "effect": "ప్రభావం",
+    },
+)
+
+doc_to_text_ur = partial(
+    doc_to_text,
+    connector={
+        "cause": "وجہ",
+        "effect": "اثر",
+    },
+)
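For reference, this is how the Hindi variant behaves on an invented doc (field names follow the COPA schema used by IndicCOPA):

```python
# Invented IndicCOPA-style doc; fields follow the COPA schema.
doc = {
    "premise": "बारिश हो रही थी।",   # "It was raining."
    "question": "cause",
    "choice1": "बादल छाए हुए थे।",   # "The sky was cloudy."
    "choice2": "धूप निकली थी।",      # "The sun was out."
    "label": 0,
}

# doc_to_text drops the premise's final danda and appends the
# language-specific connective ("कारण" = "cause" for Hindi):
print(doc_to_text_hi(doc))   # -> "बारिश हो रही थी कारण"
print(doc_to_choice(doc))    # -> both choices, in order
```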
venv/lib/python3.10/site-packages/datasets/combine.py
ADDED
@@ -0,0 +1,215 @@
+from typing import List, Optional, TypeVar
+
+from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .info import DatasetInfo
+from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
+from .splits import NamedSplit
+from .utils import logging
+from .utils.py_utils import Literal
+
+
+logger = logging.get_logger(__name__)
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def interleave_datasets(
+    datasets: List[DatasetType],
+    probabilities: Optional[List[float]] = None,
+    seed: Optional[int] = None,
+    info: Optional[DatasetInfo] = None,
+    split: Optional[NamedSplit] = None,
+    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> DatasetType:
+    """
+    Interleave several datasets (sources) into a single dataset.
+    The new dataset is constructed by alternating between the sources to get the examples.
+
+    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
+
+    - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
+    - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+    The resulting dataset ends when one of the source datasets runs out of examples except when `oversampling` is `True`,
+    in which case, the resulting dataset ends when all datasets have ran out of examples at least one time.
+
+    Note for iterable datasets:
+
+    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
+    Therefore the "first_exhausted" strategy on an sharded iterable dataset can generate less samples in total (up to 1 missing sample per subdataset per worker).
+
+    Args:
+        datasets (`List[Dataset]` or `List[IterableDataset]`):
+            List of datasets to interleave.
+        probabilities (`List[float]`, *optional*, defaults to `None`):
+            If specified, the new dataset is constructed by sampling
+            examples from one source at a time according to these probabilities.
+        seed (`int`, *optional*, defaults to `None`):
+            The random seed used to choose a source for each example.
+        info ([`DatasetInfo`], *optional*):
+            Dataset information, like description, citation, etc.
+            <Added version="2.4.0"/>
+        split ([`NamedSplit`], *optional*):
+            Name of the dataset split.
+            <Added version="2.4.0"/>
+        stopping_strategy (`str`, defaults to `first_exhausted`):
+            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
+            By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has ran out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every samples of every dataset has been added at least once.
+            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have really low probability of visiting.
+    Returns:
+        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
+        parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
+        `IterableDataset`.
+
+    Example:
+
+        For regular datasets (map-style):
+
+        ```python
+        >>> from datasets import Dataset, interleave_datasets
+        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+        >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
+        >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
+        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+        >>> dataset["a"]
+        [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
+        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+        >>> dataset["a"]
+        [10, 0, 11, 1, 2]
+        >>> dataset = interleave_datasets([d1, d2, d3])
+        >>> dataset["a"]
+        [0, 10, 20, 1, 11, 21, 2, 12, 22]
+        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+        >>> dataset["a"]
+        [0, 10, 20, 1, 11, 21, 2, 12, 22]
+        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+        >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+        >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
+        >>> dataset = interleave_datasets([d1, d2, d3])
+        >>> dataset["a"]
+        [0, 10, 20, 1, 11, 21, 2, 12, 22]
+        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+        >>> dataset["a"]
+        [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
+        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+        >>> dataset["a"]
+        [10, 0, 11, 1, 2]
+        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+        >>> dataset["a"]
+        [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
+        For datasets in streaming mode (iterable):
+
+        >>> from datasets import load_dataset, interleave_datasets
+        >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
+        >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
+        >>> dataset = interleave_datasets([d1, d2])
+        >>> iterator = iter(dataset)
+        >>> next(iterator)
+        {'text': 'Mtendere Village was inspired by the vision...}
+        >>> next(iterator)
+        {'text': "Média de débat d'idées, de culture...}
+        ```
+    """
+    from .arrow_dataset import Dataset
+    from .iterable_dataset import IterableDataset
+
+    if not datasets:
+        raise ValueError("Unable to interleave an empty list of datasets.")
+    for i, dataset in enumerate(datasets):
+        if not isinstance(dataset, (Dataset, IterableDataset)):
+            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+                if not dataset:
+                    raise ValueError(
+                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+                        "is an empty dataset dictionary."
+                    )
+                raise ValueError(
+                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
+                )
+            raise ValueError(
+                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+            )
+        if i == 0:
+            dataset_type, other_type = (
+                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+            )
+        elif not isinstance(dataset, dataset_type):
+            raise ValueError(
+                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+            )
+    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
+    if dataset_type is Dataset:
+        return _interleave_map_style_datasets(
+            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+        )
+    else:
+        return _interleave_iterable_datasets(
+            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+        )
+
+
+def concatenate_datasets(
+    dsets: List[DatasetType],
+    info: Optional[DatasetInfo] = None,
+    split: Optional[NamedSplit] = None,
+    axis: int = 0,
+) -> DatasetType:
+    """
+    Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].
+
+    Args:
+        dsets (`List[datasets.Dataset]`):
+            List of Datasets to concatenate.
+        info (`DatasetInfo`, *optional*):
+            Dataset information, like description, citation, etc.
+        split (`NamedSplit`, *optional*):
+            Name of the dataset split.
+        axis (`{0, 1}`, defaults to `0`):
+            Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+            (horizontally).
+
+            <Added version="1.6.0"/>
+
+    Example:
+
+    ```py
+    >>> ds3 = concatenate_datasets([ds1, ds2])
+    ```
+    """
+
+    if not dsets:
+        raise ValueError("Unable to concatenate an empty list of datasets.")
+    for i, dataset in enumerate(dsets):
+        if not isinstance(dataset, (Dataset, IterableDataset)):
+            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+                if not dataset:
+                    raise ValueError(
+                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+                        "is an empty dataset dictionary."
+                    )
+                raise ValueError(
+                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
+                )
+            raise ValueError(
+                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+            )
+        if i == 0:
+            dataset_type, other_type = (
+                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+            )
+        elif not isinstance(dataset, dataset_type):
+            raise ValueError(
+                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+            )
+    if dataset_type is Dataset:
+        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
+    else:
+        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
venv/lib/python3.10/site-packages/datasets/dataset_dict.py
ADDED
The diff for this file is too large to render.
venv/lib/python3.10/site-packages/datasets/exceptions.py
ADDED
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+from typing import Any, Dict, List, Optional, Union
+
+from huggingface_hub import HfFileSystem
+
+from . import config
+from .table import CastError
+from .utils.track import TrackedIterable, tracked_list, tracked_str
+
+
+class DatasetsError(Exception):
+    """Base class for exceptions in this library."""
+
+
+class DefunctDatasetError(DatasetsError):
+    """The dataset has been defunct."""
+
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+    """FileNotFoundError raised by this library."""
+
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+    """No (supported) data files found."""
+
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+    """Dataset not found.
+
+    Raised when trying to access:
+    - a missing dataset, or
+    - a private/gated dataset and the user is not authenticated.
+    """
+
+
+class DatasetBuildError(DatasetsError):
+    pass
+
+
+class ManualDownloadError(DatasetBuildError):
+    pass
+
+
+class FileFormatError(DatasetBuildError):
+    pass
+
+
+class DatasetGenerationError(DatasetBuildError):
+    pass
+
+
+class DatasetGenerationCastError(DatasetGenerationError):
+    @classmethod
+    def from_cast_error(
+        cls,
+        cast_error: CastError,
+        builder_name: str,
+        gen_kwargs: Dict[str, Any],
+        token: Optional[Union[bool, str]],
+    ) -> "DatasetGenerationCastError":
+        explanation_message = (
+            f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
+        )
+        formatted_tracked_gen_kwargs: List[str] = []
+        for gen_kwarg in gen_kwargs.values():
+            if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterable)):
+                continue
+            while isinstance(gen_kwarg, (tracked_list, TrackedIterable)) and gen_kwarg.last_item is not None:
+                gen_kwarg = gen_kwarg.last_item
+            if isinstance(gen_kwarg, tracked_str):
+                gen_kwarg = gen_kwarg.get_origin()
+            if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
+                resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
+                gen_kwarg = "hf://" + resolved_path.unresolve()
+                if "@" + resolved_path.revision in gen_kwarg:
+                    gen_kwarg = (
+                        gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+                        + f" (at revision {resolved_path.revision})"
+                    )
+            formatted_tracked_gen_kwargs.append(str(gen_kwarg))
+        if formatted_tracked_gen_kwargs:
+            explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
+        help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
+        return cls("An error occurred while generating the dataset" + explanation_message + help_message)
venv/lib/python3.10/site-packages/datasets/inspect.py
ADDED
@@ -0,0 +1,582 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""List and inspect datasets."""

import inspect
import os
import shutil
import warnings
from pathlib import Path, PurePath
from typing import Dict, List, Mapping, Optional, Sequence, Union

import huggingface_hub

from . import config
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager
from .info import DatasetInfo
from .load import (
    dataset_module_factory,
    get_dataset_builder_class,
    import_main_class,
    load_dataset_builder,
    metric_module_factory,
)
from .utils.deprecation_utils import deprecated
from .utils.file_utils import relative_to_absolute_path
from .utils.logging import get_logger
from .utils.version import Version


logger = get_logger(__name__)


class SplitsNotFoundError(ValueError):
    pass


@deprecated("Use 'huggingface_hub.list_datasets' instead.")
def list_datasets(with_community_datasets=True, with_details=False):
    """List all the dataset scripts available on the Hugging Face Hub.

    Args:
        with_community_datasets (`bool`, *optional*, defaults to `True`):
            Include the community provided datasets.
        with_details (`bool`, *optional*, defaults to `False`):
            Return the full details on the datasets instead of only the short name.

    Example:

    ```py
    >>> from datasets import list_datasets
    >>> list_datasets()
    ['acronym_identification',
     'ade_corpus_v2',
     'adversarial_qa',
     'aeslc',
     'afrikaans_ner_corpus',
     'ag_news',
     ...
    ]
    ```
    """
    datasets = huggingface_hub.list_datasets(full=with_details)
    if not with_community_datasets:
        datasets = [dataset for dataset in datasets if "/" not in dataset.id]
    if not with_details:
        datasets = [dataset.id for dataset in datasets]
    return list(datasets)
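
# Illustrative sketch (editorial addition, not part of the original file):
# `list_datasets` above is deprecated, so the replacement named in its
# decorator can be called directly. Assumes a recent `huggingface_hub` release
# where `list_datasets` accepts `limit` and yields objects with an `id`.
import huggingface_hub

for ds_info in huggingface_hub.list_datasets(limit=5):
    print(ds_info.id)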


@deprecated(
    "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
)
def list_metrics(with_community_metrics=True, with_details=False):
    """List all the metric scripts available on the Hugging Face Hub.

    <Deprecated version="2.5.0">

    Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate

    </Deprecated>

    Args:
        with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
        with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.

    Example:

    ```py
    >>> from datasets import list_metrics
    >>> list_metrics()
    ['accuracy',
     'bertscore',
     'bleu',
     'bleurt',
     'cer',
     'chrf',
     ...
    ]
    ```
    """
    metrics = huggingface_hub.list_metrics()
    if not with_community_metrics:
        metrics = [metric for metric in metrics if "/" not in metric.id]
    if not with_details:
        metrics = [metric.id for metric in metrics]
    return metrics
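
# Illustrative sketch (editorial addition, not part of the original file):
# the replacement named in the deprecation message above, assuming the
# `evaluate` library is installed.
import evaluate

metric_ids = evaluate.list_evaluation_modules(module_type="metric")
print(metric_ids[:5])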


@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
    """
    Allow inspection/modification of a dataset script by copying it to the local drive at `local_path`.

    Args:
        path (`str`): Path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name
              as the directory),
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
              e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
        local_path (`str`):
            Path to the local folder to copy the dataset script to.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        **download_kwargs (additional keyword arguments):
            Optional arguments for [`DownloadConfig`] which will override
            the attributes of `download_config` if supplied.
    """
    if download_config is None:
        download_config = DownloadConfig(**download_kwargs)
    if os.path.isfile(path):
        path = str(Path(path).parent)
    if os.path.isdir(path):
        shutil.copytree(path, local_path, dirs_exist_ok=True)
    else:
        huggingface_hub.HfApi(endpoint=config.HF_ENDPOINT, token=download_config.token).snapshot_download(
            repo_id=path, repo_type="dataset", local_dir=local_path, force_download=download_config.force_download
        )
    print(
        f"The dataset {path} can be inspected at {local_path}. "
        f'You can modify this loading script if it has one and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
    )
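
# Illustrative sketch (editorial addition, not part of the original file):
# copy a dataset repository locally, then load from the local copy. The
# "./inspected_squad" path is hypothetical.
from datasets import inspect_dataset, load_dataset

inspect_dataset("squad", local_path="./inspected_squad")
squad_train = load_dataset("./inspected_squad", split="train")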


@deprecated(
    "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
)
def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
    r"""
    Allow inspection/modification of a metric script by copying it to the local drive at `local_path`.

    <Deprecated version="2.5.0">

    Use `evaluate.inspect_evaluation_module` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate

    </Deprecated>

    Args:
        path (``str``): path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
              e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
        local_path (``str``): path to the local folder to copy the dataset script to.
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
        **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
    """
    metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
    metric_cls = import_main_class(metric_module.module_path, dataset=False)
    module_source_path = inspect.getsourcefile(metric_cls)
    module_source_dirpath = os.path.dirname(module_source_path)
    for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
        dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
        os.makedirs(dst_dirpath, exist_ok=True)
        # skip hidden directories; prune the search
        dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
        for filename in filenames:
            shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
        shutil.copystat(dirpath, dst_dirpath)
    local_path = relative_to_absolute_path(local_path)
    print(
        f"The processing scripts for metric {path} can be inspected at {local_path}. "
        f"The main class is in {module_source_dirpath}. "
        f'You can modify these processing scripts and use them with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
    )


def get_dataset_infos(
    path: str,
    data_files: Optional[Union[Dict, List, str]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    use_auth_token="deprecated",
    **config_kwargs,
):
    """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.

    Args:
        path (`str`): path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
              e.g. `'squad'`, `'glue'` or `'openai/webtext'`
        revision (`Union[str, datasets.Version]`, *optional*):
            If specified, the dataset module will be loaded from the datasets repository at this version.
            By default:
            - it is set to the local version of the lib.
            - it will also try to load it from the main branch if it's not available at the local version of the lib.
            Specifying a version that is different from your local version of the lib might cause compatibility issues.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        data_files (`Union[Dict, List, str]`, *optional*):
            Defining the data_files of the dataset configuration.
        token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.
        use_auth_token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.

            <Deprecated version="2.14.0">

            `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.

            </Deprecated>

        **config_kwargs (additional keyword arguments):
            Optional attributes for builder class which will override the attributes if supplied.

    Example:

    ```py
    >>> from datasets import get_dataset_infos
    >>> get_dataset_infos('rotten_tomatoes')
    {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
    ```
    """
    if use_auth_token != "deprecated":
        warnings.warn(
            "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
            "You can remove this warning by passing 'token=<use_auth_token>' instead.",
            FutureWarning,
        )
        token = use_auth_token

    config_names = get_dataset_config_names(
        path=path,
        revision=revision,
        download_config=download_config,
        download_mode=download_mode,
        data_files=data_files,
        token=token,
    )
    return {
        config_name: get_dataset_config_info(
            path=path,
            config_name=config_name,
            data_files=data_files,
            download_config=download_config,
            download_mode=download_mode,
            revision=revision,
            token=token,
            **config_kwargs,
        )
        for config_name in config_names
    }
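
# Illustrative sketch (editorial addition, not part of the original file):
# iterate over the config-name -> DatasetInfo mapping returned above.
from datasets import get_dataset_infos

for config_name, ds_info in get_dataset_infos("rotten_tomatoes").items():
    print(config_name, list(ds_info.splits or {}))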


def get_dataset_config_names(
    path: str,
    revision: Optional[Union[str, Version]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    dynamic_modules_path: Optional[str] = None,
    data_files: Optional[Union[Dict, List, str]] = None,
    **download_kwargs,
):
    """Get the list of available config names for a particular dataset.

    Args:
        path (`str`): path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
              e.g. `'squad'`, `'glue'` or `'openai/webtext'`
        revision (`Union[str, datasets.Version]`, *optional*):
            If specified, the dataset module will be loaded from the datasets repository at this version.
            By default:
            - it is set to the local version of the lib.
            - it will also try to load it from the main branch if it's not available at the local version of the lib.
            Specifying a version that is different from your local version of the lib might cause compatibility issues.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
            Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
            By default the datasets and metrics are stored inside the `datasets_modules` module.
        data_files (`Union[Dict, List, str]`, *optional*):
            Defining the data_files of the dataset configuration.
        **download_kwargs (additional keyword arguments):
            Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
            for example `token`.

    Example:

    ```py
    >>> from datasets import get_dataset_config_names
    >>> get_dataset_config_names("glue")
    ['cola',
     'sst2',
     'mrpc',
     'qqp',
     'stsb',
     'mnli',
     'mnli_mismatched',
     'mnli_matched',
     'qnli',
     'rte',
     'wnli',
     'ax']
    ```
    """
    dataset_module = dataset_module_factory(
        path,
        revision=revision,
        download_config=download_config,
        download_mode=download_mode,
        dynamic_modules_path=dynamic_modules_path,
        data_files=data_files,
        **download_kwargs,
    )
    builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
    return list(builder_cls.builder_configs.keys()) or [
        dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
    ]
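
# Illustrative sketch (editorial addition, not part of the original file):
# a multi-config dataset returns every config name, while a dataset without
# explicit configs falls back to the single name computed above.
from datasets import get_dataset_config_names

print(get_dataset_config_names("glue"))             # ['cola', 'sst2', ...]
print(get_dataset_config_names("rotten_tomatoes"))  # ['default']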


def get_dataset_default_config_name(
    path: str,
    revision: Optional[Union[str, Version]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    dynamic_modules_path: Optional[str] = None,
    data_files: Optional[Union[Dict, List, str]] = None,
    **download_kwargs,
) -> Optional[str]:
    """Get the default config name for a particular dataset.
    Can return None only if the dataset has multiple configurations and no default configuration.

    Args:
        path (`str`): path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
              e.g. `'squad'`, `'glue'` or `'openai/webtext'`
        revision (`Union[str, datasets.Version]`, *optional*):
            If specified, the dataset module will be loaded from the datasets repository at this version.
            By default:
            - it is set to the local version of the lib.
            - it will also try to load it from the main branch if it's not available at the local version of the lib.
            Specifying a version that is different from your local version of the lib might cause compatibility issues.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
            Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
            By default the datasets and metrics are stored inside the `datasets_modules` module.
        data_files (`Union[Dict, List, str]`, *optional*):
            Defining the data_files of the dataset configuration.
        **download_kwargs (additional keyword arguments):
            Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
            for example `token`.

    Returns:
        Optional[str]: the default config name if there is one

    Example:

    ```py
    >>> from datasets import get_dataset_default_config_name
    >>> get_dataset_default_config_name("openbookqa")
    'main'
    ```
    """
    dataset_module = dataset_module_factory(
        path,
        revision=revision,
        download_config=download_config,
        download_mode=download_mode,
        dynamic_modules_path=dynamic_modules_path,
        data_files=data_files,
        **download_kwargs,
    )
    builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
    builder_configs = list(builder_cls.builder_configs.keys())
    if builder_configs:
        default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
    else:
        default_config_name = "default"
    return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
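
# Illustrative sketch (editorial addition, not part of the original file):
# a hypothetical stand-alone restatement of the resolution order above.
def _resolve_default_config(builder_configs, default_config_name):
    if builder_configs:
        fallback = builder_configs[0] if len(builder_configs) == 1 else None
    else:
        fallback = "default"
    return default_config_name or fallback


assert _resolve_default_config([], None) == "default"     # no configs at all
assert _resolve_default_config(["only"], None) == "only"  # a single config wins
assert _resolve_default_config(["a", "b"], None) is None  # ambiguous: no default
assert _resolve_default_config(["a", "b"], "a") == "a"    # explicit DEFAULT_CONFIG_NAME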


def get_dataset_config_info(
    path: str,
    config_name: Optional[str] = None,
    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    use_auth_token="deprecated",
    **config_kwargs,
) -> DatasetInfo:
    """Get the meta information (DatasetInfo) about a dataset for a particular config.

    Args:
        path (``str``): path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
              e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
        config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
        data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
        download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
        download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
        revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
        token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If True, or not specified, will get token from `"~/.huggingface"`.
        use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If True, or not specified, will get token from `"~/.huggingface"`.

            <Deprecated version="2.14.0">

            `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.

            </Deprecated>

        **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.

    """
    if use_auth_token != "deprecated":
        warnings.warn(
            "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
            "You can remove this warning by passing 'token=<use_auth_token>' instead.",
            FutureWarning,
        )
        token = use_auth_token

    builder = load_dataset_builder(
        path,
        name=config_name,
        data_files=data_files,
        download_config=download_config,
        download_mode=download_mode,
        revision=revision,
        token=token,
        **config_kwargs,
    )
    info = builder.info
    if info.splits is None:
        download_config = download_config.copy() if download_config else DownloadConfig()
        if token is not None:
            download_config.token = token
        builder._check_manual_download(
            StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
        )
        try:
            info.splits = {
                split_generator.name: {"name": split_generator.name, "dataset_name": path}
                for split_generator in builder._split_generators(
                    StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
                )
            }
        except Exception as err:
            raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
    return info
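
# Illustrative sketch (editorial addition, not part of the original file):
# fetch the DatasetInfo of one config and read its features and split names.
from datasets import get_dataset_config_info

cola_info = get_dataset_config_info("glue", config_name="cola")
print(cola_info.features)
print(list(cola_info.splits))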


def get_dataset_split_names(
    path: str,
    config_name: Optional[str] = None,
    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    use_auth_token="deprecated",
    **config_kwargs,
):
    """Get the list of available splits for a particular config and dataset.

    Args:
        path (`str`): path to the dataset processing script with the dataset builder. Can be either:

            - a local path to the processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
              e.g. `'squad'`, `'glue'` or `'openai/webtext'`
        config_name (`str`, *optional*):
            Defining the name of the dataset configuration.
        data_files (`str` or `Sequence` or `Mapping`, *optional*):
            Path(s) to source data file(s).
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        revision ([`Version`] or `str`, *optional*):
            Version of the dataset script to load.
            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
        token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.
        use_auth_token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.

            <Deprecated version="2.14.0">

            `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.

            </Deprecated>

        **config_kwargs (additional keyword arguments):
            Optional attributes for builder class which will override the attributes if supplied.

    Example:

    ```py
    >>> from datasets import get_dataset_split_names
    >>> get_dataset_split_names('rotten_tomatoes')
    ['train', 'validation', 'test']
    ```
    """
    if use_auth_token != "deprecated":
        warnings.warn(
            "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
            "You can remove this warning by passing 'token=<use_auth_token>' instead.",
            FutureWarning,
        )
        token = use_auth_token

    info = get_dataset_config_info(
        path,
        config_name=config_name,
        data_files=data_files,
        download_config=download_config,
        download_mode=download_mode,
        revision=revision,
        token=token,
        **config_kwargs,
    )
    return list(info.splits.keys())
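
# Illustrative sketch (editorial addition, not part of the original file):
# discover the splits without downloading the data, then load a single split.
from datasets import get_dataset_split_names, load_dataset

for split in get_dataset_split_names("rotten_tomatoes"):
    print(split)  # train, validation, test
validation_set = load_dataset("rotten_tomatoes", split="validation")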
venv/lib/python3.10/site-packages/datasets/streaming.py
ADDED
@@ -0,0 +1,142 @@
import importlib
import inspect
from functools import wraps
from typing import TYPE_CHECKING, Optional

from .download.download_config import DownloadConfig
from .utils.file_utils import (
    xbasename,
    xdirname,
    xet_parse,
    xexists,
    xgetsize,
    xglob,
    xgzip_open,
    xisdir,
    xisfile,
    xjoin,
    xlistdir,
    xnumpy_load,
    xopen,
    xpandas_read_csv,
    xpandas_read_excel,
    xPath,
    xpyarrow_parquet_read_table,
    xrelpath,
    xsio_loadmat,
    xsplit,
    xsplitext,
    xwalk,
    xxml_dom_minidom_parse,
)
from .utils.logging import get_logger
from .utils.patching import patch_submodule
from .utils.py_utils import get_imports, lock_importable_file


logger = get_logger(__name__)


if TYPE_CHECKING:
    from .builder import DatasetBuilder


def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
    """Extend the module to support streaming.

    We patch some functions in the module to use `fsspec` to support data streaming:
    - We use `fsspec.open` to open and read remote files. We patch the module function:
      - `open`
    - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
      functions:
      - `os.path.join`
      - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)

    The patched functions are replaced with custom functions defined to work with the
    :class:`~download.streaming_download_manager.StreamingDownloadManager`.

    Args:
        module_path: Path to the module to be extended.
        download_config: Mainly uses `token` or `storage_options` to support different platforms and auth types.
    """

    module = importlib.import_module(module_path)

    # TODO(QL): always update the module to add subsequent new authentication without removing old ones
    if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
        if isinstance(module._patched_for_streaming, DownloadConfig):
            module._patched_for_streaming.token = download_config.token
            module._patched_for_streaming.storage_options = download_config.storage_options
        return

    def wrap_auth(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            return function(*args, download_config=download_config, **kwargs)

        wrapper._decorator_name_ = "wrap_auth"
        return wrapper

    # open files in a streaming fashion
    patch_submodule(module, "open", wrap_auth(xopen)).start()
    patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
    patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
    patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
    # allow navigating inside remote zip files
    patch_submodule(module, "os.path.join", xjoin).start()
    patch_submodule(module, "os.path.dirname", xdirname).start()
    patch_submodule(module, "os.path.basename", xbasename).start()
    patch_submodule(module, "os.path.relpath", xrelpath).start()
    patch_submodule(module, "os.path.split", xsplit).start()
    patch_submodule(module, "os.path.splitext", xsplitext).start()
    # allow checks on paths
    patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
    patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
    patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
    patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
    patch_submodule(module, "pathlib.Path", xPath).start()
    # file readers
    patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
    patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
    patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
    patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
    patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
    patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
    patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
    # pyarrow: do not patch pyarrow attribute in packaged modules
    if not module.__name__.startswith("datasets.packaged_modules."):
        patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start()
    module._patched_for_streaming = download_config


def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
    """Extend the dataset builder module and the modules imported by it to support streaming.

    Args:
        builder (:class:`DatasetBuilder`): Dataset builder instance.
    """
    # this extends the open and os.path.join functions for data streaming
    download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
    extend_module_for_streaming(builder.__module__, download_config=download_config)
    # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
    if not builder.__module__.startswith("datasets."):  # check that it's not a packaged builder like csv
        importable_file = inspect.getfile(builder.__class__)
        with lock_importable_file(importable_file):
            for imports in get_imports(importable_file):
                if imports[0] == "internal":
                    internal_import_name = imports[1]
                    internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
                    extend_module_for_streaming(internal_module_name, download_config=download_config)

    # builders can inherit from other builders that might use streaming functionality
    # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements example generation)
    # but these parent builders are not patched automatically as they are not instantiated, so we patch them here
    from .builder import DatasetBuilder

    parent_builder_modules = [
        cls.__module__
        for cls in type(builder).__mro__[1:]  # make sure it's not the same module we've already patched
        if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
    ]  # check it's not a standard builder from datasets.builder
    for module in parent_builder_modules:
        extend_module_for_streaming(module, download_config=download_config)
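
# Illustrative sketch (editorial addition, not part of the original file):
# `patch_submodule` comes from `.utils.patching`, which is not included in this
# diff. A toy context manager showing the general monkeypatching idea behind it
# (hypothetical helper, not the library's implementation):
import json
from contextlib import contextmanager


@contextmanager
def toy_patch(module_path: str, attr: str, replacement):
    """Replace `module.attr` for the duration of the context, then restore it."""
    module = importlib.import_module(module_path)
    original = getattr(module, attr)
    setattr(module, attr, replacement)
    try:
        yield
    finally:
        setattr(module, attr, original)


# Every call to json.dumps inside the block goes through the replacement.
with toy_patch("json", "dumps", lambda obj, **kwargs: "<patched>"):
    assert json.dumps({"a": 1}) == "<patched>"
assert json.dumps({"a": 1}) == '{"a": 1}'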
venv/lib/python3.10/site-packages/datasets/table.py
ADDED
@@ -0,0 +1,2415 @@
import copy
import os
from functools import partial
from itertools import groupby
from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union

import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types

from . import config
from .utils.logging import get_logger


if TYPE_CHECKING:
    from .features.features import Features, FeatureType


logger = get_logger(__name__)


def inject_arrow_table_documentation(arrow_table_method):
    def wrapper(fn):
        fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
        fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
        if hasattr(arrow_table_method, "__annotations__"):
            fn.__annotations__ = arrow_table_method.__annotations__
        return fn

    return wrapper


def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
    in_memory_stream = pa.input_stream(filename)
    opened_stream = pa.ipc.open_stream(in_memory_stream)
    pa_table = opened_stream.read_all()
    return pa_table


def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
    stream = pa.BufferReader(buffer)
    opened_stream = pa.ipc.open_stream(stream)
    table = opened_stream.read_all()
    return table


def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
    memory_mapped_stream = pa.memory_map(filename)
    return pa.ipc.open_stream(memory_mapped_stream)


def read_schema_from_file(filename: str) -> pa.Schema:
    """
    Infer arrow table schema from file without loading the whole file into memory.
    Useful especially when dealing with very big files.
    """
    with pa.memory_map(filename) as memory_mapped_stream:
        schema = pa.ipc.open_stream(memory_mapped_stream).schema
    return schema


def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
    opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
    pa_table = opened_stream.read_all()
    return pa_table


def _deepcopy(x, memo: dict):
    """deepcopy a regular class instance"""
    cls = x.__class__
    result = cls.__new__(cls)
    memo[id(x)] = result
    for k, v in x.__dict__.items():
        setattr(result, k, copy.deepcopy(v, memo))
    return result


def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i in a sorted array so that arr[i] <= x < arr[i+1]

    Args:
        arr (`List[int]`): non-empty sorted list of integers
        x (`int`): query

    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]

    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
    i, j = 0, len(arr) - 1
    while i < j and arr[i] <= x < arr[j]:
        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
        if arr[k] <= x < arr[k + 1]:
            return k
        elif arr[k] < x:
            i, j = k + 1, j
        else:
            i, j = i, k
    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
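
# Illustrative sketch (editorial addition, not part of the original file):
# a worked example of `_interpolation_search` on a cumulative-offsets array
# like the one `IndexedTableMixin` builds below (batch lengths 3, 5, 2, 4).
_offsets_example = [0, 3, 8, 10, 14]
assert _interpolation_search(_offsets_example, 0) == 0   # row 0 -> batch 0
assert _interpolation_search(_offsets_example, 9) == 2   # row 9 -> batch 2
assert _interpolation_search(_offsets_example, 13) == 3  # row 13 -> batch 3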


class IndexedTableMixin:
    def __init__(self, table: pa.Table):
        self._schema: pa.Schema = table.schema
        self._batches: List[pa.RecordBatch] = [
            recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
        ]
        self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)

    def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
        """
        Create a pa.Table by gathering the records at the specified indices. Should be faster
        than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
        the binary searches in parallel, in highly optimized C code.
        """
        if not len(indices):
            raise ValueError("Indices must be non-empty")
        batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
        return pa.Table.from_batches(
            [
                self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
                for batch_idx, i in zip(batch_indices, indices)
            ],
            schema=self._schema,
        )

    def fast_slice(self, offset=0, length=None) -> pa.Table:
        """
        Slice the Table using interpolation search.
        The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.

        Interpolation search is used to find the start and end indexes of the batches we want to keep.
        The batches to keep are then concatenated to form the sliced Table.
        """
        if offset < 0:
            raise IndexError("Offset must be non-negative")
        elif offset >= self._offsets[-1] or (length is not None and length <= 0):
            return pa.Table.from_batches([], schema=self._schema)
        i = _interpolation_search(self._offsets, offset)
        if length is None or length + offset >= self._offsets[-1]:
            batches = self._batches[i:]
            batches[0] = batches[0].slice(offset - self._offsets[i])
        else:
            j = _interpolation_search(self._offsets, offset + length - 1)
            batches = self._batches[i : j + 1]
            batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
            batches[0] = batches[0].slice(offset - self._offsets[i])
        return pa.Table.from_batches(batches, schema=self._schema)
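
# Illustrative sketch (editorial addition, not part of the original file):
# slicing and gathering across record-batch boundaries through the index.
_example_batches = [pa.RecordBatch.from_pydict({"x": list(range(i, i + 4))}) for i in (0, 4, 8)]
_indexed = IndexedTableMixin(pa.Table.from_batches(_example_batches))
assert _indexed.fast_slice(5, 4).to_pydict() == {"x": [5, 6, 7, 8]}
assert _indexed.fast_gather([0, 6, 11]).to_pydict() == {"x": [0, 6, 11]}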
152 |
+
|
153 |
+
|
154 |
+
class Table(IndexedTableMixin):
|
155 |
+
"""
|
156 |
+
Wraps a pyarrow Table by using composition.
|
157 |
+
This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`.
|
158 |
+
|
159 |
+
It implements all the basic attributes/methods of the pyarrow Table class except
|
160 |
+
the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column,
|
161 |
+
append_column, remove_column, set_column, rename_columns` and `drop`.
|
162 |
+
|
163 |
+
The implementation of these methods differs for the subclasses.
|
164 |
+
"""
|
165 |
+
|
166 |
+
def __init__(self, table: pa.Table):
|
167 |
+
super().__init__(table)
|
168 |
+
self.table = table
|
169 |
+
|
170 |
+
def __deepcopy__(self, memo: dict):
|
171 |
+
# arrow tables are immutable, so there's no need to copy self.table
|
172 |
+
# moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
|
173 |
+
# by adding it to the memo, self.table won't be copied
|
174 |
+
memo[id(self.table)] = self.table
|
175 |
+
# same for the recordbatches used by the index
|
176 |
+
memo[id(self._batches)] = list(self._batches)
|
177 |
+
return _deepcopy(self, memo)
|
178 |
+
|
179 |
+
def validate(self, *args, **kwargs):
|
180 |
+
"""
|
181 |
+
Perform validation checks. An exception is raised if validation fails.
|
182 |
+
|
183 |
+
By default only cheap validation checks are run. Pass `full=True`
|
184 |
+
for thorough validation checks (potentially `O(n)`).
|
185 |
+
|
186 |
+
Args:
|
187 |
+
full (`bool`, defaults to `False`):
|
188 |
+
If `True`, run expensive checks, otherwise cheap checks only.
|
189 |
+
|
190 |
+
Raises:
|
191 |
+
`pa.lib.ArrowInvalid`: if validation fails
|
192 |
+
"""
|
193 |
+
return self.table.validate(*args, **kwargs)
|
194 |
+
|
195 |
+
def equals(self, *args, **kwargs):
|
196 |
+
"""
|
197 |
+
Check if contents of two tables are equal.
|
198 |
+
|
199 |
+
Args:
|
200 |
+
other ([`~datasets.table.Table`]):
|
201 |
+
Table to compare against.
|
202 |
+
check_metadata `bool`, defaults to `False`):
|
203 |
+
Whether schema metadata equality should be checked as well.
|
204 |
+
|
205 |
+
Returns:
|
206 |
+
`bool`
|
207 |
+
"""
|
208 |
+
args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
|
209 |
+
kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs}
|
210 |
+
return self.table.equals(*args, **kwargs)
|
211 |
+
|
212 |
+
def to_batches(self, *args, **kwargs):
|
213 |
+
"""
|
214 |
+
Convert Table to list of (contiguous) `RecordBatch` objects.
|
215 |
+
|
216 |
+
Args:
|
217 |
+
max_chunksize (`int`, defaults to `None`):
|
218 |
+
            Maximum size for `RecordBatch` chunks. Individual chunks may be
            smaller depending on the chunk layout of individual columns.

        Returns:
            `List[pyarrow.RecordBatch]`
        """
        return self.table.to_batches(*args, **kwargs)

    def to_pydict(self, *args, **kwargs):
        """
        Convert the Table to a `dict` or `OrderedDict`.

        Returns:
            `dict`
        """
        return self.table.to_pydict(*args, **kwargs)

    def to_pylist(self, *args, **kwargs):
        """
        Convert the Table to a list of rows.

        Returns:
            `list`
        """
        return self.table.to_pylist(*args, **kwargs)

    def to_pandas(self, *args, **kwargs):
        """
        Convert to a pandas-compatible NumPy array or DataFrame, as appropriate.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                Arrow MemoryPool to use for allocations. Uses the default memory
                pool if not passed.
            strings_to_categorical (`bool`, defaults to `False`):
                Encode string (UTF8) and binary types to `pandas.Categorical`.
            categories (`list`, defaults to `empty`):
                List of fields that should be returned as `pandas.Categorical`. Only
                applies to table-like data structures.
            zero_copy_only (`bool`, defaults to `False`):
                Raise an `ArrowException` if this function call would require copying
                the underlying data.
            integer_object_nulls (`bool`, defaults to `False`):
                Cast integers with nulls to objects.
            date_as_object (`bool`, defaults to `True`):
                Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype.
            timestamp_as_object (`bool`, defaults to `False`):
                Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is
                useful if you have timestamps that don't fit in the normal date
                range of nanosecond timestamps (1678 CE-2262 CE).
                If `False`, all timestamps are converted to `datetime64[ns]` dtype.
            use_threads (`bool`, defaults to `True`):
                Whether to parallelize the conversion using multiple threads.
            deduplicate_objects (`bool`, defaults to `False`):
                Do not create multiple copies of Python objects when converting, to save
                on memory use. Conversion will be slower.
            ignore_metadata (`bool`, defaults to `False`):
                If `True`, do not use the 'pandas' metadata to reconstruct the
                DataFrame index, if present.
            safe (`bool`, defaults to `True`):
                For certain data types, a cast is needed in order to store the
                data in a pandas DataFrame or Series (e.g. timestamps are always
                stored as nanoseconds in pandas). This option controls whether it
                is a safe cast or not.
            split_blocks (`bool`, defaults to `False`):
                If `True`, generate one internal "block" for each column when
                creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this
                can temporarily reduce memory, note that various pandas operations
                can trigger "consolidation" which may balloon memory use.
            self_destruct (`bool`, defaults to `False`):
                EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow
                memory while converting the Arrow object to pandas. If you use the
                object after calling `to_pandas` with this option it will crash your
                program.
            types_mapper (`function`, defaults to `None`):
                A function mapping a pyarrow DataType to a pandas `ExtensionDtype`.
                This can be used to override the default pandas type for conversion
                of built-in pyarrow types or in absence of `pandas_metadata` in the
                Table schema. The function receives a pyarrow DataType and is
                expected to return a pandas `ExtensionDtype` or `None` if the
                default conversion should be used for that type. If you have
                a dictionary mapping, you can pass `dict.get` as function.

        Returns:
            `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object
        """
        return self.table.to_pandas(*args, **kwargs)
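
    # A minimal usage sketch (illustrative, not part of the API): `to_pandas`
    # delegates to the wrapped `pyarrow.Table`, so the usual pyarrow conversion
    # options shown above apply directly.
    #
    #     import pyarrow as pa
    #     t = InMemoryTable(pa.table({"id": [1, 2], "text": ["a", "b"]}))
    #     df = t.to_pandas(use_threads=False)  # -> pandas.DataFrame with 2 rows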

    def to_string(self, *args, **kwargs):
        """
        Return a human-readable string representation of the Table.
        """
        return self.table.to_string(*args, **kwargs)

    def to_reader(self, max_chunksize: Optional[int] = None):
        """
        Convert the Table to a RecordBatchReader.

        Note that this method is zero-copy; it merely exposes the same data under a different API.

        Args:
            max_chunksize (`int`, defaults to `None`):
                Maximum size for RecordBatch chunks. Individual chunks may be smaller depending
                on the chunk layout of individual columns.

        Returns:
            `pyarrow.RecordBatchReader`
        """
        return self.table.to_reader(max_chunksize=max_chunksize)
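
    # Sketch (illustrative): the reader streams the same data batch by batch,
    # which is handy for iterating without materializing copies.
    #
    #     reader = t.to_reader(max_chunksize=1000)
    #     for batch in reader:  # each `batch` is a pyarrow.RecordBatch
    #         ...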

    def field(self, *args, **kwargs):
        """
        Select a schema field by its column name or numeric index.

        Args:
            i (`Union[int, str]`):
                The index or name of the field to retrieve.

        Returns:
            `pyarrow.Field`
        """
        return self.table.field(*args, **kwargs)

    def column(self, *args, **kwargs):
        """
        Select a column by its column name, or numeric index.

        Args:
            i (`Union[int, str]`):
                The index or name of the column to retrieve.

        Returns:
            `pyarrow.ChunkedArray`
        """
        return self.table.column(*args, **kwargs)

    def itercolumns(self, *args, **kwargs):
        """
        Iterator over all columns in their numerical order.

        Yields:
            `pyarrow.ChunkedArray`
        """
        return self.table.itercolumns(*args, **kwargs)

    @property
    def schema(self):
        """
        Schema of the table and its columns.

        Returns:
            `pyarrow.Schema`
        """
        return self.table.schema

    @property
    def columns(self):
        """
        List of all columns in numerical order.

        Returns:
            `List[pa.ChunkedArray]`
        """
        return self.table.columns

    @property
    def num_columns(self):
        """
        Number of columns in this table.

        Returns:
            int
        """
        return self.table.num_columns

    @property
    def num_rows(self):
        """
        Number of rows in this table.

        Due to the definition of a table, all columns have the same number of
        rows.

        Returns:
            int
        """
        return self.table.num_rows

    @property
    def shape(self):
        """
        Dimensions of the table: (#rows, #columns).

        Returns:
            `(int, int)`: Number of rows and number of columns.
        """
        return self.table.shape

    @property
    def nbytes(self):
        """
        Total number of bytes consumed by the elements of the table.
        """
        return self.table.nbytes

    @property
    def column_names(self):
        """
        Names of the table's columns.
        """
        return self.table.column_names

    def __eq__(self, other):
        return self.equals(other)

    def __getitem__(self, i):
        return self.table[i]

    def __len__(self):
        return len(self.table)

    def __repr__(self):
        return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)

    def __str__(self):
        return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)

    def slice(self, *args, **kwargs):
        """
        Compute zero-copy slice of this Table.

        Args:
            offset (`int`, defaults to `0`):
                Offset from start of table to slice.
            length (`int`, defaults to `None`):
                Length of slice (default is until end of table starting from
                offset).

        Returns:
            `datasets.table.Table`
        """
        raise NotImplementedError()

    def filter(self, *args, **kwargs):
        """
        Select records from a Table. See `pyarrow.compute.filter` for full usage.
        """
        raise NotImplementedError()

    def flatten(self, *args, **kwargs):
        """
        Flatten this Table. Each column with a struct type is flattened
        into one column per struct field. Other columns are left unchanged.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        raise NotImplementedError()

    def combine_chunks(self, *args, **kwargs):
        """
        Make a new table by combining the chunks this table has.

        All the underlying chunks in the `ChunkedArray` of each column are
        concatenated into zero or one chunk.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        raise NotImplementedError()

    def cast(self, *args, **kwargs):
        """
        Cast table values to another schema.

        Args:
            target_schema (`Schema`):
                Schema to cast to, the names and order of fields must match.
            safe (`bool`, defaults to `True`):
                Check for overflows or other unsafe conversions.

        Returns:
            `datasets.table.Table`
        """
        raise NotImplementedError()

    def replace_schema_metadata(self, *args, **kwargs):
        """
        EXPERIMENTAL: Create shallow copy of table by replacing schema
        key-value metadata with the indicated new metadata (which may be `None`,
        which deletes any existing metadata).

        Args:
            metadata (`dict`, defaults to `None`):

        Returns:
            `datasets.table.Table`: shallow_copy
        """
        raise NotImplementedError()

    def add_column(self, *args, **kwargs):
        """
        Add column to Table at position.

        A new table is returned with the column added, the original table
        object is left unchanged.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`: New table with the passed column added.
        """
        raise NotImplementedError()

    def append_column(self, *args, **kwargs):
        """
        Append column at end of columns.

        Args:
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`: New table with the passed column added.
        """
        raise NotImplementedError()

    def remove_column(self, *args, **kwargs):
        """
        Create new Table with the indicated column removed.

        Args:
            i (`int`):
                Index of column to remove.

        Returns:
            `datasets.table.Table`: New table without the column.
        """
        raise NotImplementedError()

    def set_column(self, *args, **kwargs):
        """
        Replace column in Table at position.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`: New table with the passed column set.
        """
        raise NotImplementedError()

    def rename_columns(self, *args, **kwargs):
        """
        Create new table with columns renamed to provided names.
        """
        raise NotImplementedError()

    def drop(self, *args, **kwargs):
        """
        Drop one or more columns and return a new table.

        Args:
            columns (`List[str]`):
                List of field names referencing existing columns.

        Raises:
            `KeyError`: if any of the passed column names do not exist.

        Returns:
            `datasets.table.Table`: New table without the columns.
        """
        raise NotImplementedError()

    def select(self, *args, **kwargs):
        """
        Select columns of the table.

        Returns a new table with the specified columns, and metadata preserved.

        Args:
            columns (:obj:`Union[List[str], List[int]]`):
                The column names or integer indices to select.

        Returns:
            `datasets.table.Table`: table with only a subset of the columns
        """
        raise NotImplementedError()


class TableBlock(Table):
    """
    `TableBlock` is the allowed class inside a `ConcatenationTable`.
    Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`s.
    This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTable`s.
    """

    pass


class InMemoryTable(TableBlock):
    """
    The table is said to be in-memory when it is loaded into the user's RAM.

    Pickling it copies all the data into memory.
    Its implementation is simple and uses the underlying pyarrow Table methods directly.

    This is different from the `MemoryMappedTable`, for which pickling doesn't copy all the
    data in memory. For a `MemoryMappedTable`, unpickling instead reloads the table from the disk.

    `InMemoryTable` must be used when the data fits in memory, while `MemoryMappedTable` is
    reserved for data bigger than memory or when you want the memory footprint of your
    application to stay low.
    """
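
    # A quick sketch of the pickling contrast (illustrative): pickling an
    # `InMemoryTable` serializes the full Arrow payload, so its size scales with
    # the data, whereas a `MemoryMappedTable` (defined below) pickles only its
    # file path plus the replay history.
    #
    #     import pickle, pyarrow as pa
    #     t = InMemoryTable(pa.table({"a": list(range(1000))}))
    #     payload = pickle.dumps(t)  # payload grows with the table's data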

    @classmethod
    def from_file(cls, filename: str):
        table = _in_memory_arrow_table_from_file(filename)
        return cls(table)

    @classmethod
    def from_buffer(cls, buffer: pa.Buffer):
        table = _in_memory_arrow_table_from_buffer(buffer)
        return cls(table)

    @classmethod
    def from_pandas(cls, *args, **kwargs):
        """
        Convert pandas.DataFrame to an Arrow Table.

        The column types in the resulting Arrow Table are inferred from the
        dtypes of the pandas.Series in the DataFrame. In the case of non-object
        Series, the NumPy dtype is translated to its Arrow equivalent. In the
        case of `object`, we need to guess the datatype by looking at the
        Python objects in this Series.

        Be aware that Series of the `object` dtype don't carry enough
        information to always lead to a meaningful Arrow type. In the case that
        we cannot infer a type, e.g. because the DataFrame is of length 0 or
        the Series only contains `None/nan` objects, the type is set to
        null. This behavior can be avoided by constructing an explicit schema
        and passing it to this function.

        Args:
            df (`pandas.DataFrame`):
            schema (`pyarrow.Schema`, *optional*):
                The expected schema of the Arrow Table. This can be used to
                indicate the type of columns if we cannot infer it automatically.
                If passed, the output will have exactly this schema. Columns
                specified in the schema that are not found in the DataFrame columns
                or its index will raise an error. Additional columns or index
                levels in the DataFrame which are not specified in the schema will
                be ignored.
            preserve_index (`bool`, *optional*):
                Whether to store the index as an additional column in the resulting
                `Table`. The default of `None` will store the index as a column,
                except for `RangeIndex` which is stored as metadata only. Use
                `preserve_index=True` to force it to be stored as a column.
            nthreads (`int`, defaults to `None` (may use up to system CPU count threads)):
                If greater than 1, convert columns to Arrow in parallel using
                indicated number of threads.
            columns (`List[str]`, *optional*):
                List of columns to be converted. If `None`, use all columns.
            safe (`bool`, defaults to `True`):
                Check for overflows or other unsafe conversions.

        Returns:
            `datasets.table.Table`:

        Examples:
        ```python
        >>> import pandas as pd
        >>> import pyarrow as pa
        >>> df = pd.DataFrame({
        ...     'int': [1, 2],
        ...     'str': ['a', 'b']
        ... })
        >>> pa.Table.from_pandas(df)
        <pyarrow.lib.Table object at 0x7f05d1fb1b40>
        ```
        """
        return cls(pa.Table.from_pandas(*args, **kwargs))

    @classmethod
    def from_arrays(cls, *args, **kwargs):
        """
        Construct a Table from Arrow arrays.

        Args:
            arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`):
                Equal-length arrays that should form the table.
            names (`List[str]`, *optional*):
                Names for the table columns. If not passed, schema must be passed.
            schema (`Schema`, defaults to `None`):
                Schema for the created table. If not passed, names must be passed.
            metadata (`Union[dict, Mapping]`, defaults to `None`):
                Optional metadata for the schema (if inferred).

        Returns:
            `datasets.table.Table`
        """
        return cls(pa.Table.from_arrays(*args, **kwargs))

    @classmethod
    def from_pydict(cls, *args, **kwargs):
        """
        Construct a Table from Arrow arrays or columns.

        Args:
            mapping (`Union[dict, Mapping]`):
                A mapping of strings to Arrays or Python lists.
            schema (`Schema`, defaults to `None`):
                If not passed, will be inferred from the Mapping values.
            metadata (`Union[dict, Mapping]`, defaults to `None`):
                Optional metadata for the schema (if inferred).

        Returns:
            `datasets.table.Table`
        """
        return cls(pa.Table.from_pydict(*args, **kwargs))

    @classmethod
    def from_pylist(cls, mapping, *args, **kwargs):
        """
        Construct a Table from list of rows / dictionaries.

        Args:
            mapping (`List[dict]`):
                A list of rows, each given as a mapping of column names to values.
            schema (`Schema`, defaults to `None`):
                If not passed, will be inferred from the Mapping values.
            metadata (`Union[dict, Mapping]`, defaults to `None`):
                Optional metadata for the schema (if inferred).

        Returns:
            `datasets.table.Table`
        """
        return cls(pa.Table.from_pylist(mapping, *args, **kwargs))

    @classmethod
    def from_batches(cls, *args, **kwargs):
        """
        Construct a Table from a sequence or iterator of Arrow `RecordBatches`.

        Args:
            batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`):
                Sequence of `RecordBatch` to be converted, all schemas must be equal.
            schema (`Schema`, defaults to `None`):
                If not passed, will be inferred from the first `RecordBatch`.

        Returns:
            `datasets.table.Table`:
        """
        return cls(pa.Table.from_batches(*args, **kwargs))
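
    # Construction sketch (illustrative): all of these constructors return an
    # `InMemoryTable` wrapping the corresponding `pyarrow.Table`.
    #
    #     t1 = InMemoryTable.from_pydict({"a": [1, 2], "b": ["x", "y"]})
    #     t2 = InMemoryTable.from_pylist([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}])
    #     assert t1.to_pylist() == t2.to_pylist()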

    def slice(self, offset=0, length=None):
        """
        Compute zero-copy slice of this Table.

        Args:
            offset (`int`, defaults to `0`):
                Offset from start of table to slice.
            length (`int`, defaults to `None`):
                Length of slice (default is until end of table starting from
                offset).

        Returns:
            `datasets.table.Table`
        """
        # Use fast slicing here
        return InMemoryTable(self.fast_slice(offset=offset, length=length))

    def filter(self, *args, **kwargs):
        """
        Select records from a Table. See `pyarrow.compute.filter` for full usage.
        """
        return InMemoryTable(self.table.filter(*args, **kwargs))

    def flatten(self, *args, **kwargs):
        """
        Flatten this Table. Each column with a struct type is flattened
        into one column per struct field. Other columns are left unchanged.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        return InMemoryTable(table_flatten(self.table, *args, **kwargs))

    def combine_chunks(self, *args, **kwargs):
        """
        Make a new table by combining the chunks this table has.

        All the underlying chunks in the `ChunkedArray` of each column are
        concatenated into zero or one chunk.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        return InMemoryTable(self.table.combine_chunks(*args, **kwargs))

    def cast(self, *args, **kwargs):
        """
        Cast table values to another schema.

        Args:
            target_schema (`Schema`):
                Schema to cast to, the names and order of fields must match.
            safe (`bool`, defaults to `True`):
                Check for overflows or other unsafe conversions.

        Returns:
            `datasets.table.Table`
        """
        return InMemoryTable(table_cast(self.table, *args, **kwargs))

    def replace_schema_metadata(self, *args, **kwargs):
        """
        EXPERIMENTAL: Create shallow copy of table by replacing schema
        key-value metadata with the indicated new metadata (which may be `None`,
        which deletes any existing metadata).

        Args:
            metadata (`dict`, defaults to `None`):

        Returns:
            `datasets.table.Table`: shallow_copy
        """
        return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))

    def add_column(self, *args, **kwargs):
        """
        Add column to Table at position.

        A new table is returned with the column added, the original table
        object is left unchanged.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`: New table with the passed column added.
        """
        return InMemoryTable(self.table.add_column(*args, **kwargs))

    def append_column(self, *args, **kwargs):
        """
        Append column at end of columns.

        Args:
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`:
                New table with the passed column added.
        """
        return InMemoryTable(self.table.append_column(*args, **kwargs))

    def remove_column(self, *args, **kwargs):
        """
        Create new Table with the indicated column removed.

        Args:
            i (`int`):
                Index of column to remove.

        Returns:
            `datasets.table.Table`:
                New table without the column.
        """
        return InMemoryTable(self.table.remove_column(*args, **kwargs))

    def set_column(self, *args, **kwargs):
        """
        Replace column in Table at position.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`:
                New table with the passed column set.
        """
        return InMemoryTable(self.table.set_column(*args, **kwargs))

    def rename_columns(self, *args, **kwargs):
        """
        Create new table with columns renamed to provided names.
        """
        return InMemoryTable(self.table.rename_columns(*args, **kwargs))

    def drop(self, *args, **kwargs):
        """
        Drop one or more columns and return a new table.

        Args:
            columns (`List[str]`):
                List of field names referencing existing columns.

        Raises:
            `KeyError`: if any of the passed column names do not exist.

        Returns:
            `datasets.table.Table`:
                New table without the columns.
        """
        return InMemoryTable(self.table.drop(*args, **kwargs))

    def select(self, *args, **kwargs):
        """
        Select columns of the table.

        Returns a new table with the specified columns, and metadata preserved.

        Args:
            columns (:obj:`Union[List[str], List[int]]`):
                The column names or integer indices to select.

        Returns:
            :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
        """
        return InMemoryTable(self.table.select(*args, **kwargs))


# The MemoryMappedTable needs replays to properly reload tables from the disk
Replay = Tuple[str, tuple, dict]
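
# A replay is simply (method_name, args, kwargs). For example (illustrative):
#
#     replay: Replay = ("drop", (["column_to_remove"],), {})
#
# Re-applying it to a freshly memory-mapped pyarrow table reproduces the
# transformed state without ever pickling the data itself.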


class MemoryMappedTable(TableBlock):
    """
    The table is said to be memory-mapped when it doesn't use the user's RAM but loads the data
    from the disk instead.

    Pickling it doesn't copy the data into memory.
    Instead, only the path to the memory-mapped arrow file is pickled, as well as the list
    of transforms to "replay" when reloading the table from the disk.

    Its implementation requires storing a history of all the transforms that were applied
    to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
    from the disk.

    This is different from the `InMemoryTable`, for which pickling does copy all the
    data in memory.

    `InMemoryTable` must be used when the data fits in memory, while `MemoryMappedTable` is
    reserved for data bigger than memory or when you want the memory footprint of your
    application to stay low.
    """

    def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
        super().__init__(table)
        self.path = os.path.abspath(path)
        self.replays: List[Replay] = replays if replays is not None else []

    @classmethod
    def from_file(cls, filename: str, replays=None):
        table = _memory_mapped_arrow_table_from_file(filename)
        table = cls._apply_replays(table, replays)
        return cls(table, filename, replays)

    def __getstate__(self):
        return {"path": self.path, "replays": self.replays}

    def __setstate__(self, state):
        path = state["path"]
        replays = state["replays"]
        table = _memory_mapped_arrow_table_from_file(path)
        table = self._apply_replays(table, replays)
        MemoryMappedTable.__init__(self, table, path=path, replays=replays)

    @staticmethod
    def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
        if replays is not None:
            for name, args, kwargs in replays:
                if name == "cast":
                    table = table_cast(table, *args, **kwargs)
                elif name == "flatten":
                    table = table_flatten(table, *args, **kwargs)
                else:
                    table = getattr(table, name)(*args, **kwargs)
        return table

    def _append_replay(self, replay: Replay) -> List[Replay]:
        replays = copy.deepcopy(self.replays)
        replays.append(replay)
        return replays
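
    # Replay sketch (illustrative; "data.arrow" is a hypothetical path): every
    # transform below records itself before delegating, so a pickle/unpickle
    # round trip can rebuild the same view from the file on disk.
    #
    #     t = MemoryMappedTable.from_file("data.arrow")
    #     t = t.rename_columns(["a", "b"])
    #     t.replays  # -> [("rename_columns", (["a", "b"],), {})]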

    def slice(self, offset=0, length=None):
        """
        Compute zero-copy slice of this Table.

        Args:
            offset (`int`, defaults to `0`):
                Offset from start of table to slice.
            length (`int`, defaults to `None`):
                Length of slice (default is until end of table starting from
                offset).

        Returns:
            `datasets.table.Table`
        """
        replay = ("slice", (offset, length), {})
        replays = self._append_replay(replay)
        # Use fast slicing here
        return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)

    def filter(self, *args, **kwargs):
        """
        Select records from a Table. See `pyarrow.compute.filter` for full usage.
        """
        replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)

    def flatten(self, *args, **kwargs):
        """
        Flatten this Table. Each column with a struct type is flattened
        into one column per struct field. Other columns are left unchanged.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays)

    def combine_chunks(self, *args, **kwargs):
        """
        Make a new table by combining the chunks this table has.

        All the underlying chunks in the `ChunkedArray` of each column are
        concatenated into zero or one chunk.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)

    def cast(self, *args, **kwargs):
        """
        Cast table values to another schema.

        Args:
            target_schema (`Schema`):
                Schema to cast to, the names and order of fields must match.
            safe (`bool`, defaults to `True`):
                Check for overflows or other unsafe conversions.

        Returns:
            `datasets.table.Table`
        """
        replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays)

    def replace_schema_metadata(self, *args, **kwargs):
        """
        EXPERIMENTAL: Create shallow copy of table by replacing schema
        key-value metadata with the indicated new metadata (which may be `None`,
        which deletes any existing metadata).

        Args:
            metadata (`dict`, defaults to `None`):

        Returns:
            `datasets.table.Table`: shallow_copy
        """
        replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)

    def add_column(self, *args, **kwargs):
        """
        Add column to Table at position.

        A new table is returned with the column added, the original table
        object is left unchanged.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`: New table with the passed column added.
        """
        replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)

    def append_column(self, *args, **kwargs):
        """
        Append column at end of columns.

        Args:
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`:
                New table with the passed column added.
        """
        replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)

    def remove_column(self, *args, **kwargs):
        """
        Create new Table with the indicated column removed.

        Args:
            i (`int`):
                Index of column to remove.

        Returns:
            `datasets.table.Table`:
                New table without the column.
        """
        replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)

    def set_column(self, *args, **kwargs):
        """
        Replace column in Table at position.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`:
                New table with the passed column set.
        """
        replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)

    def rename_columns(self, *args, **kwargs):
        """
        Create new table with columns renamed to provided names.
        """
        replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)

    def drop(self, *args, **kwargs):
        """
        Drop one or more columns and return a new table.

        Args:
            columns (`List[str]`):
                List of field names referencing existing columns.

        Raises:
            `KeyError`: if any of the passed column names do not exist.

        Returns:
            `datasets.table.Table`:
                New table without the columns.
        """
        replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)

    def select(self, *args, **kwargs):
        """
        Select columns of the table.

        Returns a new table with the specified columns, and metadata preserved.

        Args:
            columns (:obj:`Union[List[str], List[int]]`):
                The column names or integer indices to select.

        Returns:
            :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
        """
        replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays)


# A ConcatenationTable is the concatenation of several tables.
# The ``blocks`` attribute stores a list of list of blocks.
# The first axis concatenates the tables along the axis 0 (it appends rows),
# while the second axis concatenates tables along the axis 1 (it appends columns).
TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]])


class ConcatenationTable(Table):
    """
    The table comes from the concatenation of several tables called blocks.
    It enables concatenation on both axis 0 (append rows) and axis 1 (append columns).

    The underlying tables are called "blocks" and can be either `InMemoryTable`
    or `MemoryMappedTable` objects.
    This allows combining tables that come from memory or that are memory mapped.
    When a `ConcatenationTable` is pickled, then each block is pickled:
    - the `InMemoryTable` objects are pickled by copying all the data in memory.
    - the `MemoryMappedTable` objects are pickled without copying the data into memory.
      Instead, only the path to the memory mapped arrow file is pickled, as well as the list
      of transforms to "replay" when reloading the table from the disk.

    Its implementation requires storing each block separately.
    The `blocks` attribute stores a list of list of blocks.
    The first axis concatenates the tables along the axis 0 (it appends rows),
    while the second axis concatenates tables along the axis 1 (it appends columns).

    If some columns are missing when concatenating on axis 0, they are filled with null values.
    This is done using `pyarrow.concat_tables(tables, promote=True)`.

    You can access the fully combined table by accessing the `ConcatenationTable.table` attribute,
    and the blocks by accessing the `ConcatenationTable.blocks` attribute.
    """
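
    # Blocks layout sketch (illustrative): `blocks` is indexed as
    # `blocks[row_group][column_group]`, e.g. two tables stacked vertically,
    # the second one made of two side-by-side blocks:
    #
    #     blocks = [
    #         [t0],        # first rows, a single block
    #         [t1a, t1b],  # remaining rows, columns split across two blocks
    #     ]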

    def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
        super().__init__(table)
        self.blocks = blocks
        # Check that all the blocks have the right type.
        # Only InMemoryTable and MemoryMappedTable are allowed.
        for subtables in blocks:
            for subtable in subtables:
                if not isinstance(subtable, TableBlock):
                    raise TypeError(
                        "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
                        f", but got {subtable}."
                    )

    def __getstate__(self):
        return {"blocks": self.blocks, "schema": self.table.schema}

    def __setstate__(self, state):
        blocks = state["blocks"]
        schema = state["schema"]
        table = self._concat_blocks_horizontally_and_vertically(blocks)
        if schema is not None and table.schema != schema:
            # We fix the columns by concatenating with an empty table with the right columns
            empty_table = pa.Table.from_batches([], schema=schema)
            # we set promote=True to fill missing columns with null values
            if config.PYARROW_VERSION.major < 14:
                table = pa.concat_tables([table, empty_table], promote=True)
            else:
                table = pa.concat_tables([table, empty_table], promote_options="default")
        ConcatenationTable.__init__(self, table, blocks=blocks)

    @staticmethod
    def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
        pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
        if axis == 0:
            # we set promote=True to fill missing columns with null values
            if config.PYARROW_VERSION.major < 14:
                return pa.concat_tables(pa_tables, promote=True)
            else:
                return pa.concat_tables(pa_tables, promote_options="default")
        elif axis == 1:
            for i, table in enumerate(pa_tables):
                if i == 0:
                    pa_table = table
                else:
                    for name, col in zip(table.column_names, table.columns):
                        pa_table = pa_table.append_column(name, col)
            return pa_table
        else:
            raise ValueError("'axis' must be either 0 or 1")

    @classmethod
    def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
        pa_tables_to_concat_vertically = []
        for i, tables in enumerate(blocks):
            if not tables:
                continue
            pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
            pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
        return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)

    @classmethod
    def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
        if axis is not None:
            merged_blocks = []
            for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
                if is_in_memory:
                    block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
                merged_blocks += list(block_group)
        else:  # both
            merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
            if all(len(row_block) == 1 for row_block in merged_blocks):
                merged_blocks = cls._merge_blocks(
                    [block for row_block in merged_blocks for block in row_block], axis=0
                )
        return merged_blocks

    @classmethod
    def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
        if isinstance(blocks, TableBlock):
            return blocks
        elif isinstance(blocks[0], TableBlock):
            return cls._merge_blocks(blocks, axis=0)
        else:
            return cls._merge_blocks(blocks)

    @classmethod
    def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
        blocks = cls._consolidate_blocks(blocks)
        if isinstance(blocks, TableBlock):
            table = blocks
            return cls(table.table, [[table]])
        elif isinstance(blocks[0], TableBlock):
            table = cls._concat_blocks(blocks, axis=0)
            blocks = [[t] for t in blocks]
            return cls(table, blocks)
        else:
            table = cls._concat_blocks_horizontally_and_vertically(blocks)
            return cls(table, blocks)

    @classmethod
    def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
        """Create `ConcatenationTable` from list of tables.

        Args:
            tables (list of `Table` or list of `pyarrow.Table`):
                List of tables.
            axis (`{0, 1}`, defaults to `0`, meaning over rows):
                Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
                (horizontally).

                <Added version="1.6.0"/>
        """

        def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]:
            if isinstance(table, pa.Table):
                return [[InMemoryTable(table)]]
            elif isinstance(table, ConcatenationTable):
                return copy.deepcopy(table.blocks)
            else:
                return [[table]]

        def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]:
            sliced = [table.slice(0, length) for table in row_block]
            remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block]
            return sliced, remainder

        def _split_both_like(
            result: List[List[TableBlock]], blocks: List[List[TableBlock]]
        ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]:
            """
            Make sure each row_block contains the same num_rows to be able to concatenate them on axis=1.

            To do so, we modify both block sets to have the same row_block boundaries.
            For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows,
            we modify both to have 4 row_blocks of size 2, 1, 1 and 2:

              [ x x x | x x x ]
            + [ y y | y y | y y ]
            -----------------------------
            = [ x x | x | x | x x ]
              [ y y | y | y | y y ]

            """
            result, blocks = list(result), list(blocks)
            new_result, new_blocks = [], []
            while result and blocks:
                # we slice the longest row block to save two row blocks of same length
                # and we replace the long row block by its remainder if necessary
                if len(result[0][0]) > len(blocks[0][0]):
                    new_blocks.append(blocks[0])
                    sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0]))
                    new_result.append(sliced)
                elif len(result[0][0]) < len(blocks[0][0]):
                    new_result.append(result[0])
                    sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0]))
                    new_blocks.append(sliced)
                else:
                    new_result.append(result.pop(0))
                    new_blocks.append(blocks.pop(0))
            if result or blocks:
                raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows")
            return new_result, new_blocks

        def _extend_blocks(
            result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0
        ) -> List[List[TableBlock]]:
            if axis == 0:
                result.extend(blocks)
            elif axis == 1:
                # We make sure each row_block has the same num_rows
                result, blocks = _split_both_like(result, blocks)
                for i, row_block in enumerate(blocks):
                    result[i].extend(row_block)
            return result

        blocks = to_blocks(tables[0])
        for table in tables[1:]:
            table_blocks = to_blocks(table)
            blocks = _extend_blocks(blocks, table_blocks, axis=axis)
        return cls.from_blocks(blocks)
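
    # Usage sketch (illustrative): concatenating two pyarrow tables over rows
    # keeps each input as its own in-memory block.
    #
    #     import pyarrow as pa
    #     t = ConcatenationTable.from_tables(
    #         [pa.table({"a": [1, 2]}), pa.table({"a": [3]})], axis=0
    #     )
    #     t.num_rows  # -> 3, backed by two row blocks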

    @property
    def _slices(self):
        offset = 0
        for tables in self.blocks:
            length = len(tables[0])
            yield (offset, length)
            offset += length

    def slice(self, offset=0, length=None):
        """
        Compute zero-copy slice of this Table.

        Args:
            offset (`int`, defaults to `0`):
                Offset from start of table to slice.
            length (`int`, defaults to `None`):
                Length of slice (default is until end of table starting from
                offset).

        Returns:
            `datasets.table.Table`
        """
        table = self.table.slice(offset, length=length)
        length = length if length is not None else self.num_rows - offset
        blocks = []
        for tables in self.blocks:
            n_rows = len(tables[0])
            if length == 0:
                break
            elif n_rows <= offset:
                offset = offset - n_rows
            elif n_rows <= offset + length:
                blocks.append([t.slice(offset) for t in tables])
                length, offset = length + offset - n_rows, 0
            else:
                blocks.append([t.slice(offset, length) for t in tables])
                length, offset = 0, 0
        return ConcatenationTable(table, blocks)

    def filter(self, mask, *args, **kwargs):
        """
        Select records from a Table. See `pyarrow.compute.filter` for full usage.
        """
        table = self.table.filter(mask, *args, **kwargs)
        blocks = []
        for (offset, length), tables in zip(self._slices, self.blocks):
            submask = mask.slice(offset, length)
            blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    def flatten(self, *args, **kwargs):
        """
        Flatten this Table. Each column with a struct type is flattened
        into one column per struct field. Other columns are left unchanged.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        table = table_flatten(self.table, *args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.flatten(*args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    def combine_chunks(self, *args, **kwargs):
        """
        Make a new table by combining the chunks this table has.

        All the underlying chunks in the `ChunkedArray` of each column are
        concatenated into zero or one chunk.

        Args:
            memory_pool (`MemoryPool`, defaults to `None`):
                For memory allocations, if required, otherwise use default pool.

        Returns:
            `datasets.table.Table`
        """
        table = self.table.combine_chunks(*args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    def cast(self, target_schema, *args, **kwargs):
        """
        Cast table values to another schema.

        Args:
            target_schema (`Schema`):
                Schema to cast to, the names and order of fields must match.
            safe (`bool`, defaults to `True`):
                Check for overflows or other unsafe conversions.

        Returns:
            `datasets.table.Table`
        """
        from .features import Features

        table = table_cast(self.table, target_schema, *args, **kwargs)
        target_features = Features.from_arrow_schema(target_schema)
        blocks = []
        for subtables in self.blocks:
            new_tables = []
            fields = list(target_schema)
            for subtable in subtables:
                subfields = []
                for name in subtable.column_names:
                    subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
                subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
                subschema = subfeatures.arrow_schema
                new_tables.append(subtable.cast(subschema, *args, **kwargs))
            blocks.append(new_tables)
        return ConcatenationTable(table, blocks)

    def replace_schema_metadata(self, *args, **kwargs):
        """
        EXPERIMENTAL: Create shallow copy of table by replacing schema
        key-value metadata with the indicated new metadata (which may be `None`,
        which deletes any existing metadata).

        Args:
            metadata (`dict`, defaults to `None`):

        Returns:
            `datasets.table.Table`: shallow_copy
        """
        table = self.table.replace_schema_metadata(*args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
        # Return the freshly rebuilt blocks. (The original code returned
        # `self.blocks` here, silently discarding the per-block metadata
        # update computed just above.)
        return ConcatenationTable(table, blocks)

    def add_column(self, *args, **kwargs):
        """
        Add column to Table at position.

        A new table is returned with the column added, the original table
        object is left unchanged.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`: New table with the passed column added.
        """
        raise NotImplementedError()

    def append_column(self, *args, **kwargs):
        """
        Append column at end of columns.

        Args:
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`:
                New table with the passed column added.
        """
        raise NotImplementedError()

    def remove_column(self, i, *args, **kwargs):
        """
        Create new Table with the indicated column removed.

        Args:
            i (`int`):
                Index of column to remove.

        Returns:
            `datasets.table.Table`:
                New table without the column.
        """
        table = self.table.remove_column(i, *args, **kwargs)
        name = self.table.column_names[i]
        blocks = []
        for tables in self.blocks:
            blocks.append(
                [
                    t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
                    for t in tables
                ]
            )
        return ConcatenationTable(table, blocks)

    def set_column(self, *args, **kwargs):
        """
        Replace column in Table at position.

        Args:
            i (`int`):
                Index to place the column at.
            field_ (`Union[str, pyarrow.Field]`):
                If a string is passed then the type is deduced from the column
                data.
            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
                Column data.

        Returns:
            `datasets.table.Table`:
                New table with the passed column set.
        """
        raise NotImplementedError()

    def rename_columns(self, names, *args, **kwargs):
        """
        Create new table with columns renamed to provided names.
        """
        table = self.table.rename_columns(names, *args, **kwargs)
        names = dict(zip(self.table.column_names, names))
        blocks = []
        for tables in self.blocks:
            blocks.append(
                [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
            )
        return ConcatenationTable(table, blocks)

    def drop(self, columns, *args, **kwargs):
        """
        Drop one or more columns and return a new table.

        Args:
            columns (`List[str]`):
                List of field names referencing existing columns.

        Raises:
            `KeyError`: if any of the passed column names do not exist.

        Returns:
            `datasets.table.Table`:
                New table without the columns.
        """
        table = self.table.drop(columns, *args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    def select(self, columns, *args, **kwargs):
        """
        Select columns of the table.

        Returns a new table with the specified columns, and metadata preserved.

        Args:
            columns (:obj:`Union[List[str], List[int]]`):
                The column names or integer indices to select.

        Returns:
            :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
        """
        table = self.table.select(columns, *args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)
def concat_tables(tables: List[Table], axis: int = 0) -> Table:
|
1754 |
+
"""
|
1755 |
+
Concatenate tables.
|
1756 |
+
|
1757 |
+
Args:
|
1758 |
+
tables (list of `Table`):
|
1759 |
+
List of tables to be concatenated.
|
1760 |
+
axis (`{0, 1}`, defaults to `0`, meaning over rows):
|
1761 |
+
Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
|
1762 |
+
(horizontally).
|
1763 |
+
|
1764 |
+
<Added version="1.6.0"/>
|
1765 |
+
Returns:
|
1766 |
+
`datasets.table.Table`:
|
1767 |
+
If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
|
1768 |
+
Otherwise if there's only one table, it is returned as is.
|
1769 |
+
"""
|
1770 |
+
tables = list(tables)
|
1771 |
+
if len(tables) == 1:
|
1772 |
+
return tables[0]
|
1773 |
+
return ConcatenationTable.from_tables(tables, axis=axis)
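
A small usage sketch (table contents are illustrative):

import pyarrow as pa
from datasets.table import InMemoryTable, concat_tables

t1 = InMemoryTable(pa.table({"a": [1, 2]}))
t2 = InMemoryTable(pa.table({"a": [3, 4]}))
combined = concat_tables([t1, t2])   # ConcatenationTable with 4 rows
print(combined.num_rows)             # 4
single = concat_tables([t1])         # only one table: returned as is, not wrapped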


def list_table_cache_files(table: Table) -> List[str]:
    """
    Get the cache files that are loaded by the table.
    Cache files are used when parts of the table come from the disk via memory mapping.

    Returns:
        `List[str]`:
            A list of paths to the cache files loaded by the table.
    """
    if isinstance(table, ConcatenationTable):
        cache_files = []
        for subtables in table.blocks:
            for subtable in subtables:
                cache_files += list_table_cache_files(subtable)
        return cache_files
    elif isinstance(table, MemoryMappedTable):
        return [table.path]
    else:
        return []
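
A sketch of when this returns something non-empty (the file name is illustrative; `MemoryMappedTable.from_file` expects the Arrow streaming format):

import pyarrow as pa
from datasets.table import MemoryMappedTable, list_table_cache_files

pa_table = pa.table({"a": [1, 2]})
with pa.OSFile("tmp_table.arrow", "wb") as sink:
    with pa.ipc.new_stream(sink, pa_table.schema) as writer:
        writer.write_table(pa_table)

mmap_table = MemoryMappedTable.from_file("tmp_table.arrow")
print(list_table_cache_files(mmap_table))  # ['tmp_table.arrow']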


def _wrap_for_chunked_arrays(func):
    """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly"""

    def wrapper(array, *args, **kwargs):
        if isinstance(array, pa.ChunkedArray):
            return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
        else:
            return func(array, *args, **kwargs)

    return wrapper
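
To illustrate the decorator, a small sketch (the `double` helper is hypothetical, not part of the module; the decorator itself is a private name of `datasets.table`):

import pyarrow as pa
import pyarrow.compute as pc
from datasets.table import _wrap_for_chunked_arrays

@_wrap_for_chunked_arrays
def double(arr):
    # `arr` is always a plain pa.Array here; chunking is handled by the wrapper
    return pc.multiply(arr, 2)

print(double(pa.chunked_array([[1, 2], [3]])))  # ChunkedArray [[2, 4], [6]]
print(double(pa.array([1, 2])))                 # Array [2, 4]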


def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool:
    """Check if all the sub-lists of a `pa.ListArray` have the specified length."""
    return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array)


def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array:
    """Add the null bitmap to the offsets of a `pa.ListArray`."""
    offsets = array.offsets
    if array.null_count > 0:
        offsets = pa.concat_arrays(
            [
                pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())),
                offsets[-1:],
            ]
        )
    return offsets
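
Concretely, a quick illustration of the offsets-plus-mask encoding (array contents are illustrative):

import pyarrow as pa
from datasets.table import _combine_list_array_offsets_with_mask

arr = pa.array([[1], None, [2, 3]])
# raw offsets are [0, 1, 1, 3]; the helper nulls out the offset of the missing list,
# yielding [0, null, 1, 3], which pa.ListArray.from_arrays understands as a validity mask
print(_combine_list_array_offsets_with_mask(arr))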


def _storage_type(type: pa.DataType) -> pa.DataType:
    """Convert a (possibly nested) `pa.ExtensionType` to its storage type."""
    if isinstance(type, pa.ExtensionType):
        return _storage_type(type.storage_type)
    elif isinstance(type, pa.StructType):
        return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type])
    elif isinstance(type, pa.ListType):
        return pa.list_(_storage_type(type.value_type))
    elif isinstance(type, pa.FixedSizeListType):
        return pa.list_(_storage_type(type.value_type), type.list_size)
    return type


@_wrap_for_chunked_arrays
def array_cast(
    array: pa.Array, pa_type: pa.DataType, allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True
) -> Union[pa.Array, pa.FixedSizeListArray, pa.ListArray, pa.StructArray, pa.ExtensionArray]:
    """Improved version of `pa.Array.cast`

    It supports casting `pa.StructArray` objects to re-order the fields.
    It also lets you control certain aspects of the casting, e.g. whether
    to disable casting primitives (`booleans`, `floats` or `ints`) or
    decimals to strings.

    Args:
        array (`pa.Array`):
            PyArrow array to cast.
        pa_type (`pa.DataType`):
            Target PyArrow type.
        allow_primitive_to_str (`bool`, defaults to `True`):
            Whether to allow casting primitives to strings.
        allow_decimal_to_str (`bool`, defaults to `True`):
            Whether to allow casting decimals to strings.

    Raises:
        `pa.ArrowInvalidError`: if the arrow data casting fails
        `TypeError`: if the target type is not supported, e.g.

            - if a field is missing
            - if casting from primitives to strings and `allow_primitive_to_str` is `False`
            - if casting from decimals to strings and `allow_decimal_to_str` is `False`

    Returns:
        `pa.Array`: the casted array
    """
    _c = partial(array_cast, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str)
    if isinstance(array, pa.ExtensionArray):
        array = array.storage
    if isinstance(pa_type, pa.ExtensionType):
        return pa_type.wrap_array(_c(array, pa_type.storage_type))
    elif array.type == pa_type:
        return array
    elif pa.types.is_struct(array.type):
        if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}):
            if array.type.num_fields == 0:
                return array
            arrays = [_c(array.field(field.name), field.type) for field in pa_type]
            return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
    elif pa.types.is_list(array.type):
        if pa.types.is_fixed_size_list(pa_type):
            if _are_list_values_of_length(array, pa_type.list_size):
                if array.null_count > 0:
                    # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
                    array_type = array.type
                    storage_type = _storage_type(array_type)
                    if array_type != storage_type:
                        # Temporarily convert to the storage type to support extension types in the slice operation
                        array = _c(array, storage_type)
                        array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
                        array = _c(array, array_type)
                    else:
                        array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
                    array_values = array.values
                    if config.PYARROW_VERSION.major < 15:
                        return pa.Array.from_buffers(
                            pa_type,
                            len(array),
                            [array.is_valid().buffers()[1]],
                            children=[_c(array_values, pa_type.value_type)],
                        )
                    else:
                        return pa.FixedSizeListArray.from_arrays(
                            _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
                        )
                else:
                    # `pa.FixedSizeListType` exposes `list_size` (not `length`), consistent with its use above
                    array_values = array.values[
                        array.offset * pa_type.list_size : (array.offset + len(array)) * pa_type.list_size
                    ]
                    return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size)
        elif pa.types.is_list(pa_type):
            # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
            array_offsets = _combine_list_array_offsets_with_mask(array)
            return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type))
    elif pa.types.is_fixed_size_list(array.type):
        if pa.types.is_fixed_size_list(pa_type):
            if pa_type.list_size == array.type.list_size:
                array_values = array.values[
                    array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
                ]
                if config.PYARROW_VERSION.major < 15:
                    return pa.Array.from_buffers(
                        pa_type,
                        len(array),
                        [array.is_valid().buffers()[1]],
                        children=[_c(array_values, pa_type.value_type)],
                    )
                else:
                    return pa.FixedSizeListArray.from_arrays(
                        _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
                    )
        elif pa.types.is_list(pa_type):
            array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
            return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null())
    else:
        if pa.types.is_string(pa_type):
            if not allow_primitive_to_str and pa.types.is_primitive(array.type):
                raise TypeError(
                    f"Couldn't cast array of type {array.type} to {pa_type} "
                    f"since allow_primitive_to_str is set to {allow_primitive_to_str}"
                )
            if not allow_decimal_to_str and pa.types.is_decimal(array.type):
                raise TypeError(
                    f"Couldn't cast array of type {array.type} to {pa_type} "
                    f"since allow_decimal_to_str is set to {allow_decimal_to_str}"
                )
        if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):
            raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}")
        return array.cast(pa_type)
    raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}")
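
A usage sketch (the arrays are illustrative):

import pyarrow as pa
from datasets.table import array_cast

# Variable-length lists of equal length can become a fixed-size list
arr = pa.array([[1, 2], [3, 4]])
print(array_cast(arr, pa.list_(pa.int64(), 2)).type)  # fixed_size_list<item: int64>[2]

# Struct fields can be re-ordered, which plain pa.Array.cast refuses to do
struct = pa.array([{"a": 1, "b": "x"}])
print(array_cast(struct, pa.struct([("b", pa.string()), ("a", pa.int64())])).type)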


@_wrap_for_chunked_arrays
def cast_array_to_feature(
    array: pa.Array, feature: "FeatureType", allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True
) -> pa.Array:
    """Cast an array to the arrow type that corresponds to the requested feature type.
    For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods
    they define to enable casting from other arrow types.

    Args:
        array (`pa.Array`):
            The PyArrow array to cast.
        feature (`datasets.features.FeatureType`):
            The target feature type.
        allow_primitive_to_str (`bool`, defaults to `True`):
            Whether to allow casting primitives to strings.
        allow_decimal_to_str (`bool`, defaults to `True`):
            Whether to allow casting decimals to strings.

    Raises:
        `pa.ArrowInvalidError`: if the arrow data casting fails
        `TypeError`: if the target type is not supported, e.g.

            - if a field is missing
            - if casting from primitives to strings and `allow_primitive_to_str` is `False`
            - if casting from decimals to strings and `allow_decimal_to_str` is `False`

    Returns:
        array (`pyarrow.Array`): the casted array
    """
    from .features.features import Sequence, get_nested_type

    _c = partial(
        cast_array_to_feature,
        allow_primitive_to_str=allow_primitive_to_str,
        allow_decimal_to_str=allow_decimal_to_str,
    )

    if isinstance(array, pa.ExtensionArray):
        array = array.storage
    if hasattr(feature, "cast_storage"):
        return feature.cast_storage(array)

    elif pa.types.is_struct(array.type):
        # feature must be a dict or Sequence(subfeatures_dict)
        if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
            feature = {
                name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
            }
        if isinstance(feature, dict) and {field.name for field in array.type} == set(feature):
            if array.type.num_fields == 0:
                return array
            arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
            return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
    elif pa.types.is_list(array.type):
        # feature must be either [subfeature] or Sequence(subfeature)
        if isinstance(feature, list):
            casted_array_values = _c(array.values, feature[0])
            if casted_array_values.type == array.values.type:
                return array
            else:
                # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
                array_offsets = _combine_list_array_offsets_with_mask(array)
                return pa.ListArray.from_arrays(array_offsets, casted_array_values)
        elif isinstance(feature, Sequence):
            if feature.length > -1:
                if _are_list_values_of_length(array, feature.length):
                    if array.null_count > 0:
                        # Ensure each null value in the array translates to [null] * feature.length in the array's values array
                        array_type = array.type
                        storage_type = _storage_type(array_type)
                        if array_type != storage_type:
                            # Temporarily convert to the storage type to support extension types in the slice operation
                            array = array_cast(
                                array,
                                storage_type,
                                allow_primitive_to_str=allow_primitive_to_str,
                                allow_decimal_to_str=allow_decimal_to_str,
                            )
                            array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
                            array = array_cast(
                                array,
                                array_type,
                                allow_primitive_to_str=allow_primitive_to_str,
                                allow_decimal_to_str=allow_decimal_to_str,
                            )
                        else:
                            array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
                        array_values = array.values
                        casted_array_values = _c(array_values, feature.feature)
                        if config.PYARROW_VERSION.major < 15:
                            return pa.Array.from_buffers(
                                pa.list_(casted_array_values.type, feature.length),
                                len(array),
                                [array.is_valid().buffers()[1]],
                                children=[casted_array_values],
                            )
                        else:
                            return pa.FixedSizeListArray.from_arrays(
                                casted_array_values, feature.length, mask=array.is_null()
                            )
                    else:
                        array_values = array.values[
                            array.offset * feature.length : (array.offset + len(array)) * feature.length
                        ]
                        return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
            else:
                casted_array_values = _c(array.values, feature.feature)
                if casted_array_values.type == array.values.type:
                    return array
                else:
                    # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
                    array_offsets = _combine_list_array_offsets_with_mask(array)
                    return pa.ListArray.from_arrays(array_offsets, casted_array_values)
    elif pa.types.is_fixed_size_list(array.type):
        # feature must be either [subfeature] or Sequence(subfeature)
        if isinstance(feature, list):
            array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
            return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null())
        elif isinstance(feature, Sequence):
            if feature.length > -1:
                if feature.length == array.type.list_size:
                    array_values = array.values[
                        array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
                    ]
                    casted_array_values = _c(array_values, feature.feature)
                    if config.PYARROW_VERSION.major < 15:
                        return pa.Array.from_buffers(
                            pa.list_(casted_array_values.type, feature.length),
                            len(array),
                            [array.is_valid().buffers()[1]],
                            children=[casted_array_values],
                        )
                    else:
                        return pa.FixedSizeListArray.from_arrays(
                            casted_array_values, feature.length, mask=array.is_null()
                        )
            else:
                array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
                return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null())
    if pa.types.is_null(array.type):
        return array_cast(
            array,
            get_nested_type(feature),
            allow_primitive_to_str=allow_primitive_to_str,
            allow_decimal_to_str=allow_decimal_to_str,
        )
    elif not isinstance(feature, (Sequence, dict, list, tuple)):
        return array_cast(
            array,
            feature(),
            allow_primitive_to_str=allow_primitive_to_str,
            allow_decimal_to_str=allow_decimal_to_str,
        )
    raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
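
A usage sketch (feature and data are illustrative):

import pyarrow as pa
from datasets import ClassLabel, Sequence, Value
from datasets.table import cast_array_to_feature

arr = pa.array([[1, 2], [3, 4]])
print(cast_array_to_feature(arr, Sequence(Value("float32"))).type)  # list<item: float>

# Features with a cast_storage method (e.g. ClassLabel) handle their own conversion
labels = pa.array([0, 1, 0])
print(cast_array_to_feature(labels, ClassLabel(names=["neg", "pos"])).type)  # int64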


@_wrap_for_chunked_arrays
def embed_array_storage(array: pa.Array, feature: "FeatureType"):
    """Embed data into an array's storage.
    For custom features like Audio or Image, it takes into account the "embed_storage" methods
    they define to embed external data (e.g. an image file) into an array.

    <Added version="2.4.0"/>

    Args:
        array (`pa.Array`):
            The PyArrow array in which to embed data.
        feature (`datasets.features.FeatureType`):
            Array features.

    Raises:
        `TypeError`: if the target type is not supported, e.g.

            - if a field is missing

    Returns:
        array (`pyarrow.Array`): the array with the embedded data
    """
    from .features import Sequence

    _e = embed_array_storage

    if isinstance(array, pa.ExtensionArray):
        array = array.storage
    if hasattr(feature, "embed_storage"):
        return feature.embed_storage(array)
    elif pa.types.is_struct(array.type):
        # feature must be a dict or Sequence(subfeatures_dict)
        if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
            feature = {
                name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
            }
        if isinstance(feature, dict):
            arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()]
            return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
    elif pa.types.is_list(array.type):
        # feature must be either [subfeature] or Sequence(subfeature)
        # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
        array_offsets = _combine_list_array_offsets_with_mask(array)
        if isinstance(feature, list):
            return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0]))
        if isinstance(feature, Sequence) and feature.length == -1:
            return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature))
    elif pa.types.is_fixed_size_list(array.type):
        # feature must be Sequence(subfeature)
        if isinstance(feature, Sequence) and feature.length > -1:
            array_values = array.values[
                array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
            ]
            embedded_array_values = _e(array_values, feature.feature)
            if config.PYARROW_VERSION.major < 15:
                return pa.Array.from_buffers(
                    pa.list_(array_values.type, feature.length),
                    len(array),
                    [array.is_valid().buffers()[1]],
                    children=[embedded_array_values],
                )
            else:
                return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null())
    if not isinstance(feature, (Sequence, dict, list, tuple)):
        return array
    raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")


class CastError(ValueError):
    """When it's not possible to cast an Arrow table to a specific schema or set of features"""

    def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None:
        super().__init__(*args)
        self.table_column_names = table_column_names
        self.requested_column_names = requested_column_names

    def __reduce__(self):
        # Fix unpickling: TypeError: __init__() missing 2 required keyword-only arguments: 'table_column_names' and 'requested_column_names'
        return partial(
            CastError, table_column_names=self.table_column_names, requested_column_names=self.requested_column_names
        ), ()

    def details(self):
        new_columns = set(self.table_column_names) - set(self.requested_column_names)
        missing_columns = set(self.requested_column_names) - set(self.table_column_names)
        if new_columns and missing_columns:
            return f"there are {len(new_columns)} new columns ({', '.join(new_columns)}) and {len(missing_columns)} missing columns ({', '.join(missing_columns)})."
        elif new_columns:
            return f"there are {len(new_columns)} new columns ({', '.join(new_columns)})"
        else:
            return f"there are {len(missing_columns)} missing columns ({', '.join(missing_columns)})"


def cast_table_to_features(table: pa.Table, features: "Features"):
    """Cast a table to the arrow schema that corresponds to the requested features.

    Args:
        table (`pyarrow.Table`):
            PyArrow table to cast.
        features ([`Features`]):
            Target features.

    Returns:
        table (`pyarrow.Table`): the casted table
    """
    if sorted(table.column_names) != sorted(features):
        raise CastError(
            f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
            table_column_names=table.column_names,
            requested_column_names=list(features),
        )
    arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
    return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
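
A usage sketch (data and features are illustrative):

import pyarrow as pa
from datasets import Features, Value
from datasets.table import cast_table_to_features

table = pa.table({"a": [1, 2]})
casted = cast_table_to_features(table, Features({"a": Value("string")}))
print(casted["a"].type)  # string; the Features are also stored in the schema metadata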


def cast_table_to_schema(table: pa.Table, schema: pa.Schema):
    """Cast a table to the arrow schema. Different from `cast_table_to_features`, this method can preserve nullability.

    Args:
        table (`pa.Table`):
            PyArrow table to cast.
        schema (`pa.Schema`):
            Target PyArrow schema.

    Returns:
        `pa.Table`: the casted table
    """
    from .features import Features

    features = Features.from_arrow_schema(schema)
    if sorted(table.column_names) != sorted(features):
        raise CastError(
            f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
            table_column_names=table.column_names,
            requested_column_names=list(features),
        )
    arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
    return pa.Table.from_arrays(arrays, schema=schema)


def embed_table_storage(table: pa.Table):
    """Embed external data into a table's storage.

    <Added version="2.4.0"/>

    Args:
        table (`pyarrow.Table`):
            PyArrow table in which to embed data.

    Returns:
        table (`pyarrow.Table`): the table with embedded data
    """
    from .features.features import Features, require_storage_embed

    features = Features.from_arrow_schema(table.schema)
    arrays = [
        embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
        for name, feature in features.items()
    ]
    return pa.Table.from_arrays(arrays, schema=features.arrow_schema)


def table_cast(table: pa.Table, schema: pa.Schema):
    """Improved version of `pa.Table.cast`.

    It supports casting to feature types stored in the schema metadata.

    Args:
        table (`pyarrow.Table`):
            PyArrow table to cast.
        schema (`pyarrow.Schema`):
            Target PyArrow schema.

    Returns:
        table (`pyarrow.Table`): the casted table
    """
    if table.schema != schema:
        return cast_table_to_schema(table, schema)
    elif table.schema.metadata != schema.metadata:
        return table.replace_schema_metadata(schema.metadata)
    else:
        return table
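
A usage sketch (data and target type are illustrative):

import pyarrow as pa
from datasets import Features, Value
from datasets.table import table_cast

table = pa.table({"a": [1, 2]})
schema = Features({"a": Value("float64")}).arrow_schema
print(table_cast(table, schema).schema.field("a").type)  # double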


def table_flatten(table: pa.Table):
    """Improved version of `pa.Table.flatten`.

    It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of struct-typed columns into one
    column per struct field, but it also updates the metadata and skips decodable features unless the `decode`
    attribute of these features is set to `False`.

    Args:
        table (`pa.Table`):
            PyArrow table to flatten.

    Returns:
        `Table`: the flattened table
    """
    from .features import Features

    features = Features.from_arrow_schema(table.schema)
    if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()):
        flat_arrays = []
        flat_column_names = []
        for field in table.schema:
            array = table.column(field.name)
            subfeature = features[field.name]
            if pa.types.is_struct(field.type) and (
                not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature
            ):
                flat_arrays.extend(array.flatten())
                flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type])
            else:
                flat_arrays.append(array)
                flat_column_names.append(field.name)
        flat_table = pa.Table.from_arrays(
            flat_arrays,
            names=flat_column_names,
        )
    else:
        flat_table = table.flatten()
    # Preserve complex types in the metadata
    flat_features = features.flatten(max_depth=2)
    flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names})
    return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata)
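
A usage sketch (here the struct column carries no decodable feature, so it is flattened):

import pyarrow as pa
from datasets.table import table_flatten

table = pa.table({"point": pa.array([{"x": 1, "y": 2}])})
print(table_flatten(table).column_names)  # ['point.x', 'point.y']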


def table_visitor(table: pa.Table, function: Callable[[pa.Array, "FeatureType"], None]):
    """Visit all arrays in a table and apply a function to them.

    Args:
        table (`pyarrow.Table`):
            PyArrow table to visit.
        function (`Callable[[pa.Array, FeatureType], None]`):
            Function to apply to each array and its feature.
    """
    from .features import Features, Sequence

    features = Features.from_arrow_schema(table.schema)

    def _visit(array, feature):
        if isinstance(array, pa.ChunkedArray):
            for chunk in array.chunks:
                _visit(chunk, feature)
        else:
            if isinstance(array, pa.ExtensionArray):
                array = array.storage
            function(array, feature)
            if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"):
                if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
                    feature = {
                        name: Sequence(subfeature, length=feature.length)
                        for name, subfeature in feature.feature.items()
                    }
                for name, subfeature in feature.items():
                    _visit(array.field(name), subfeature)
            elif pa.types.is_list(array.type):
                if isinstance(feature, list):
                    _visit(array.values, feature[0])
                elif isinstance(feature, Sequence):
                    _visit(array.values, feature.feature)

    for name, feature in features.items():
        _visit(table[name], feature)
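
A usage sketch (the `report` callback is hypothetical; note it receives both the array and its feature):

import pyarrow as pa
from datasets.table import table_visitor

def report(array, feature):
    print(type(array).__name__, feature)

table_visitor(pa.table({"a": [1], "b": [["x", "y"]]}), report)
# visits the int column, the list column, and then the list's string values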


def table_iter(table: Table, batch_size: int, drop_last_batch: bool = False) -> Iterator[pa.Table]:
    """Iterate over sub-tables of size `batch_size`.

    Args:
        table (`pyarrow.Table`):
            PyArrow table to iterate over.
        batch_size (`int`):
            Size of each sub-table to yield.
        drop_last_batch (`bool`, defaults to `False`):
            Drop the last batch if it is smaller than `batch_size`.
    """
    chunks_buffer = []
    chunks_buffer_size = 0
    for chunk in table.to_reader(max_chunksize=batch_size):
        if len(chunk) == 0:
            continue
        elif chunks_buffer_size + len(chunk) < batch_size:
            chunks_buffer.append(chunk)
            chunks_buffer_size += len(chunk)
            continue
        elif chunks_buffer_size + len(chunk) == batch_size:
            chunks_buffer.append(chunk)
            yield pa.Table.from_batches(chunks_buffer)
            chunks_buffer = []
            chunks_buffer_size = 0
        else:
            cropped_chunk_length = batch_size - chunks_buffer_size
            chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
            yield pa.Table.from_batches(chunks_buffer)
            chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
            chunks_buffer_size = len(chunk) - cropped_chunk_length
    if not drop_last_batch and chunks_buffer:
        yield pa.Table.from_batches(chunks_buffer)
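
A usage sketch (a plain `pa.Table` works here because the function only relies on `Table.to_reader`, available in pyarrow 8+):

import pyarrow as pa
from datasets.table import table_iter

table = pa.table({"a": list(range(10))})
print([batch.num_rows for batch in table_iter(table, batch_size=4)])
# [4, 4, 2]; pass drop_last_batch=True to omit the final partial batch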

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda
ADDED: Binary file (2.4 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary
ADDED: Binary file (1.9 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde
ADDED: Binary file (256 Bytes)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe
ADDED: Binary file (1.82 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe
ADDED: Binary file (1.82 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Jan_Mayen
ADDED: Binary file (2.3 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Reykjavik
ADDED: Binary file (148 Bytes)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/South_Georgia
ADDED: Binary file (150 Bytes)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Stanley
ADDED: Binary file (1.2 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaNorte
ADDED: Binary file (2.37 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaSur
ADDED: Binary file (1.13 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/General
ADDED: Binary file (1.22 kB)

venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Apia
ADDED: Binary file (598 Bytes)