diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bd40c79bd0942d3227e9bff1f6277ac3a4e7b441
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d64b94bf893224ff1789b692a88a7052307ab1a947134c2713501d2b119513fd
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2cede7d446e78e8c6fd768b89f4f51c31c74f606
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5ba02121cdc9ced8147ef6bc5084f2ea1e7e9ddcb3bb75514896fcdf78d0dd1
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3f931790dc1c56bb89cbb47a5cb8f214469114ba
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb5380630c6eb51214baa0c22f53c8663bdabbeee5ec91aa1bddf01b366fc063
+size 9387
diff --git a/ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a83f9f93e1de66a16a19398998ca2a2521f96d33
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f91ef94428e9dcc46ab62e13fa34130086a40dfa1388cb432205029a078fc0
+size 9372
diff --git a/ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6cb3d212548d6287858f10549c27fd933633097a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8dcb79d73925597af47f3730c3fbebbab533108fb86d4e7741e572d1699a7ae
+size 9387
diff --git a/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f891df52cca2448714dc437b90a5d4dc7a4e0ca7
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f97da87e4a3cd5212cf743dd0ade277cf4c1c6847eb0415ec868ebe426b46dc3
+size 33555627
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/aqua-rat.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/aqua-rat.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..babebf638edcf0e9c5a2432adb6a2fdaf4793c1d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/aqua-rat.yaml
@@ -0,0 +1,24 @@
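+# Base config for the AGIEval multiple-choice tasks: the other agieval task
+# yamls in this directory include this file and override group, task, and
+# dataset_path.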
+group:
+ - agieval
+ - agieval_en
+ - agieval_nous
+task: agieval_aqua_rat
+dataset_path: hails/agieval-aqua-rat
+dataset_name: null
+output_type: multiple_choice
+training_split: null
+validation_split: null
+test_split: test
+doc_to_text: "{{query}}"
+doc_to_target: "{{gold}}"
+doc_to_choice: "{{choices}}"
+process_results: !function utils.process_results_mcqa
+metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ - metric: acc_norm
+ aggregation: mean
+ higher_is_better: true
+metadata:
+ version: 1.0
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-biology.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-biology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..36c44cbbeeb730f05c9d425c20f02c78acc81563
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-biology.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_gaokao_biology
+dataset_path: hails/agieval-gaokao-biology
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chemistry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..69810122eb274cdcb285232330a19807886ee50d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chemistry.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_gaokao_chemistry
+dataset_path: hails/agieval-gaokao-chemistry
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chinese.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chinese.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30d249b9d5544a3441e50284929aac6f081d6b76
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-chinese.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_gaokao_chinese
+dataset_path: hails/agieval-gaokao-chinese
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-english.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-english.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a540fcf25f503be64d3f5810be7b037a2e7c0504
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-english.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+  - agieval_en # categorized as EN because the AGIEval codebase lists this task in `english_qa_tasks`
+task: agieval_gaokao_english
+dataset_path: hails/agieval-gaokao-english
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-history.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-history.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b9c9c630fa2c843da5c8311b1e0570bb1cc267f9
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-history.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_gaokao_history
+dataset_path: hails/agieval-gaokao-history
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathqa.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa94e8eec85a931e5acbdb843730b58e8c1506e5
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathqa.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_gaokao_mathqa
+dataset_path: hails/agieval-gaokao-mathqa
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-physics.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-physics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..175dd6cca03fab93107e0bab827ea356ceb127eb
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-physics.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_gaokao_physics
+dataset_path: hails/agieval-gaokao-physics
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-ca.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-ca.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f93b47a5b1418d839933b71e71b523fd38696691
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-ca.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_jec_qa_ca
+dataset_path: hails/agieval-jec-qa-ca
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-kd.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-kd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0458eb7ea8356df569ac6c3b50af0bd4097ea857
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/jec-qa-kd.yaml
@@ -0,0 +1,6 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_cn
+task: agieval_jec_qa_kd
+dataset_path: hails/agieval-jec-qa-kd
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-en.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-en.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7112418659c4478c4e59f9bdcdebb6d64e7b9bb6
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-en.yaml
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_nous
+ - agieval_en
+task: agieval_logiqa_en
+dataset_path: hails/agieval-logiqa-en
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-ar.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-ar.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..302f9b519ee268831c1725fb96322d6628b9fdf9
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-ar.yaml
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_nous
+ - agieval_en
+task: agieval_lsat_ar
+dataset_path: hails/agieval-lsat-ar
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-rc.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-rc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..de155af78aa8d5ad3b14849d8a2807a7194f6744
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-rc.yaml
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_nous
+ - agieval_en
+task: agieval_lsat_rc
+dataset_path: hails/agieval-lsat-rc
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/sat-en-without-passage.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/sat-en-without-passage.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..01490d9ee10aba867a1863e9d6a74b678f4f5588
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/sat-en-without-passage.yaml
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_nous
+ - agieval_en
+task: agieval_sat_en_without_passage
+dataset_path: hails/agieval-sat-en-without-passage
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/sat-math.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/sat-math.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f5b644ee062975dbdb74870428d71189e297343a
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/sat-math.yaml
@@ -0,0 +1,7 @@
+include: aqua-rat.yaml
+group:
+ - agieval
+ - agieval_nous
+ - agieval_en
+task: agieval_sat_math
+dataset_path: hails/agieval-sat-math
diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/utils.py b/lm-evaluation-harness/lm_eval/tasks/agieval/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa6e544f1a7e15e853b99be2fe01502baadefcee
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/agieval/utils.py
@@ -0,0 +1,274 @@
+# Answer parsing and normalization code, from
+# https://github.com/ruixiangcui/AGIEval/blob/main/src/
+# math_equivalence.py and post_process.py
+import re
+from typing import Dict, List
+
+import numpy as np
+
+
+def parse_math_answer(raw_string):
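+    # Pull the final answer out of a free-form solution: prefer the last
+    # \boxed{...}/\fbox{...} expression, then the last $...$ span, then the
+    # right-hand side of the last "=" or the last bare number.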
+ def remove_boxed(s):
+ left = "\\boxed{"
+ try:
+ assert s[: len(left)] == left
+ assert s[-1] == "}"
+ answer = s[len(left) : -1]
+ if "=" in answer:
+ answer = answer.split("=")[-1].lstrip(" ")
+ return answer
+ except Exception:
+ return None
+
+ def last_boxed_only_string(string):
+ idx = string.rfind("\\boxed")
+ if idx < 0:
+ idx = string.rfind("\\fbox")
+ if idx < 0:
+ return None
+ i = idx
+ right_brace_idx = None
+ num_left_braces_open = 0
+ while i < len(string):
+ if string[i] == "{":
+ num_left_braces_open += 1
+ if string[i] == "}":
+ num_left_braces_open -= 1
+ if num_left_braces_open == 0:
+ right_brace_idx = i
+ break
+ i += 1
+
+ if right_brace_idx is None:
+ retval = None
+ else:
+ retval = string[idx : right_brace_idx + 1]
+
+ return retval
+
+ def get_answer_with_dollar_sign(s):
+        first_pattern = r"\$(.*)\$"
+ last_match = None
+ matches = re.findall(first_pattern, s)
+ if matches:
+ last_match = matches[-1]
+ if "=" in last_match:
+ last_match = last_match.split("=")[-1].lstrip(" ")
+ return last_match
+
+ def get_answer_without_dollar_sign(s):
+ last_match = None
+ if "=" in s:
+ last_match = s.split("=")[-1].lstrip(" ").rstrip(".")
+ if "\\n" in last_match:
+ last_match = last_match.split("\\n")[0]
+ else:
+            pattern = r"(?:\$)?\d+(?:\.\d+)?(?![\w\d])"
+ matches = re.findall(pattern, s)
+ if matches:
+ last_match = matches[-1]
+ return last_match
+
+ if "\\boxed" in raw_string:
+ answer = remove_boxed(last_boxed_only_string(raw_string))
+ else:
+ answer = get_answer_with_dollar_sign(raw_string)
+ if not answer:
+ answer = get_answer_without_dollar_sign(raw_string)
+ return answer
+
+
+# code from https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py
+def _fix_fracs(string):
+ substrs = string.split("\\frac")
+ new_str = substrs[0]
+ if len(substrs) > 1:
+ substrs = substrs[1:]
+ for substr in substrs:
+ new_str += "\\frac"
+ if substr[0] == "{":
+ new_str += substr
+ else:
+ try:
+ assert len(substr) >= 2
+ except Exception:
+ return string
+ a = substr[0]
+ b = substr[1]
+ if b != "{":
+ if len(substr) > 2:
+ post_substr = substr[2:]
+ new_str += "{" + a + "}{" + b + "}" + post_substr
+ else:
+ new_str += "{" + a + "}{" + b + "}"
+ else:
+ if len(substr) > 2:
+ post_substr = substr[2:]
+ new_str += "{" + a + "}" + b + post_substr
+ else:
+ new_str += "{" + a + "}" + b
+ string = new_str
+ return string
+
+
+def _fix_a_slash_b(string):
+ if len(string.split("/")) != 2:
+ return string
+ a = string.split("/")[0]
+ b = string.split("/")[1]
+ try:
+ a = int(a)
+ b = int(b)
+ assert string == "{}/{}".format(a, b)
+ new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
+ return new_string
+ except Exception:
+ return string
+
+
+def _remove_right_units(string):
+ # "\\text{ " only ever occurs (at least in the val set) when describing units
+ if "\\text{ " in string:
+ splits = string.split("\\text{ ")
+ assert len(splits) == 2
+ return splits[0]
+ else:
+ return string
+
+
+def _fix_sqrt(string):
+ if "\\sqrt" not in string:
+ return string
+ splits = string.split("\\sqrt")
+ new_string = splits[0]
+ for split in splits[1:]:
+ if split[0] != "{":
+ a = split[0]
+ new_substr = "\\sqrt{" + a + "}" + split[1:]
+ else:
+ new_substr = "\\sqrt" + split
+ new_string += new_substr
+ return new_string
+
+
+def _strip_string(string):
+ # linebreaks
+ string = string.replace("\n", "")
+ # print(string)
+
+ # remove inverse spaces
+ string = string.replace("\\!", "")
+ # print(string)
+
+ # replace \\ with \
+ string = string.replace("\\\\", "\\")
+ # print(string)
+
+ # replace tfrac and dfrac with frac
+ string = string.replace("tfrac", "frac")
+ string = string.replace("dfrac", "frac")
+ # print(string)
+
+ # remove \left and \right
+ string = string.replace("\\left", "")
+ string = string.replace("\\right", "")
+ # print(string)
+
+ # Remove circ (degrees)
+ string = string.replace("^{\\circ}", "")
+ string = string.replace("^\\circ", "")
+
+ # remove dollar signs
+ string = string.replace("\\$", "")
+
+ # remove units (on the right)
+ string = _remove_right_units(string)
+
+ # remove percentage
+ string = string.replace("\\%", "")
+ string = string.replace("\%", "")
+
+ # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
+ string = string.replace(" .", " 0.")
+ string = string.replace("{.", "{0.")
+ # if empty, return empty string
+ if len(string) == 0:
+ return string
+ if string[0] == ".":
+ string = "0" + string
+
+ # to consider: get rid of e.g. "k = " or "q = " at beginning
+ if len(string.split("=")) == 2:
+ if len(string.split("=")[0]) <= 2:
+ string = string.split("=")[1]
+
+ # fix sqrt3 --> sqrt{3}
+ string = _fix_sqrt(string)
+
+ # remove spaces
+ string = string.replace(" ", "")
+
+ # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
+ string = _fix_fracs(string)
+
+ # manually change 0.5 --> \frac{1}{2}
+ if string == "0.5":
+ string = "\\frac{1}{2}"
+
+ # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
+ string = _fix_a_slash_b(string)
+
+ return string
+
+
+def is_equiv(str1, str2, verbose=False):
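+    # Compare two answers after parsing and LaTeX normalization; fall back to
+    # comparing the parsed strings directly if normalization fails.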
+ if str1 is None and str2 is None:
+ print("WARNING: Both None")
+ return True
+ if str1 is None or str2 is None:
+ return False
+
+ str1, str2 = parse_math_answer(str1), parse_math_answer(str2)
+
+ try:
+ ss1 = _strip_string(str1)
+ ss2 = _strip_string(str2)
+ if verbose:
+ print(ss1, ss2)
+ return ss1 == ss2
+ except Exception:
+ return str1 == str2
+
+
+def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
+ candidate = results[0]
+
+ gold = doc["answer"]
+
+ if not gold:
+ print(doc, candidate, gold)
+ if is_equiv(candidate, gold):
+ retval = 1
+ else:
+ retval = 0
+
+ results = {
+ "acc": retval,
+ }
+ return results
+
+
+# use a custom process_results() function, because AGIEval can have multiple valid answers
+def process_results_mcqa(doc, results):
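+    # For multiple_choice tasks the harness passes one (loglikelihood, is_greedy)
+    # pair per answer choice; keep only the loglikelihoods.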
+ results = [result[0] for result in results]
+
+ gold = doc["gold"]
+
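+    # doc["gold"] holds the correct choice index (or indices, since some AGIEval
+    # questions accept several answers), hence the membership checks below.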
+ acc = 1.0 if int(np.argmax(results)) in gold else 0.0
+ completion_len = np.array([float(len(i)) for i in doc["choices"]])
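+    # acc_norm is length-normalized accuracy: each choice's loglikelihood is
+    # divided by the character length of that choice before taking the argmax.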
+ acc_norm = 1.0 if int(np.argmax(results / completion_len)) in gold else 0.0
+
+ return {
+ "acc": acc,
+ "acc_norm": acc_norm,
+ }
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_as.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_as.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d521eeb35ff3a92b05b4c8b171bdab3dcb9a77ab
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_as.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-as
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_as
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-as
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_gu.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_gu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82ed98b7539db34d4f91e60f3c35fc1fedce4c04
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_gu.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-gu
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_gu
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-gu
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_kn.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_kn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..281b9500877fe5d874f858d9fdf290b631123a33
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_kn.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-kn
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_kn
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-kn
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ml.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ml.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e8f250dc5c4e68d821ee6524089415649cbd517
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ml.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-ml
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_ml
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-ml
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ne.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ne.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6479636ab29d4ed3f1b58f5765625917a4e1d13d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ne.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-ne
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_ne
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-ne
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sa.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..228964a1575f554b50ff3efdb456495dd9d644e9
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sa.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-sa
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_sa
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-sa
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sd.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..107fd93623982a2bf197260f74d9d96ffc83b53d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_sd.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-sd
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_sd
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-sd
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ur.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ur.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..65f314815ae7ec3e63267b5ec85a58888f4345ac
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ur.yaml
@@ -0,0 +1,33 @@
+# This file is one of the generated language-specific IndicCOPA task configs;
+# the per-language prompt helpers it references are defined in utils.py.
+group: ai4bharat/IndicCOPA
+dataset_path: ai4bharat/IndicCOPA
+dataset_name: translation-ur
+output_type: multiple_choice
+# training_split: train
+# validation_split: validation
+test_split: test
+# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+# doc_to_target: label
+# doc_to_choice: "{{choice1}}{{choice2}}"
+# metric_list:
+# - metric: acc
+# aggregation: mean
+#     higher_is_better: true
+# metadata:
+# version: 1.0
+
+doc_to_text: !function utils.doc_to_text_ur
+doc_to_target: label
+doc_to_choice: !function utils.doc_to_choice
+metric_list:
+ - metric: acc
+metadata:
+ version: 1.0
+
+
+# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+# सही? नहीं, "+hypothesis]}}'
+# doc_to_text: ''
+task: indiccopa-ur
diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/utils.py b/lm-evaluation-harness/lm_eval/tasks/indiccopa/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..37b6471997252fc1fe1b128730a55b87ffdd2d1c
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/utils.py
@@ -0,0 +1,136 @@
+from functools import partial
+
+
+def convert_choice(choice):
+ return choice
+
+
+def doc_to_text(doc, connector):
+    # Drop the premise's trailing period, then append the language-specific connector.
+ conn = connector[doc["question"]]
+ return doc["premise"].strip()[:-1] + f" {conn}"
+
+
+def doc_to_choice(doc):
+ return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])]
+
+
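+# One doc_to_text_<lang> helper per IndicCOPA language, built by binding the
+# translated "cause"/"effect" connector words for that language.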
+doc_to_text_hi = partial(
+ doc_to_text,
+ connector={
+ "cause": "कारण",
+ "effect": "परिणाम",
+ },
+)
+
+doc_to_text_mr = partial(
+ doc_to_text,
+ connector={
+ "cause": "कारण",
+ "effect": "परिणाम",
+ },
+)
+
+doc_to_text_as = partial(
+ doc_to_text,
+ connector={
+ "cause": "কাৰণ",
+ "effect": "প্ৰভাৱ",
+ },
+)
+
+doc_to_text_bn = partial(
+ doc_to_text,
+ connector={
+ "cause": "কারণ",
+ "effect": "প্রভাব",
+ },
+)
+
+doc_to_text_gu = partial(
+ doc_to_text,
+ connector={
+ "cause": "કારણ",
+ "effect": "અસર",
+ },
+)
+
+doc_to_text_kn = partial(
+ doc_to_text,
+ connector={
+ "cause": "ಕಾರಣ",
+ "effect": "ಪರಿಣಾಮ",
+ },
+)
+
+doc_to_text_mai = partial(
+ doc_to_text,
+ connector={
+ "cause": "कारण",
+ "effect": "प्रभाव",
+ },
+)
+
+doc_to_text_ml = partial(
+ doc_to_text,
+ connector={
+ "cause": "കാരണമാകുന്നു",
+ "effect": "ഫലം",
+ },
+)
+
+doc_to_text_ne = partial(
+ doc_to_text,
+ connector={
+ "cause": "कारण",
+ "effect": "असर",
+ },
+)
+
+doc_to_text_or = partial(
+ doc_to_text,
+ connector={
+ "cause": "କାରଣ",
+ "effect": "ପ୍ରଭାବ",
+ },
+)
+
+doc_to_text_sa = partial(
+ doc_to_text,
+ connector={
+ "cause": "निमित्तम्",
+ "effect": "परिणाम",
+ },
+)
+
+doc_to_text_sd = partial(
+ doc_to_text,
+ connector={
+ "cause": "سبب",
+ "effect": "اثر",
+ },
+)
+
+doc_to_text_ta = partial(
+ doc_to_text,
+ connector={
+ "cause": "காரணம்",
+ "effect": "விளைவு",
+ },
+)
+
+doc_to_text_te = partial(
+ doc_to_text,
+ connector={
+ "cause": "కారణం",
+ "effect": "ప్రభావం",
+ },
+)
+
+doc_to_text_ur = partial(
+ doc_to_text,
+ connector={
+ "cause": "وجہ",
+ "effect": "اثر",
+ },
+)
diff --git a/venv/lib/python3.10/site-packages/datasets/combine.py b/venv/lib/python3.10/site-packages/datasets/combine.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2aad87f0cc9278626d0be5111f91b6de49ef935
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/combine.py
@@ -0,0 +1,215 @@
+from typing import List, Optional, TypeVar
+
+from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .info import DatasetInfo
+from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
+from .splits import NamedSplit
+from .utils import logging
+from .utils.py_utils import Literal
+
+
+logger = logging.get_logger(__name__)
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def interleave_datasets(
+ datasets: List[DatasetType],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> DatasetType:
+ """
+ Interleave several datasets (sources) into a single dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+
+ You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
+
+ - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
+ - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+    The resulting dataset ends when one of the source datasets runs out of examples, except when `oversampling` is `True`,
+    in which case the resulting dataset ends when all datasets have run out of examples at least once.
+
+ Note for iterable datasets:
+
+ In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
+    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).
+
+ Args:
+ datasets (`List[Dataset]` or `List[IterableDataset]`):
+ List of datasets to interleave.
+ probabilities (`List[float]`, *optional*, defaults to `None`):
+ If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, *optional*, defaults to `None`):
+ The random seed used to choose a source for each example.
+ info ([`DatasetInfo`], *optional*):
+ Dataset information, like description, citation, etc.
+
+ split ([`NamedSplit`], *optional*):
+ Name of the dataset split.
+
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset runs out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
+ parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
+ `IterableDataset`.
+
+ Example:
+
+ For regular datasets (map-style):
+
+ ```python
+ >>> from datasets import Dataset, interleave_datasets
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
+ >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2]
+ >>> dataset = interleave_datasets([d1, d2, d3])
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+ >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
+ >>> dataset = interleave_datasets([d1, d2, d3])
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+    [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
+    ```
+
+    For datasets in streaming mode (iterable):
+
+    ```python
+ >>> from datasets import load_dataset, interleave_datasets
+ >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
+ >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
+ >>> dataset = interleave_datasets([d1, d2])
+ >>> iterator = iter(dataset)
+ >>> next(iterator)
+ {'text': 'Mtendere Village was inspired by the vision...}
+ >>> next(iterator)
+ {'text': "Média de débat d'idées, de culture...}
+ ```
+ """
+ from .arrow_dataset import Dataset
+ from .iterable_dataset import IterableDataset
+
+ if not datasets:
+ raise ValueError("Unable to interleave an empty list of datasets.")
+ for i, dataset in enumerate(datasets):
+ if not isinstance(dataset, (Dataset, IterableDataset)):
+ if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+ if not dataset:
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+ "is an empty dataset dictionary."
+ )
+ raise ValueError(
+ f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+ f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
+ )
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+ )
+ if i == 0:
+ dataset_type, other_type = (
+ (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+ )
+ elif not isinstance(dataset, dataset_type):
+ raise ValueError(
+ f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ )
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
+ if dataset_type is Dataset:
+ return _interleave_map_style_datasets(
+ datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+ )
+ else:
+ return _interleave_iterable_datasets(
+ datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+ )
+
+
+def concatenate_datasets(
+ dsets: List[DatasetType],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> DatasetType:
+ """
+ Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].
+
+ Args:
+ dsets (`List[datasets.Dataset]`):
+ List of Datasets to concatenate.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ axis (`{0, 1}`, defaults to `0`):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
+
+ Example:
+
+ ```py
+ >>> ds3 = concatenate_datasets([ds1, ds2])
+ ```
+ """
+
+ if not dsets:
+ raise ValueError("Unable to concatenate an empty list of datasets.")
+ for i, dataset in enumerate(dsets):
+ if not isinstance(dataset, (Dataset, IterableDataset)):
+ if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+ if not dataset:
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+ "is an empty dataset dictionary."
+ )
+ raise ValueError(
+ f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
+ )
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+ )
+ if i == 0:
+ dataset_type, other_type = (
+ (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+ )
+ elif not isinstance(dataset, dataset_type):
+ raise ValueError(
+                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ )
+ if dataset_type is Dataset:
+ return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
+ else:
+ return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
diff --git a/venv/lib/python3.10/site-packages/datasets/dataset_dict.py b/venv/lib/python3.10/site-packages/datasets/dataset_dict.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ddbbb21648e8e316401415284b8fc70a8a9b03c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/dataset_dict.py
@@ -0,0 +1,2293 @@
+import contextlib
+import copy
+import fnmatch
+import json
+import math
+import posixpath
+import re
+import warnings
+from io import BytesIO
+from pathlib import Path
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+import fsspec
+import numpy as np
+from fsspec.core import url_to_fs
+from huggingface_hub import (
+ CommitInfo,
+ CommitOperationAdd,
+ CommitOperationDelete,
+ DatasetCard,
+ DatasetCardData,
+ HfApi,
+)
+from huggingface_hub.hf_api import RepoFile
+
+from . import config
+from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset
+from .features import Features
+from .features.features import FeatureType
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import Table
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils.deprecation_utils import deprecated
+from .utils.doc_utils import is_documented_by
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
+from .utils.typing import PathLike
+
+
+logger = logging.get_logger(__name__)
+
+
+class DatasetDict(dict):
+    """A dictionary (dict of str: datasets.Dataset) with dataset transform methods (map, filter, etc.)"""
+
+ def _check_values_type(self):
+ for dataset in self.values():
+ if not isinstance(dataset, Dataset):
+ raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
+
+ def _check_values_features(self):
+ items = list(self.items())
+ for item_a, item_b in zip(items[:-1], items[1:]):
+ if item_a[1].features != item_b[1].features:
+ raise ValueError(
+ f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
+ )
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ for dataset in self.values():
+ if hasattr(dataset, "_data"):
+ del dataset._data
+ if hasattr(dataset, "_indices"):
+ del dataset._indices
+
+ def __getitem__(self, k) -> Dataset:
+ if isinstance(k, (str, NamedSplit)) or len(self) == 0:
+ return super().__getitem__(k)
+ else:
+ available_suggested_splits = [
+ split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
+ ]
+ suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
+ raise KeyError(
+ f"Invalid key: {k}. Please first select a split. For example: "
+ f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
+ f"Available splits: {sorted(self)}"
+ )
+
+ @property
+ def data(self) -> Dict[str, Table]:
+ """The Apache Arrow tables backing each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.data
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.data for k, dataset in self.items()}
+
+ @property
+ def cache_files(self) -> Dict[str, Dict]:
+ """The cache files containing the Apache Arrow table backing each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.cache_files
+ {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
+ 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
+ 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.cache_files for k, dataset in self.items()}
+
+ @property
+ def num_columns(self) -> Dict[str, int]:
+ """Number of columns in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.num_columns
+ {'test': 2, 'train': 2, 'validation': 2}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.num_columns for k, dataset in self.items()}
+
+ @property
+ def num_rows(self) -> Dict[str, int]:
+ """Number of rows in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.num_rows
+ {'test': 1066, 'train': 8530, 'validation': 1066}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.num_rows for k, dataset in self.items()}
+
+ @property
+ def column_names(self) -> Dict[str, List[str]]:
+ """Names of the columns in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.column_names
+ {'test': ['text', 'label'],
+ 'train': ['text', 'label'],
+ 'validation': ['text', 'label']}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.column_names for k, dataset in self.items()}
+
+ @property
+ def shape(self) -> Dict[str, Tuple[int]]:
+        """Shape of each split of the dataset (number of rows, number of columns).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.shape
+ {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.shape for k, dataset in self.items()}
+
+ def flatten(self, max_depth=16) -> "DatasetDict":
+        """Flatten the Apache Arrow Table of each split (nested features are flattened).
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad")
+ >>> ds["train"].features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ DatasetDict({
+ train: Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ validation: Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 10570
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
+
+ def unique(self, column: str) -> Dict[str, List]:
+ """Return a list of the unique elements in a column for each split.
+
+ This is implemented in the low-level backend and as such, very fast.
+
+ Args:
+ column (`str`):
+ column name (list all the column names with [`~datasets.DatasetDict.column_names`])
+
+ Returns:
+ Dict[`str`, `list`]: Dictionary of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.unique("label")
+ {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.unique(column) for k, dataset in self.items()}
+
+ def cleanup_cache_files(self) -> Dict[str, int]:
+        """Clean up all cache files in the dataset cache directory, except the currently used cache file if there is one.
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Return:
+ `Dict` with the number of removed files for each split
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.cleanup_cache_files()
+ {'test': 0, 'train': 0, 'validation': 0}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
+
+ def __repr__(self):
+ repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+ repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+ return f"DatasetDict({{\n{repr}\n}})"
+
+ def cast(self, features: Features) -> "DatasetDict":
+ """
+ Cast the dataset to a new set of features.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name and order of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~DatasetDict.map`] to update the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds["train"].features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
+
+ def cast_column(self, column: str, feature) -> "DatasetDict":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature ([`Feature`]):
+ Target feature.
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+ """
+ Remove one or several column(s) from each split in the dataset
+ and the features associated to the column(s).
+
+ The transformation is applied to all the splits of the dataset dictionary.
+
+ You can also remove a column using [`~DatasetDict.map`] with `remove_columns` but the present method
+ doesn't copy the data of the remaining columns and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ [`DatasetDict`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds = ds.remove_columns("label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
+ """
+ Rename a column in the dataset and move the features associated to the original column under the new column name.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ You can also rename a column using [`~DatasetDict.map`] with `remove_columns` but the present method:
+ - takes care of moving the original features under the new column name.
+ - doesn't copy the data to a new dataset and is thus much faster.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds = ds.rename_column("label", "label_new")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict(
+ {
+ k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names.
+
+ Returns:
+ [`DatasetDict`]: A copy of the dataset with renamed columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ DatasetDict({
+ train: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+ """Select one or several column(s) from each split in the dataset and
+ the features associated to the column(s).
+
+ The transformation is applied to all the splits of the dataset
+ dictionary.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.select_columns("text")
+ DatasetDict({
+ train: Dataset({
+ features: ['text'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
+
+ Args:
+ column (`str`):
+ The name of the column to cast.
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq")
+ >>> ds["train"].features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column("answer")
+ >>> ds["train"].features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict(
+ {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
+ )
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
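+
+ Example (a minimal sketch, assuming the `rotten_tomatoes` dataset is available; the previous format is restored when the `with` block exits):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> with ds.formatted_as(type="numpy", columns=["label"]):
+ ... print(type(ds["train"]["label"]))
+ <class 'numpy.ndarray'>
+ ```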
+ """
+ self._check_values_type()
+ old_format_type = {k: dataset._format_type for k, dataset in self.items()}
+ old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
+ old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
+ old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ for k, dataset in self.items():
+ dataset.set_format(
+ old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
+ )
+
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns).
+ The format is set for every dataset in the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns
+ gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:
+
+ `new formatted columns = (all columns - previously unformatted columns)`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ ```
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds["train"].format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
+ The transform is set for every dataset in the dataset dictionary
+ As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`
+
+ Args:
+ transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
+ A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+ This function is applied right before returning the objects in ``__getitem__``.
+ columns (`List[str]`, optional): columns to format in the output
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
+ If set to True, then the other un-formatted columns are kept with the output of the transform.
+
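+ Example (a minimal sketch, mirroring [`~DatasetDict.with_transform`] and assuming the `rotten_tomatoes` dataset is available):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(batch):
+ ... return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")
+ >>> ds.set_transform(encode)
+ >>> # ds["train"][0] now returns tokenized tensors instead of the raw text
+ ```
+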
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ) -> "DatasetDict":
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ The format is set for every dataset in the dataset dictionary.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds["train"].format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ) -> "DatasetDict":
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+ The transform is set for every dataset in the dataset dictionary
+
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
+ >>> ds = ds.with_transform(encode)
+ >>> ds["train"][0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
+ 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
+ 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
+ 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
+ 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
+ 188, 1566, 7912, 14516, 6997, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ desc: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Apply a function to all the elements in the table (individually or in batches)
+ and update the table (if the function does update examples).
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`callable`): with one of the following signature:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, default `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`[datasets.Features]`, *optional*, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds["train"][0:3]["text"]
+ ['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+ 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
+ 'Review: effective but too-tepid biopic']
+
+ # process a batch of examples
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
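+
+ A per-split cache file can also be supplied, since `cache_file_names` takes one entry per split (a sketch; the file paths are hypothetical):
+
+ ```py
+ >>> ds = ds.map(add_prefix, cache_file_names={k: f"path/to/{k}.arrow" for k in ds})
+ ```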
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.map(
+ function=function,
+ with_indices=with_indices,
+ with_rank=with_rank,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ desc=desc,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ desc: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 4265
+ })
+ validation: Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.filter(
+ function=function,
+ with_indices=with_indices,
+ with_rank=with_rank,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ desc=desc,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_names (`Dict[str, str]`, *optional*, default `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating the cache. Already cached shards are loaded sequentially.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
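+
+ Example (a minimal sketch: `filter` creates an indices mapping, and `flatten_indices` rewrites the selected rows into a new contiguous dataset):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds = ds.filter(lambda x: x["label"] == 1) # creates an indices mapping
+ >>> ds = ds.flatten_indices() # materializes it as a new contiguous dataset
+ ```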
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.flatten_indices(
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ num_proc=num_proc,
+ new_fingerprint=new_fingerprint,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def sort(
+ self,
+ column_names: Union[str, Sequence[str]],
+ reverse: Union[bool, Sequence[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "DatasetDict":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
+ The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+
+ `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+
+ null_placement (`str`, defaults to `at_end`):
+ Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the sorted indices in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ A higher value gives smaller cache files, a lower value consumes less temporary memory.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes')
+ >>> ds['train']['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['train']['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['train']['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ self._check_values_type()
+ if indices_cache_file_names is None:
+ indices_cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.sort(
+ column_names=column_names,
+ reverse=reverse,
+ kind=kind,
+ null_placement=null_placement,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ indices_cache_file_name=indices_cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def shuffle(
+ self,
+ seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
+ seed: Optional[int] = None,
+ generators: Optional[Dict[str, np.random.Generator]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "DatasetDict":
+ """Create a new Dataset where the rows are shuffled.
+
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
+
+ Args:
+ seeds (`Dict[str, int]` or `int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ You can provide one `seed` per dataset in the dataset dictionary.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
+ generators (`Dict[str, np.random.Generator]`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ You have to provide one `generator` per dataset in the dataset dictionary.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_names (`Dict[str, str]`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mappings instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"]["label"][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds["train"]["label"][:10]
+ [0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
+ ```
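+
+ A different seed can also be given per split, since `seeds` accepts a dictionary (a minimal sketch):
+
+ ```py
+ >>> shuffled_ds = ds.shuffle(seeds={"train": 42, "validation": 123, "test": 7})
+ ```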
+ """
+ self._check_values_type()
+ if seed is not None and seeds is not None:
+ raise ValueError("Please specify seed or seeds, but not both")
+ seeds = seed if seed is not None else seeds
+ if seeds is None:
+ seeds = {k: None for k in self}
+ elif not isinstance(seeds, dict):
+ seeds = {k: seeds for k in self}
+ if generators is None:
+ generators = {k: None for k in self}
+ if indices_cache_file_names is None:
+ indices_cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.shuffle(
+ seed=seeds[k],
+ generator=generators[k],
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ indices_cache_file_name=indices_cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def save_to_disk(
+ self,
+ dataset_dict_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[Dict[str, int]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+
+ Args:
+ dataset_dict_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI
+ (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
+ saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be written to the filesystem. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`Dict[str, int]`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+ You need to provide the number of shards for each dataset in the dataset dictionary.
+ Use a dictionary to define a different num_shards for each split.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes to use when saving the dataset shards locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Example:
+
+ ```python
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory")
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
+ ```
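+
+ For a remote URI, credentials can be passed through `storage_options` (a sketch; the bucket name is hypothetical and the options shown follow `s3fs` conventions):
+
+ ```python
+ >>> storage_options = {"key": "<aws access key>", "secret": "<aws secret key>"}
+ >>> dataset_dict.save_to_disk("s3://my-bucket/dataset/directory", storage_options=storage_options)
+ ```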
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {}))
+
+ if num_shards is None:
+ num_shards = {k: None for k in self}
+ elif not isinstance(num_shards, dict):
+ raise ValueError(
+ "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
+ )
+
+ fs.makedirs(dataset_dict_path, exist_ok=True)
+
+ with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
+ json.dump({"splits": list(self)}, f)
+ for k, dataset in self.items():
+ dataset.save_to_disk(
+ posixpath.join(dataset_dict_path, k),
+ num_shards=num_shards.get(k),
+ max_shard_size=max_shard_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ )
+
+ @staticmethod
+ def load_from_disk(
+ dataset_dict_path: PathLike,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "DatasetDict":
+ """
+ Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_dict_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`)
+ of the dataset dict directory where the dataset dict will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> ds = load_from_disk('path/to/dataset/directory')
+ ```
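+
+ Loading from a remote URI works the same way (a sketch; the bucket name is hypothetical):
+
+ ```py
+ >>> storage_options = {"key": "<aws access key>", "secret": "<aws secret key>"}
+ >>> ds = DatasetDict.load_from_disk("s3://my-bucket/dataset/directory", storage_options=storage_options)
+ ```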
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {}))
+
+ dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
+ if not fs.isfile(dataset_dict_json_path):
+ if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
+ raise FileNotFoundError(
+ f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
+ )
+
+ with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
+ splits = json.load(f)["splits"]
+
+ dataset_dict = DatasetDict()
+ for k in splits:
+ dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
+ dataset_dict[k] = Dataset.load_from_disk(
+ dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
+ )
+ return dataset_dict
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: Optional[str] = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from CSV file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the CSV file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
+ ```
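+
+ Several splits can also be read at once by providing one path per split (a sketch; the file paths are hypothetical):
+
+ ```py
+ >>> ds = DatasetDict.from_csv({"train": "path/to/train.csv", "test": "path/to/test.csv"})
+ ```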
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: Optional[str] = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from JSON Lines file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the JSON Lines file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: Optional[str] = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from Parquet file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the Parquet file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset.parquet'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: Optional[str] = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from text file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the text file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @deprecated()
+ @is_documented_by(Dataset.prepare_for_task)
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict":
+ self._check_values_type()
+ return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()})
+
+ @is_documented_by(Dataset.align_labels_with_mapping)
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
+ self._check_values_type()
+ return DatasetDict(
+ {
+ k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
+ for k, dataset in self.items()
+ }
+ )
+
+ def push_to_hub(
+ self,
+ repo_id,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[Dict[str, int]] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
+ The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
+
+ The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to False.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`):
+ Configuration name of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
+ (like `"500MB"` or `"1GB"`).
+ num_shards (`Dict[str, int]`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+ Use a dictionary to define a different num_shards for each split.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`] removes local path information and embed file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset_dict.push_to_hub("/")
+ >>> dataset_dict.push_to_hub("/", private=True)
+ >>> dataset_dict.push_to_hub("/", max_shard_size="1GB")
+ >>> dataset_dict.push_to_hub("/", num_shards={"train": 1024, "test": 8})
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
+ >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
+ >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
+ ```
+ """
+
+ if num_shards is None:
+ num_shards = {k: None for k in self}
+ elif not isinstance(num_shards, dict):
+ raise ValueError(
+ "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
+ )
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ self._check_values_type()
+ self._check_values_features()
+ total_uploaded_size = 0
+ total_dataset_nbytes = 0
+ info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict()
+
+ for split in self.keys():
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions = []
+ for split in self.keys():
+ logger.info(f"Pushing split {split} to the Hub.")
+ # The split=key needs to be removed before merging
+ split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
+ repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ create_pr=create_pr,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards.get(split),
+ embed_external_files=embed_external_files,
+ )
+ additions += split_additions
+ total_uploaded_size += uploaded_size
+ total_dataset_nbytes += dataset_nbytes
+ info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = total_uploaded_size
+ info_to_dump.dataset_size = total_dataset_nbytes
+ info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ repo_splits = [] # use a list to keep the order of the splits
+ deletions = []
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in api.list_repo_tree(
+ repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
+ ):
+ if not isinstance(repo_file, RepoFile):
+ continue
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
+ and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ metadata_config_to_dump = {
+ "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
+ }
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ elif default_config_name:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+
+class IterableDatasetDict(dict):
+ def __repr__(self):
+ repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+ repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+ return f"IterableDatasetDict({{\n{repr}\n}})"
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDatasetDict":
+ """
+ Return a dataset with the specified format.
+ This method only supports the "torch" format for now.
+ The format is set to all the datasets of the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*, defaults to `None`):
+ If set to "torch", the returned dataset
+ will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> def encode(examples):
+ ... return tokenizer(examples["text"], truncation=True, padding="max_length")
+ >>> ds = ds.map(encode, batched=True, remove_columns=["text"])
+ >>> ds = ds.with_format("torch")
+ ```
+ """
+ return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: int = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDatasetDict":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be
+ dropped instead of being processed by the function.
+ remove_columns (`[List[str]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> next(iter(ds["train"]))
+ {'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.map(
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDatasetDict":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+ The filtering is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds["train"].take(3))
+ [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+ {'label': 0,
+ 'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+ {'label': 0,
+ 'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.filter(
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDatasetDict":
+ """
+ Randomly shuffles the elements of this dataset.
+ The shuffling is applied to all the datasets of the dataset dictionary.
+
+ This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1000 element buffer.
+
+ If the dataset is made of several shards, it also shuffles the order of the shards.
+ However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> list(ds["train"].take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.shuffle(seed=42)
+ >>> list(ds["train"].take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+ The renaming is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds["train"]))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+ The renaming is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with renamed columns
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+ >>> next(iter(ds["train"]))
+ {'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+ 'rating': 1}
+ ```
+ """
+ return IterableDatasetDict(
+ {k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
+ )
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+ The removal is applied to all the datasets of the dataset dictionary.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds["train"]))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+ """Select one or several column(s) in the dataset and the features
+ associated to them. The selection is done on-the-fly on the examples
+ when iterating over the dataset. The selection is applied to all the
+ datasets of the dataset dictionary.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds["train"]))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
+ """Cast column to feature for decoding.
+ The type casting is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature ([`Feature`]):
+ Target feature.
+
+ Returns:
+ [`IterableDatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ return IterableDatasetDict(
+ {k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDatasetDict":
+ """
+ Cast the dataset to a new set of features.
+ The type casting is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ features (`Features`):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds["train"].features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
diff --git a/venv/lib/python3.10/site-packages/datasets/exceptions.py b/venv/lib/python3.10/site-packages/datasets/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..619f2a10117dc16c20002b4cdcaf17a7f2350a8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/exceptions.py
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+from typing import Any, Dict, List, Optional, Union
+
+from huggingface_hub import HfFileSystem
+
+from . import config
+from .table import CastError
+from .utils.track import TrackedIterable, tracked_list, tracked_str
+
+
+class DatasetsError(Exception):
+ """Base class for exceptions in this library."""
+
+
+class DefunctDatasetError(DatasetsError):
+ """The dataset has been defunct."""
+
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+ """FileNotFoundError raised by this library."""
+
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+ """No (supported) data files found."""
+
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+ """Dataset not found.
+
+ Raised when trying to access:
+ - a missing dataset, or
+ - a private/gated dataset and the user is not authenticated.
+ """
+
+
+class DatasetBuildError(DatasetsError):
+ pass
+
+
+class ManualDownloadError(DatasetBuildError):
+ pass
+
+
+class FileFormatError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationCastError(DatasetGenerationError):
+ @classmethod
+ def from_cast_error(
+ cls,
+ cast_error: CastError,
+ builder_name: str,
+ gen_kwargs: Dict[str, Any],
+ token: Optional[Union[bool, str]],
+ ) -> "DatasetGenerationCastError":
+ explanation_message = (
+ f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
+ )
+ formatted_tracked_gen_kwargs: List[str] = []
+ for gen_kwarg in gen_kwargs.values():
+ if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterable)):
+ continue
+ while isinstance(gen_kwarg, (tracked_list, TrackedIterable)) and gen_kwarg.last_item is not None:
+ gen_kwarg = gen_kwarg.last_item
+ if isinstance(gen_kwarg, tracked_str):
+ gen_kwarg = gen_kwarg.get_origin()
+ if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
+ resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
+ gen_kwarg = "hf://" + resolved_path.unresolve()
+ if "@" + resolved_path.revision in gen_kwarg:
+ gen_kwarg = (
+ gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ + f" (at revision {resolved_path.revision})"
+ )
+ formatted_tracked_gen_kwargs.append(str(gen_kwarg))
+ if formatted_tracked_gen_kwargs:
+ explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
+ help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
+ return cls("An error occurred while generating the dataset" + explanation_message + help_message)
diff --git a/venv/lib/python3.10/site-packages/datasets/inspect.py b/venv/lib/python3.10/site-packages/datasets/inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6178b52d5af912799d952106ca81d9ed54f8299
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/inspect.py
@@ -0,0 +1,582 @@
+# Copyright 2020 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""List and inspect datasets."""
+
+import inspect
+import os
+import shutil
+import warnings
+from pathlib import Path, PurePath
+from typing import Dict, List, Mapping, Optional, Sequence, Union
+
+import huggingface_hub
+
+from . import config
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager
+from .info import DatasetInfo
+from .load import (
+ dataset_module_factory,
+ get_dataset_builder_class,
+ import_main_class,
+ load_dataset_builder,
+ metric_module_factory,
+)
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import relative_to_absolute_path
+from .utils.logging import get_logger
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+
+class SplitsNotFoundError(ValueError):
+ pass
+
+
+@deprecated("Use 'huggingface_hub.list_datasets' instead.")
+def list_datasets(with_community_datasets=True, with_details=False):
+ """List all the datasets scripts available on the Hugging Face Hub.
+
+ Args:
+ with_community_datasets (`bool`, *optional*, defaults to `True`):
+ Include the community provided datasets.
+ with_details (`bool`, *optional*, defaults to `False`):
+ Return the full details on the datasets instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_datasets
+ >>> list_datasets()
+ ['acronym_identification',
+ 'ade_corpus_v2',
+ 'adversarial_qa',
+ 'aeslc',
+ 'afrikaans_ner_corpus',
+ 'ag_news',
+ ...
+ ]
+ ```
+ """
+ datasets = huggingface_hub.list_datasets(full=with_details)
+ if not with_community_datasets:
+ datasets = [dataset for dataset in datasets if "/" not in dataset.id]
+ if not with_details:
+ datasets = [dataset.id for dataset in datasets]
+ return list(datasets)
+
+
+@deprecated(
+ "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def list_metrics(with_community_metrics=True, with_details=False):
+ """List all the metrics script available on the Hugging Face Hub.
+
+
+
+ Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+
+
+ Args:
+ with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
+ with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_metrics
+ >>> list_metrics()
+ ['accuracy',
+ 'bertscore',
+ 'bleu',
+ 'bleurt',
+ 'cer',
+ 'chrf',
+ ...
+ ]
+ ```
+ """
+ metrics = huggingface_hub.list_metrics()
+ if not with_community_metrics:
+ metrics = [metric for metric in metrics if "/" not in metric.id]
+ if not with_details:
+ metrics = [metric.id for metric in metrics]
+ return metrics
+
+
+@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
+def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ """
+ Allow inspection/modification of a dataset script by copying it to the local drive at `local_path`.
+
+ Args:
+ path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name
+ as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
+ local_path (`str`):
+ Path to the local folder to copy the dataset script to.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ **download_kwargs (additional keyword arguments):
+ Optional arguments for [`DownloadConfig`] which will override
+ the attributes of `download_config` if supplied.
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ if os.path.isfile(path):
+ path = str(Path(path).parent)
+ if os.path.isdir(path):
+ shutil.copytree(path, local_path, dirs_exist_ok=True)
+ else:
+ huggingface_hub.HfApi(endpoint=config.HF_ENDPOINT, token=download_config.token).snapshot_download(
+ repo_id=path, repo_type="dataset", local_dir=local_path, force_download=download_config.force_download
+ )
+ print(
+ f"The dataset {path} can be inspected at {local_path}. "
+ f'You can modify this loading script if it has one and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+@deprecated(
+ "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ r"""
+ Allow inspection/modification of a metric script by copying it to the local drive at `local_path`.
+
+
+
+ Use `evaluate.inspect_evaluation_module` from the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+ local_path (``str``): path to the local folder to copy the dataset script to.
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
+ """
+ metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
+ metric_cls = import_main_class(metric_module.module_path, dataset=False)
+ module_source_path = inspect.getsourcefile(metric_cls)
+ module_source_dirpath = os.path.dirname(module_source_path)
+ for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
+ dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
+ os.makedirs(dst_dirpath, exist_ok=True)
+ # skipping hidden directories; prune the search
+ dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
+ for filename in filenames:
+ shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
+ shutil.copystat(dirpath, dst_dirpath)
+ local_path = relative_to_absolute_path(local_path)
+ print(
+ f"The processing scripts for metric {path} can be inspected at {local_path}. "
+ f"The main class is in {module_source_dirpath}. "
+ f'You can modify this processing script and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+def get_dataset_infos(
+ path: str,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_infos
+ >>> get_dataset_infos('rotten_tomatoes')
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ config_names = get_dataset_config_names(
+ path=path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_files=data_files,
+ token=token,
+ )
+ return {
+ config_name: get_dataset_config_info(
+ path=path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ for config_name in config_names
+ }
+
+
+def get_dataset_config_names(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+):
+ """Get the list of available config names for a particular dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_config_names
+ >>> get_dataset_config_names("glue")
+ ['cola',
+ 'sst2',
+ 'mrpc',
+ 'qqp',
+ 'stsb',
+ 'mnli',
+ 'mnli_mismatched',
+ 'mnli_matched',
+ 'qnli',
+ 'rte',
+ 'wnli',
+ 'ax']
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ return list(builder_cls.builder_configs.keys()) or [
+ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
+ ]
+
+
+def get_dataset_default_config_name(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+) -> Optional[str]:
+ """Get the default config name for a particular dataset.
+ Can return None only if the dataset has multiple configurations and no default configuration.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Returns:
+ Optional[str]: the default config name if there is one
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_default_config_name
+ >>> get_dataset_default_config_name("openbookqa")
+ 'main'
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ builder_configs = list(builder_cls.builder_configs.keys())
+ if builder_configs:
+ default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
+ else:
+ default_config_name = "default"
+ return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
+
+
+def get_dataset_config_info(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+) -> DatasetInfo:
+ """Get the meta information (DatasetInfo) about a dataset for a particular config
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+ config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
+ data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
+ download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+
+ **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
+
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ builder = load_dataset_builder(
+ path,
+ name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ info = builder.info
+ if info.splits is None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ if token is not None:
+ download_config.token = token
+ builder._check_manual_download(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ try:
+ info.splits = {
+ split_generator.name: {"name": split_generator.name, "dataset_name": path}
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ }
+ except Exception as err:
+ raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
+ return info
+
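+# A minimal usage sketch (illustrative, not part of the original module); the split
+# names shown are the ones documented for "rotten_tomatoes" elsewhere in this file:
+#
+#   >>> from datasets import get_dataset_config_info
+#   >>> info = get_dataset_config_info("rotten_tomatoes")
+#   >>> sorted(info.splits)
+#   ['test', 'train', 'validation']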
+
+def get_dataset_split_names(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the list of available splits for a particular config and dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ config_name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_split_names
+ >>> get_dataset_split_names('rotten_tomatoes')
+ ['train', 'validation', 'test']
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ info = get_dataset_config_info(
+ path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ return list(info.splits.keys())
diff --git a/venv/lib/python3.10/site-packages/datasets/streaming.py b/venv/lib/python3.10/site-packages/datasets/streaming.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6d8768ac616444dfe8ff7cd2840df3996b76604
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/streaming.py
@@ -0,0 +1,142 @@
+import importlib
+import inspect
+from functools import wraps
+from typing import TYPE_CHECKING, Optional
+
+from .download.download_config import DownloadConfig
+from .utils.file_utils import (
+ xbasename,
+ xdirname,
+ xet_parse,
+ xexists,
+ xgetsize,
+ xglob,
+ xgzip_open,
+ xisdir,
+ xisfile,
+ xjoin,
+ xlistdir,
+ xnumpy_load,
+ xopen,
+ xpandas_read_csv,
+ xpandas_read_excel,
+ xPath,
+ xpyarrow_parquet_read_table,
+ xrelpath,
+ xsio_loadmat,
+ xsplit,
+ xsplitext,
+ xwalk,
+ xxml_dom_minidom_parse,
+)
+from .utils.logging import get_logger
+from .utils.patching import patch_submodule
+from .utils.py_utils import get_imports, lock_importable_file
+
+
+logger = get_logger(__name__)
+
+
+if TYPE_CHECKING:
+ from .builder import DatasetBuilder
+
+
+def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
+ """Extend the module to support streaming.
+
+ We patch some functions in the module to use `fsspec` to support data streaming:
+ - We use `fsspec.open` to open and read remote files. We patch the module function:
+ - `open`
+ - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
+ functions:
+ - `os.path.join`
+ - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
+
+ The patched functions are replaced with custom functions defined to work with the
+ :class:`~download.streaming_download_manager.StreamingDownloadManager`.
+
+ Args:
+ module_path: Path to the module to be extended.
+ download_config: mainly carries `use_auth_token` or `storage_options` to support different platforms and auth types.
+ """
+
+ module = importlib.import_module(module_path)
+
+ # TODO(QL): always update the module to add subsequent new authentication without removing old ones
+ if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
+ if isinstance(module._patched_for_streaming, DownloadConfig):
+ module._patched_for_streaming.token = download_config.token
+ module._patched_for_streaming.storage_options = download_config.storage_options
+ return
+
+ def wrap_auth(function):
+ @wraps(function)
+ def wrapper(*args, **kwargs):
+ return function(*args, download_config=download_config, **kwargs)
+
+ wrapper._decorator_name_ = "wrap_auth"
+ return wrapper
+
+ # open files in a streaming fashion
+ patch_submodule(module, "open", wrap_auth(xopen)).start()
+ patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
+ patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
+ patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
+ # allow to navigate in remote zip files
+ patch_submodule(module, "os.path.join", xjoin).start()
+ patch_submodule(module, "os.path.dirname", xdirname).start()
+ patch_submodule(module, "os.path.basename", xbasename).start()
+ patch_submodule(module, "os.path.relpath", xrelpath).start()
+ patch_submodule(module, "os.path.split", xsplit).start()
+ patch_submodule(module, "os.path.splitext", xsplitext).start()
+ # allow checks on paths
+ patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
+ patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
+ patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
+ patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
+ patch_submodule(module, "pathlib.Path", xPath).start()
+ # file readers
+ patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
+ patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
+ patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
+ patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
+ patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
+ patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
+ patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
+ # pyarrow: do not patch pyarrow attribute in packaged modules
+ if not module.__name__.startswith("datasets.packaged_modules."):
+ patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start()
+ module._patched_for_streaming = download_config
+
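+# Usage sketch (illustrative; the module name below is hypothetical): after this call,
+# `open`, `os.path.join`, `glob.glob`, etc. inside the target module resolve remote
+# (e.g. hf:// or https://) and archive paths through fsspec instead of the local filesystem.
+#
+#   >>> from datasets import DownloadConfig
+#   >>> extend_module_for_streaming(
+#   ...     "datasets_modules.datasets.my_dataset.my_dataset",  # hypothetical dataset script module
+#   ...     download_config=DownloadConfig(token=True),
+#   ... )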
+
+def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
+ """Extend the dataset builder module and the modules imported by it to support streaming.
+
+ Args:
+ builder (:class:`DatasetBuilder`): Dataset builder instance.
+ """
+ # this extends the open and os.path.join functions for data streaming
+ download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
+ extend_module_for_streaming(builder.__module__, download_config=download_config)
+ # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
+ if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv
+ importable_file = inspect.getfile(builder.__class__)
+ with lock_importable_file(importable_file):
+ for imports in get_imports(importable_file):
+ if imports[0] == "internal":
+ internal_import_name = imports[1]
+ internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
+ extend_module_for_streaming(internal_module_name, download_config=download_config)
+
+ # builders can inherit from other builders that might use streaming functionality
+ # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
+ # but these parents builders are not patched automatically as they are not instantiated, so we patch them here
+ from .builder import DatasetBuilder
+
+ parent_builder_modules = [
+ cls.__module__
+ for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched
+ if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
+ ] # check it's not a standard builder from datasets.builder
+ for module in parent_builder_modules:
+ extend_module_for_streaming(module, download_config=download_config)
diff --git a/venv/lib/python3.10/site-packages/datasets/table.py b/venv/lib/python3.10/site-packages/datasets/table.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ba8d0f030454ab62bbe3c62b55e994cea40322a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/table.py
@@ -0,0 +1,2415 @@
+import copy
+import os
+from functools import partial
+from itertools import groupby
+from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
+
+import numpy as np
+import pyarrow as pa
+import pyarrow.compute as pc
+import pyarrow.types
+
+from . import config
+from .utils.logging import get_logger
+
+
+if TYPE_CHECKING:
+ from .features.features import Features, FeatureType
+
+
+logger = get_logger(__name__)
+
+
+def inject_arrow_table_documentation(arrow_table_method):
+ def wrapper(fn):
+ fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
+ fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
+ if hasattr(arrow_table_method, "__annotations__"):
+ fn.__annotations__ = arrow_table_method.__annotations__
+ return fn
+
+ return wrapper
+
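+# Illustrative note (not part of the original module): the decorator factory above copies
+# a pyarrow method's docstring onto a wrapper, rewriting "pyarrow.Table" to "Table", e.g.
+#
+#   @inject_arrow_table_documentation(pa.Table.slice)
+#   def slice(self, offset=0, length=None):
+#       ...
+#
+# so that `slice.__doc__` starts with `pa.Table.slice.__doc__` (with the class name rewritten).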
+
+def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
+ in_memory_stream = pa.input_stream(filename)
+ opened_stream = pa.ipc.open_stream(in_memory_stream)
+ pa_table = opened_stream.read_all()
+ return pa_table
+
+
+def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
+ stream = pa.BufferReader(buffer)
+ opened_stream = pa.ipc.open_stream(stream)
+ table = opened_stream.read_all()
+ return table
+
+
+def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
+ memory_mapped_stream = pa.memory_map(filename)
+ return pa.ipc.open_stream(memory_mapped_stream)
+
+
+def read_schema_from_file(filename: str) -> pa.Schema:
+ """
+ Infer the arrow table schema from a file without loading the whole file into memory.
+ Especially useful for very big files.
+ """
+ with pa.memory_map(filename) as memory_mapped_stream:
+ schema = pa.ipc.open_stream(memory_mapped_stream).schema
+ return schema
+
+
+def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
+ opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
+ pa_table = opened_stream.read_all()
+ return pa_table
+
+
+def _deepcopy(x, memo: dict):
+ """deepcopy a regular class instance"""
+ cls = x.__class__
+ result = cls.__new__(cls)
+ memo[id(x)] = result
+ for k, v in x.__dict__.items():
+ setattr(result, k, copy.deepcopy(v, memo))
+ return result
+
+
+def _interpolation_search(arr: List[int], x: int) -> int:
+ """
+ Return the position i of a sorted array so that arr[i] <= x < arr[i+1]
+
+ Args:
+ arr (`List[int]`): non-empty sorted list of integers
+ x (`int`): query
+
+ Returns:
+ `int`: the position i so that arr[i] <= x < arr[i+1]
+
+ Raises:
+ `IndexError`: if the array is empty or if the query is outside the array values
+ """
+ i, j = 0, len(arr) - 1
+ while i < j and arr[i] <= x < arr[j]:
+ k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
+ if arr[k] <= x < arr[k + 1]:
+ return k
+ elif arr[k] < x:
+ i, j = k + 1, j
+ else:
+ i, j = i, k
+ raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
+
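+# Worked example (illustrative, not part of the original module): with cumulative batch
+# offsets arr = [0, 1000, 1500] and query x = 1200, the first probe lands at
+# k = 0 + 2 * 1200 // 1500 = 1, and since arr[1] <= 1200 < arr[2] the function returns 1.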
+
+class IndexedTableMixin:
+ def __init__(self, table: pa.Table):
+ self._schema: pa.Schema = table.schema
+ self._batches: List[pa.RecordBatch] = [
+ recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
+ ]
+ self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)
+
+ def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
+ """
+ Create a pa.Table by gathering the records at the specified indices. Should be faster
+ than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
+ the binary searches in parallel, using highly optimized C code.
+ """
+ if not len(indices):
+ raise ValueError("Indices must be non-empty")
+ batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
+ return pa.Table.from_batches(
+ [
+ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
+ for batch_idx, i in zip(batch_indices, indices)
+ ],
+ schema=self._schema,
+ )
+
+ def fast_slice(self, offset=0, length=None) -> pa.Table:
+ """
+ Slice the Table using interpolation search.
+ The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.
+
+ Interpolation search is used to find the start and end indexes of the batches we want to keep.
+ The batches to keep are then concatenated to form the sliced Table.
+ """
+ if offset < 0:
+ raise IndexError("Offset must be non-negative")
+ elif offset >= self._offsets[-1] or (length is not None and length <= 0):
+ return pa.Table.from_batches([], schema=self._schema)
+ i = _interpolation_search(self._offsets, offset)
+ if length is None or length + offset >= self._offsets[-1]:
+ batches = self._batches[i:]
+ batches[0] = batches[0].slice(offset - self._offsets[i])
+ else:
+ j = _interpolation_search(self._offsets, offset + length - 1)
+ batches = self._batches[i : j + 1]
+ batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
+ batches[0] = batches[0].slice(offset - self._offsets[i])
+ return pa.Table.from_batches(batches, schema=self._schema)
+
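+# A minimal sketch (illustrative, not part of the original module) of the mixin above;
+# it is normally used through the Table subclasses defined below:
+#
+#   >>> import pyarrow as pa
+#   >>> t = pa.Table.from_pydict({"x": list(range(10))})
+#   >>> IndexedTableMixin(t).fast_slice(3, 2).to_pydict()
+#   {'x': [3, 4]}
+#   >>> IndexedTableMixin(t).fast_gather([0, 9]).to_pydict()
+#   {'x': [0, 9]}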
+
+class Table(IndexedTableMixin):
+ """
+ Wraps a pyarrow Table by using composition.
+ This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`.
+
+ It implements all the basic attributes/methods of the pyarrow Table class except
+ the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column,
+ append_column, remove_column, set_column, rename_columns` and `drop`.
+
+ The implementation of these methods differs for the subclasses.
+ """
+
+ def __init__(self, table: pa.Table):
+ super().__init__(table)
+ self.table = table
+
+ def __deepcopy__(self, memo: dict):
+ # arrow tables are immutable, so there's no need to copy self.table
+ # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
+ # by adding it to the memo, self.table won't be copied
+ memo[id(self.table)] = self.table
+ # same for the recordbatches used by the index
+ memo[id(self._batches)] = list(self._batches)
+ return _deepcopy(self, memo)
+
+ def validate(self, *args, **kwargs):
+ """
+ Perform validation checks. An exception is raised if validation fails.
+
+ By default only cheap validation checks are run. Pass `full=True`
+ for thorough validation checks (potentially `O(n)`).
+
+ Args:
+ full (`bool`, defaults to `False`):
+ If `True`, run expensive checks, otherwise cheap checks only.
+
+ Raises:
+ `pa.lib.ArrowInvalid`: if validation fails
+ """
+ return self.table.validate(*args, **kwargs)
+
+ def equals(self, *args, **kwargs):
+ """
+ Check if contents of two tables are equal.
+
+ Args:
+ other ([`~datasets.table.Table`]):
+ Table to compare against.
+ check_metadata (`bool`, defaults to `False`):
+ Whether schema metadata equality should be checked as well.
+
+ Returns:
+ `bool`
+ """
+ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
+ kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
+ return self.table.equals(*args, **kwargs)
+
+ def to_batches(self, *args, **kwargs):
+ """
+ Convert Table to list of (contiguous) `RecordBatch` objects.
+
+ Args:
+ max_chunksize (`int`, defaults to `None`):
+ Maximum size for `RecordBatch` chunks. Individual chunks may be
+ smaller depending on the chunk layout of individual columns.
+
+ Returns:
+ `List[pyarrow.RecordBatch]`
+ """
+ return self.table.to_batches(*args, **kwargs)
+
+ def to_pydict(self, *args, **kwargs):
+ """
+ Convert the Table to a `dict` or `OrderedDict`.
+
+ Returns:
+ `dict`
+ """
+ return self.table.to_pydict(*args, **kwargs)
+
+ def to_pylist(self, *args, **kwargs):
+ """
+ Convert the Table to a list
+
+ Returns:
+ `list`
+ """
+ return self.table.to_pylist(*args, **kwargs)
+
+ def to_pandas(self, *args, **kwargs):
+ """
+ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+                Arrow MemoryPool to use for allocations. Uses the default memory
+                pool if not passed.
+ strings_to_categorical (`bool`, defaults to `False`):
+ Encode string (UTF8) and binary types to `pandas.Categorical`.
+ categories (`list`, defaults to `empty`):
+ List of fields that should be returned as `pandas.Categorical`. Only
+ applies to table-like data structures.
+ zero_copy_only (`bool`, defaults to `False`):
+ Raise an `ArrowException` if this function call would require copying
+ the underlying data.
+ integer_object_nulls (`bool`, defaults to `False`):
+ Cast integers with nulls to objects.
+ date_as_object (`bool`, defaults to `True`):
+ Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype.
+ timestamp_as_object (`bool`, defaults to `False`):
+ Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is
+ useful if you have timestamps that don't fit in the normal date
+ range of nanosecond timestamps (1678 CE-2262 CE).
+ If `False`, all timestamps are converted to `datetime64[ns]` dtype.
+ use_threads (`bool`, defaults to `True`):
+ Whether to parallelize the conversion using multiple threads.
+ deduplicate_objects (`bool`, defaults to `False`):
+                Do not create multiple copies of Python objects when converting, to save
+                on memory use. Conversion will be slower.
+ ignore_metadata (`bool`, defaults to `False`):
+ If `True`, do not use the 'pandas' metadata to reconstruct the
+ DataFrame index, if present.
+ safe (`bool`, defaults to `True`):
+ For certain data types, a cast is needed in order to store the
+ data in a pandas DataFrame or Series (e.g. timestamps are always
+ stored as nanoseconds in pandas). This option controls whether it
+ is a safe cast or not.
+ split_blocks (`bool`, defaults to `False`):
+ If `True`, generate one internal "block" for each column when
+ creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this
+                can temporarily reduce memory, note that various pandas operations
+ can trigger "consolidation" which may balloon memory use.
+ self_destruct (`bool`, defaults to `False`):
+ EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow
+ memory while converting the Arrow object to pandas. If you use the
+ object after calling `to_pandas` with this option it will crash your
+ program.
+ types_mapper (`function`, defaults to `None`):
+ A function mapping a pyarrow DataType to a pandas `ExtensionDtype`.
+ This can be used to override the default pandas type for conversion
+ of built-in pyarrow types or in absence of `pandas_metadata` in the
+ Table schema. The function receives a pyarrow DataType and is
+ expected to return a pandas `ExtensionDtype` or `None` if the
+ default conversion should be used for that type. If you have
+ a dictionary mapping, you can pass `dict.get` as function.
+
+ Returns:
+ `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object
+ """
+ return self.table.to_pandas(*args, **kwargs)
+
+ def to_string(self, *args, **kwargs):
+ return self.table.to_string(*args, **kwargs)
+
+ def to_reader(self, max_chunksize: Optional[int] = None):
+ """
+ Convert the Table to a RecordBatchReader.
+
+ Note that this method is zero-copy, it merely exposes the same data under a different API.
+
+ Args:
+            max_chunksize (`int`, defaults to `None`):
+ Maximum size for RecordBatch chunks. Individual chunks may be smaller depending
+ on the chunk layout of individual columns.
+
+ Returns:
+ `pyarrow.RecordBatchReader`
+ """
+ return self.table.to_reader(max_chunksize=max_chunksize)
+
+ def field(self, *args, **kwargs):
+ """
+ Select a schema field by its column name or numeric index.
+
+ Args:
+ i (`Union[int, str]`):
+ The index or name of the field to retrieve.
+
+ Returns:
+ `pyarrow.Field`
+ """
+ return self.table.field(*args, **kwargs)
+
+ def column(self, *args, **kwargs):
+ """
+ Select a column by its column name, or numeric index.
+
+ Args:
+ i (`Union[int, str]`):
+ The index or name of the column to retrieve.
+
+ Returns:
+ `pyarrow.ChunkedArray`
+ """
+ return self.table.column(*args, **kwargs)
+
+ def itercolumns(self, *args, **kwargs):
+ """
+ Iterator over all columns in their numerical order.
+
+ Yields:
+ `pyarrow.ChunkedArray`
+ """
+ return self.table.itercolumns(*args, **kwargs)
+
+ @property
+ def schema(self):
+ """
+ Schema of the table and its columns.
+
+ Returns:
+ `pyarrow.Schema`
+ """
+ return self.table.schema
+
+ @property
+ def columns(self):
+ """
+ List of all columns in numerical order.
+
+ Returns:
+ `List[pa.ChunkedArray]`
+ """
+ return self.table.columns
+
+ @property
+ def num_columns(self):
+ """
+ Number of columns in this table.
+
+ Returns:
+ int
+ """
+ return self.table.num_columns
+
+ @property
+ def num_rows(self):
+ """
+ Number of rows in this table.
+
+ Due to the definition of a table, all columns have the same number of
+ rows.
+
+ Returns:
+ int
+ """
+ return self.table.num_rows
+
+ @property
+ def shape(self):
+ """
+ Dimensions of the table: (#rows, #columns).
+
+ Returns:
+ `(int, int)`: Number of rows and number of columns.
+ """
+ return self.table.shape
+
+ @property
+ def nbytes(self):
+ """
+ Total number of bytes consumed by the elements of the table.
+ """
+ return self.table.nbytes
+
+ @property
+ def column_names(self):
+ """
+ Names of the table's columns.
+ """
+ return self.table.column_names
+
+ def __eq__(self, other):
+ return self.equals(other)
+
+ def __getitem__(self, i):
+ return self.table[i]
+
+ def __len__(self):
+ return len(self.table)
+
+ def __repr__(self):
+ return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)
+
+ def __str__(self):
+ return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)
+
+ def slice(self, *args, **kwargs):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ raise NotImplementedError()
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+        key-value metadata with the indicated new metadata (which may be `None`,
+        which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ raise NotImplementedError()
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`: New table without the column.
+ """
+ raise NotImplementedError()
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column set.
+ """
+ raise NotImplementedError()
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ raise NotImplementedError()
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`: New table without the columns.
+ """
+ raise NotImplementedError()
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ `datasets.table.Table`: table with only a subset of the columns
+ """
+ raise NotImplementedError()
+
+
+class TableBlock(Table):
+ """
+    `TableBlock` is the allowed class inside a `ConcatenationTable`.
+    Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock` subclasses.
+    This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTable` objects.
+ """
+
+ pass
+
+
+class InMemoryTable(TableBlock):
+ """
+    The table is said to be in-memory when it is loaded into the user's RAM.
+
+    Pickling it copies all the data into memory.
+    Its implementation is simple and uses the underlying pyarrow Table methods directly.
+
+    This is different from the `MemoryMappedTable`, for which pickling doesn't copy all the
+    data into memory. For a `MemoryMappedTable`, unpickling instead reloads the table from disk.
+
+    `InMemoryTable` must be used when data fit in memory, while `MemoryMappedTable` is reserved
+    for data bigger than memory or when you want the memory footprint of your application to
+    stay low.
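+
+    Example (illustrative sketch; the column values are arbitrary):
+
+    ```python
+    >>> table = InMemoryTable.from_pydict({"text": ["foo", "bar"]})
+    >>> table.num_rows
+    2
+    >>> table.slice(0, 1).to_pydict()
+    {'text': ['foo']}
+    ```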
+ """
+
+ @classmethod
+ def from_file(cls, filename: str):
+ table = _in_memory_arrow_table_from_file(filename)
+ return cls(table)
+
+ @classmethod
+ def from_buffer(cls, buffer: pa.Buffer):
+ table = _in_memory_arrow_table_from_buffer(buffer)
+ return cls(table)
+
+ @classmethod
+ def from_pandas(cls, *args, **kwargs):
+ """
+ Convert pandas.DataFrame to an Arrow Table.
+
+ The column types in the resulting Arrow Table are inferred from the
+ dtypes of the pandas.Series in the DataFrame. In the case of non-object
+ Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the
+ Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough
+ information to always lead to a meaningful Arrow type. In the case that
+ we cannot infer a type, e.g. because the DataFrame is of length 0 or
+ the Series only contains `None/nan` objects, the type is set to
+ null. This behavior can be avoided by constructing an explicit schema
+ and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ schema (`pyarrow.Schema`, *optional*):
+ The expected schema of the Arrow Table. This can be used to
+ indicate the type of columns if we cannot infer it automatically.
+ If passed, the output will have exactly this schema. Columns
+ specified in the schema that are not found in the DataFrame columns
+ or its index will raise an error. Additional columns or index
+ levels in the DataFrame which are not specified in the schema will
+ be ignored.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting
+ `Table`. The default of None will store the index as a column,
+ except for RangeIndex which is stored as metadata only. Use
+ `preserve_index=True` to force it to be stored as a column.
+            nthreads (`int`, defaults to `None` (may use up to system CPU count threads)):
+ If greater than 1, convert columns to Arrow in parallel using
+ indicated number of threads.
+ columns (`List[str]`, *optional*):
+ List of column to be converted. If `None`, use all columns.
+ safe (`bool`, defaults to `True`):
+                Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`:
+
+ Examples:
+ ```python
+ >>> import pandas as pd
+ >>> import pyarrow as pa
+ >>> df = pd.DataFrame({
+ ... 'int': [1, 2],
+ ... 'str': ['a', 'b']
+ ... })
+        >>> table = InMemoryTable.from_pandas(df)
+ ```
+ """
+ return cls(pa.Table.from_pandas(*args, **kwargs))
+
+ @classmethod
+ def from_arrays(cls, *args, **kwargs):
+ """
+ Construct a Table from Arrow arrays.
+
+ Args:
+ arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`):
+ Equal-length arrays that should form the table.
+ names (`List[str]`, *optional*):
+ Names for the table columns. If not passed, schema must be passed.
+ schema (`Schema`, defaults to `None`):
+ Schema for the created table. If not passed, names must be passed.
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_arrays(*args, **kwargs))
+
+ @classmethod
+ def from_pydict(cls, *args, **kwargs):
+ """
+ Construct a Table from Arrow arrays or columns.
+
+ Args:
+ mapping (`Union[dict, Mapping]`):
+ A mapping of strings to Arrays or Python lists.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the Mapping values
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_pydict(*args, **kwargs))
+
+ @classmethod
+ def from_pylist(cls, mapping, *args, **kwargs):
+ """
+ Construct a Table from list of rows / dictionaries.
+
+ Args:
+            mapping (`List[dict]`):
+                A list of dicts, each mapping column names to row values.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the Mapping values
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_pylist(mapping, *args, **kwargs))
+
+ @classmethod
+ def from_batches(cls, *args, **kwargs):
+ """
+ Construct a Table from a sequence or iterator of Arrow `RecordBatches`.
+
+ Args:
+ batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`):
+ Sequence of `RecordBatch` to be converted, all schemas must be equal.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the first `RecordBatch`.
+
+ Returns:
+ `datasets.table.Table`:
+ """
+ return cls(pa.Table.from_batches(*args, **kwargs))
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ # Use fast slicing here
+ return InMemoryTable(self.fast_slice(offset=offset, length=length))
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ return InMemoryTable(self.table.filter(*args, **kwargs))
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(table_flatten(self.table, *args, **kwargs))
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(self.table.combine_chunks(*args, **kwargs))
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(table_cast(self.table, *args, **kwargs))
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ return InMemoryTable(self.table.add_column(*args, **kwargs))
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ return InMemoryTable(self.table.append_column(*args, **kwargs))
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ return InMemoryTable(self.table.remove_column(*args, **kwargs))
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ return InMemoryTable(self.table.set_column(*args, **kwargs))
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ return InMemoryTable(self.table.rename_columns(*args, **kwargs))
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ return InMemoryTable(self.table.drop(*args, **kwargs))
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ return InMemoryTable(self.table.select(*args, **kwargs))
+
+
+# The MemoryMappedTable needs replays to properly reload tables from the disk
+Replay = Tuple[str, tuple, dict]
+
+
+class MemoryMappedTable(TableBlock):
+ """
+    The table is said to be memory-mapped when it doesn't use the user's RAM but loads the data
+    from disk instead.
+
+    Pickling it doesn't copy the data into memory.
+    Instead, only the path to the memory-mapped arrow file is pickled, as well as the list
+    of transforms to "replay" when reloading the table from disk.
+
+    Its implementation requires storing a history of all the transforms that were applied
+    to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
+    from disk.
+
+    This is different from the `InMemoryTable`, for which pickling does copy all the
+    data into memory.
+
+    `InMemoryTable` must be used when data fit in memory, while `MemoryMappedTable` is reserved
+    for data bigger than memory or when you want the memory footprint of your application to
+    stay low.
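+
+    Example (illustrative sketch; `data.arrow` is a hypothetical file in Arrow streaming IPC format
+    with a single column):
+
+    ```python
+    >>> table = MemoryMappedTable.from_file("data.arrow")
+    >>> table = table.rename_columns(["text"])
+    >>> table.replays  # the transform is recorded so it can be replayed after unpickling
+    [('rename_columns', (['text'],), {})]
+    ```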
+ """
+
+ def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
+ super().__init__(table)
+ self.path = os.path.abspath(path)
+ self.replays: List[Replay] = replays if replays is not None else []
+
+ @classmethod
+ def from_file(cls, filename: str, replays=None):
+ table = _memory_mapped_arrow_table_from_file(filename)
+ table = cls._apply_replays(table, replays)
+ return cls(table, filename, replays)
+
+ def __getstate__(self):
+ return {"path": self.path, "replays": self.replays}
+
+ def __setstate__(self, state):
+ path = state["path"]
+ replays = state["replays"]
+ table = _memory_mapped_arrow_table_from_file(path)
+ table = self._apply_replays(table, replays)
+ MemoryMappedTable.__init__(self, table, path=path, replays=replays)
+
+ @staticmethod
+ def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
+ if replays is not None:
+ for name, args, kwargs in replays:
+ if name == "cast":
+ table = table_cast(table, *args, **kwargs)
+ elif name == "flatten":
+ table = table_flatten(table, *args, **kwargs)
+ else:
+ table = getattr(table, name)(*args, **kwargs)
+ return table
+
+ def _append_replay(self, replay: Replay) -> List[Replay]:
+ replays = copy.deepcopy(self.replays)
+ replays.append(replay)
+ return replays
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("slice", (offset, length), {})
+ replays = self._append_replay(replay)
+ # Use fast slicing here
+ return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays)
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the ChunkedArray of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)
+
+ def cast(self, *args, **kwargs):
+ """
+        Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays)
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+        key-value metadata with the indicated new metadata (which may be `None`,
+        which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays)
+
+
+# A ConcatenationTable is the concatenation of several tables.
+# The ``blocks`` attribute stores a list of lists of blocks.
+# The first axis concatenates the tables along the axis 0 (it appends rows),
+# while the second axis concatenates tables along the axis 1 (it appends columns).
+TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]])
+
+
+class ConcatenationTable(Table):
+ """
+ The table comes from the concatenation of several tables called blocks.
+ It enables concatenation on both axis 0 (append rows) and axis 1 (append columns).
+
+    The underlying tables are called "blocks" and can be either `InMemoryTable`
+    or `MemoryMappedTable` objects.
+    This allows combining tables that come from memory or that are memory mapped.
+    When a `ConcatenationTable` is pickled, each block is pickled:
+    - the `InMemoryTable` objects are pickled by copying all the data in memory.
+    - the `MemoryMappedTable` objects are pickled without copying the data into memory.
+    Instead, only the path to the memory-mapped arrow file is pickled, as well as the list
+    of transforms to "replay" when reloading the table from disk.
+
+    Its implementation requires storing each block separately.
+    The `blocks` attribute stores a list of lists of blocks.
+    The first axis concatenates the tables along axis 0 (it appends rows),
+    while the second axis concatenates tables along axis 1 (it appends columns).
+
+    If some columns are missing when concatenating on axis 0, they are filled with null values.
+    This is done using `pyarrow.concat_tables(tables, promote=True)` (or `promote_options="default"`
+    on PyArrow 14+).
+
+ You can access the fully combined table by accessing the `ConcatenationTable.table` attribute,
+ and the blocks by accessing the `ConcatenationTable.blocks` attribute.
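+
+    Example (illustrative sketch; both blocks are in-memory here, so they are consolidated into a
+    single block):
+
+    ```python
+    >>> t1 = InMemoryTable.from_pydict({"a": [1, 2]})
+    >>> t2 = InMemoryTable.from_pydict({"a": [3]})
+    >>> t = ConcatenationTable.from_tables([t1, t2])
+    >>> t.to_pydict()
+    {'a': [1, 2, 3]}
+    >>> len(t.blocks), len(t.blocks[0])
+    (1, 1)
+    ```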
+ """
+
+ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
+ super().__init__(table)
+ self.blocks = blocks
+ # Check that all the blocks have the right type.
+ # Only InMemoryTable and MemoryMappedTable are allowed.
+ for subtables in blocks:
+ for subtable in subtables:
+ if not isinstance(subtable, TableBlock):
+ raise TypeError(
+ "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
+ f", but got {subtable}."
+ )
+
+ def __getstate__(self):
+ return {"blocks": self.blocks, "schema": self.table.schema}
+
+ def __setstate__(self, state):
+ blocks = state["blocks"]
+ schema = state["schema"]
+ table = self._concat_blocks_horizontally_and_vertically(blocks)
+ if schema is not None and table.schema != schema:
+ # We fix the columns by concatenating with an empty table with the right columns
+ empty_table = pa.Table.from_batches([], schema=schema)
+ # we set promote=True to fill missing columns with null values
+ if config.PYARROW_VERSION.major < 14:
+ table = pa.concat_tables([table, empty_table], promote=True)
+ else:
+ table = pa.concat_tables([table, empty_table], promote_options="default")
+ ConcatenationTable.__init__(self, table, blocks=blocks)
+
+ @staticmethod
+ def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
+ pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
+ if axis == 0:
+ # we set promote=True to fill missing columns with null values
+ if config.PYARROW_VERSION.major < 14:
+ return pa.concat_tables(pa_tables, promote=True)
+ else:
+ return pa.concat_tables(pa_tables, promote_options="default")
+ elif axis == 1:
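+            # axis=1: start from the first table and append the columns of the following ones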
+ for i, table in enumerate(pa_tables):
+ if i == 0:
+ pa_table = table
+ else:
+ for name, col in zip(table.column_names, table.columns):
+ pa_table = pa_table.append_column(name, col)
+ return pa_table
+ else:
+ raise ValueError("'axis' must be either 0 or 1")
+
+ @classmethod
+ def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
+ pa_tables_to_concat_vertically = []
+ for i, tables in enumerate(blocks):
+ if not tables:
+ continue
+ pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
+ pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
+ return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)
+
+ @classmethod
+ def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
+ if axis is not None:
+ merged_blocks = []
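+            # Group consecutive blocks by whether they are in-memory: consecutive InMemoryTable blocks
+            # are concatenated into a single InMemoryTable, while MemoryMappedTable blocks are kept
+            # as-is so their data stays on disk.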
+ for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
+ if is_in_memory:
+ block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
+ merged_blocks += list(block_group)
+ else: # both
+ merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
+ if all(len(row_block) == 1 for row_block in merged_blocks):
+ merged_blocks = cls._merge_blocks(
+ [block for row_block in merged_blocks for block in row_block], axis=0
+ )
+ return merged_blocks
+
+ @classmethod
+ def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
+ if isinstance(blocks, TableBlock):
+ return blocks
+ elif isinstance(blocks[0], TableBlock):
+ return cls._merge_blocks(blocks, axis=0)
+ else:
+ return cls._merge_blocks(blocks)
+
+ @classmethod
+ def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
+ blocks = cls._consolidate_blocks(blocks)
+ if isinstance(blocks, TableBlock):
+ table = blocks
+ return cls(table.table, [[table]])
+ elif isinstance(blocks[0], TableBlock):
+ table = cls._concat_blocks(blocks, axis=0)
+ blocks = [[t] for t in blocks]
+ return cls(table, blocks)
+ else:
+ table = cls._concat_blocks_horizontally_and_vertically(blocks)
+ return cls(table, blocks)
+
+ @classmethod
+ def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
+ """Create `ConcatenationTable` from list of tables.
+
+ Args:
+ tables (list of `Table` or list of `pyarrow.Table`):
+ List of tables.
+ axis (`{0, 1}`, defaults to `0`, meaning over rows):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+        Returns:
+            `datasets.table.ConcatenationTable`
+ """
+
+ def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]:
+ if isinstance(table, pa.Table):
+ return [[InMemoryTable(table)]]
+ elif isinstance(table, ConcatenationTable):
+ return copy.deepcopy(table.blocks)
+ else:
+ return [[table]]
+
+ def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]:
+ sliced = [table.slice(0, length) for table in row_block]
+ remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block]
+ return sliced, remainder
+
+ def _split_both_like(
+ result: List[List[TableBlock]], blocks: List[List[TableBlock]]
+ ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]:
+ """
+            Make sure each row_block contains the same num_rows so they can be concatenated on axis=1.
+
+ To do so, we modify both blocks sets to have the same row_blocks boundaries.
+ For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows,
+ we modify both to have 4 row_blocks of size 2, 1, 1 and 2:
+
+ [ x x x | x x x ]
+ + [ y y | y y | y y ]
+ -----------------------------
+ = [ x x | x | x | x x ]
+ [ y y | y | y | y y ]
+
+ """
+ result, blocks = list(result), list(blocks)
+ new_result, new_blocks = [], []
+ while result and blocks:
+                # we slice the longest row block to obtain two row blocks of the same length
+ # and we replace the long row block by its remainder if necessary
+ if len(result[0][0]) > len(blocks[0][0]):
+ new_blocks.append(blocks[0])
+ sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0]))
+ new_result.append(sliced)
+ elif len(result[0][0]) < len(blocks[0][0]):
+ new_result.append(result[0])
+ sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0]))
+ new_blocks.append(sliced)
+ else:
+ new_result.append(result.pop(0))
+ new_blocks.append(blocks.pop(0))
+ if result or blocks:
+ raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows")
+ return new_result, new_blocks
+
+ def _extend_blocks(
+ result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0
+ ) -> List[List[TableBlock]]:
+ if axis == 0:
+ result.extend(blocks)
+ elif axis == 1:
+                # We make sure each row_block has the same num_rows
+ result, blocks = _split_both_like(result, blocks)
+ for i, row_block in enumerate(blocks):
+ result[i].extend(row_block)
+ return result
+
+ blocks = to_blocks(tables[0])
+ for table in tables[1:]:
+ table_blocks = to_blocks(table)
+ blocks = _extend_blocks(blocks, table_blocks, axis=axis)
+ return cls.from_blocks(blocks)
+
+ @property
+ def _slices(self):
+ offset = 0
+ for tables in self.blocks:
+ length = len(tables[0])
+ yield (offset, length)
+ offset += length
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = self.table.slice(offset, length=length)
+ length = length if length is not None else self.num_rows - offset
+ blocks = []
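+        # Walk the row blocks: skip the ones entirely before `offset`, and slice the ones that
+        # overlap the requested range so the resulting blocks mirror the sliced table.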
+ for tables in self.blocks:
+ n_rows = len(tables[0])
+ if length == 0:
+ break
+ elif n_rows <= offset:
+ offset = offset - n_rows
+ elif n_rows <= offset + length:
+ blocks.append([t.slice(offset) for t in tables])
+ length, offset = length + offset - n_rows, 0
+ else:
+ blocks.append([t.slice(offset, length) for t in tables])
+ length, offset = 0, 0
+ return ConcatenationTable(table, blocks)
+
+ def filter(self, mask, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ table = self.table.filter(mask, *args, **kwargs)
+ blocks = []
+ for (offset, length), tables in zip(self._slices, self.blocks):
+ submask = mask.slice(offset, length)
+ blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = table_flatten(self.table, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.flatten(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = self.table.combine_chunks(*args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def cast(self, target_schema, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ from .features import Features
+
+ table = table_cast(self.table, target_schema, *args, **kwargs)
+ target_features = Features.from_arrow_schema(target_schema)
+ blocks = []
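+        # Each block only holds a subset of the columns, so build a per-block sub-schema
+        # from the target features before casting that block.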
+ for subtables in self.blocks:
+ new_tables = []
+ fields = list(target_schema)
+ for subtable in subtables:
+ subfields = []
+ for name in subtable.column_names:
+ subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
+ subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
+ subschema = subfeatures.arrow_schema
+ new_tables.append(subtable.cast(subschema, *args, **kwargs))
+ blocks.append(new_tables)
+ return ConcatenationTable(table, blocks)
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ table = self.table.replace_schema_metadata(*args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
+        return ConcatenationTable(table, blocks)
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def remove_column(self, i, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ table = self.table.remove_column(i, *args, **kwargs)
+ name = self.table.column_names[i]
+ blocks = []
+ for tables in self.blocks:
+ blocks.append(
+ [
+ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
+ for t in tables
+ ]
+ )
+ return ConcatenationTable(table, blocks)
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ raise NotImplementedError()
+
+ def rename_columns(self, names, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ table = self.table.rename_columns(names, *args, **kwargs)
+ names = dict(zip(self.table.column_names, names))
+ blocks = []
+ for tables in self.blocks:
+ blocks.append(
+ [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
+ )
+ return ConcatenationTable(table, blocks)
+
+ def drop(self, columns, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ table = self.table.drop(columns, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def select(self, columns, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ table = self.table.select(columns, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+
+def concat_tables(tables: List[Table], axis: int = 0) -> Table:
+ """
+ Concatenate tables.
+
+ Args:
+ tables (list of `Table`):
+ List of tables to be concatenated.
+ axis (`{0, 1}`, defaults to `0`, meaning over rows):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
+ Returns:
+ `datasets.table.Table`:
+ If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
+ Otherwise if there's only one table, it is returned as is.
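+
+    Example (illustrative sketch; concatenating two in-memory tables over columns):
+
+    ```python
+    >>> t1 = InMemoryTable.from_pydict({"a": [1, 2]})
+    >>> t2 = InMemoryTable.from_pydict({"b": ["x", "y"]})
+    >>> concat_tables([t1, t2], axis=1).to_pydict()
+    {'a': [1, 2], 'b': ['x', 'y']}
+    ```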
+ """
+ tables = list(tables)
+ if len(tables) == 1:
+ return tables[0]
+ return ConcatenationTable.from_tables(tables, axis=axis)
+
+
+def list_table_cache_files(table: Table) -> List[str]:
+ """
+ Get the cache files that are loaded by the table.
+ Cache file are used when parts of the table come from the disk via memory mapping.
+
+ Returns:
+ `List[str]`:
+ A list of paths to the cache files loaded by the table.
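+
+    Example (illustrative sketch; `data.arrow` is a hypothetical memory-mapped Arrow file, so the
+    returned path below is only indicative):
+
+    ```python
+    >>> list_table_cache_files(InMemoryTable.from_pydict({"a": [1]}))
+    []
+    >>> list_table_cache_files(MemoryMappedTable.from_file("data.arrow"))
+    ['/absolute/path/to/data.arrow']
+    ```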
+ """
+ if isinstance(table, ConcatenationTable):
+ cache_files = []
+ for subtables in table.blocks:
+ for subtable in subtables:
+ cache_files += list_table_cache_files(subtable)
+ return cache_files
+ elif isinstance(table, MemoryMappedTable):
+ return [table.path]
+ else:
+ return []
+
+
+def _wrap_for_chunked_arrays(func):
+ """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly"""
+
+ def wrapper(array, *args, **kwargs):
+ if isinstance(array, pa.ChunkedArray):
+ return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
+ else:
+ return func(array, *args, **kwargs)
+
+ return wrapper
+
+
+def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool:
+ """Check if all the sub-lists of a `pa.ListArray` have the specified length."""
+ return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array)
+
+
+def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array:
+ """Add the null bitmap to the offsets of a `pa.ListArray`."""
+ offsets = array.offsets
+ if array.null_count > 0:
+ offsets = pa.concat_arrays(
+ [
+ pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())),
+ offsets[-1:],
+ ]
+ )
+ return offsets
+
+
+def _storage_type(type: pa.DataType) -> pa.DataType:
+ """Convert a (possibly nested) `pa.ExtensionType` to its storage type."""
+ if isinstance(type, pa.ExtensionType):
+ return _storage_type(type.storage_type)
+ elif isinstance(type, pa.StructType):
+ return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type])
+ elif isinstance(type, pa.ListType):
+ return pa.list_(_storage_type(type.value_type))
+ elif isinstance(type, pa.FixedSizeListType):
+ return pa.list_(_storage_type(type.value_type), type.list_size)
+ return type
+
+
+@_wrap_for_chunked_arrays
+def array_cast(
+ array: pa.Array, pa_type: pa.DataType, allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True
+) -> Union[pa.Array, pa.FixedSizeListArray, pa.ListArray, pa.StructArray, pa.ExtensionArray]:
+ """Improved version of `pa.Array.cast`
+
+ It supports casting `pa.StructArray` objects to re-order the fields.
+    It also lets you control certain aspects of the casting, e.g. whether
+    to disable casting primitives (`booleans`, `floats` or `ints`) to strings,
+    or to disable casting decimals to strings.
+
+ Args:
+ array (`pa.Array`):
+ PyArrow array to cast
+ pa_type (`pa.DataType`):
+ Target PyArrow type
+ allow_primitive_to_str (`bool`, defaults to `True`):
+ Whether to allow casting primitives to strings.
+ Defaults to `True`.
+ allow_decimal_to_str (`bool`, defaults to `True`):
+ Whether to allow casting decimals to strings.
+ Defaults to `True`.
+
+ Raises:
+ `pa.ArrowInvalidError`: if the arrow data casting fails
+        `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+ - if casting from primitives to strings and `allow_primitive_to_str` is `False`
+ - if casting from decimals to strings and `allow_decimal_to_str` is `False`
+
+ Returns:
+        `pyarrow.Array`: the casted array
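+
+    Example (illustrative sketch; shows re-ordering struct fields while casting one of them):
+
+    ```python
+    >>> arr = pa.array([{"b": 1, "a": "x"}])
+    >>> str(array_cast(arr, pa.struct({"a": pa.string(), "b": pa.int32()})).type)
+    'struct<a: string, b: int32>'
+    ```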
+ """
+ _c = partial(array_cast, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str)
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if isinstance(pa_type, pa.ExtensionType):
+ return pa_type.wrap_array(_c(array, pa_type.storage_type))
+ elif array.type == pa_type:
+ return array
+ elif pa.types.is_struct(array.type):
+ if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}):
+ if array.type.num_fields == 0:
+ return array
+ arrays = [_c(array.field(field.name), field.type) for field in pa_type]
+ return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ if pa.types.is_fixed_size_list(pa_type):
+ if _are_list_values_of_length(array, pa_type.list_size):
+ if array.null_count > 0:
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = _c(array, storage_type)
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array = _c(array, array_type)
+ else:
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array_values = array.values
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
+ else:
+ array_values = array.values[
+                        array.offset * pa_type.list_size : (array.offset + len(array)) * pa_type.list_size
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size)
+ elif pa.types.is_list(pa_type):
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type))
+ elif pa.types.is_fixed_size_list(array.type):
+ if pa.types.is_fixed_size_list(pa_type):
+ if pa_type.list_size == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
+ elif pa.types.is_list(pa_type):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null())
+ else:
+ if pa.types.is_string(pa_type):
+ if not allow_primitive_to_str and pa.types.is_primitive(array.type):
+ raise TypeError(
+ f"Couldn't cast array of type {array.type} to {pa_type} "
+ f"since allow_primitive_to_str is set to {allow_primitive_to_str} "
+ )
+ if not allow_decimal_to_str and pa.types.is_decimal(array.type):
+ raise TypeError(
+ f"Couldn't cast array of type {array.type} to {pa_type} "
+ f"and allow_decimal_to_str is set to {allow_decimal_to_str}"
+ )
+ if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):
+ raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}")
+ return array.cast(pa_type)
+ raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}")
+
+
+@_wrap_for_chunked_arrays
+def cast_array_to_feature(
+ array: pa.Array, feature: "FeatureType", allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True
+) -> pa.Array:
+ """Cast an array to the arrow type that corresponds to the requested feature type.
+ For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods
+ they defined to enable casting from other arrow types.
+
+ Args:
+ array (`pa.Array`):
+ The PyArrow array to cast.
+ feature (`datasets.features.FeatureType`):
+ The target feature type.
+ allow_primitive_to_str (`bool`, defaults to `True`):
+ Whether to allow casting primitives to strings.
+ Defaults to `True`.
+ allow_decimal_to_str (`bool`, defaults to `True`):
+ Whether to allow casting decimals to strings.
+ Defaults to `True`.
+
+ Raises:
+ `pa.ArrowInvalidError`: if the arrow data casting fails
+        `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+ - if casting from primitives and `allow_primitive_to_str` is `False`
+ - if casting from decimals and `allow_decimal_to_str` is `False`
+
+ Returns:
+ array (`pyarrow.Array`): the casted array
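+
+    Example (illustrative sketch; relies on `ClassLabel.cast_storage` to turn label strings into
+    integer ids, and on the generic list path for `Sequence`):
+
+    ```python
+    >>> from datasets import ClassLabel, Sequence, Value
+    >>> arr = pa.array(["neg", "pos", "pos"])
+    >>> cast_array_to_feature(arr, ClassLabel(names=["neg", "pos"])).to_pylist()
+    [0, 1, 1]
+    >>> arr = pa.array([[1, 2], [3]])
+    >>> str(cast_array_to_feature(arr, Sequence(Value("float32"))).type)
+    'list<item: float>'
+    ```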
+ """
+ from .features.features import Sequence, get_nested_type
+
+ _c = partial(
+ cast_array_to_feature,
+ allow_primitive_to_str=allow_primitive_to_str,
+ allow_decimal_to_str=allow_decimal_to_str,
+ )
+
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if hasattr(feature, "cast_storage"):
+ return feature.cast_storage(array)
+
+ elif pa.types.is_struct(array.type):
+ # feature must be a dict or Sequence(subfeatures_dict)
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+ }
+ if isinstance(feature, dict) and {field.name for field in array.type} == set(feature):
+ if array.type.num_fields == 0:
+ return array
+ arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
+ return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ if isinstance(feature, list):
+ casted_array_values = _c(array.values, feature[0])
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
+ elif isinstance(feature, Sequence):
+ if feature.length > -1:
+ if _are_list_values_of_length(array, feature.length):
+ if array.null_count > 0:
+                        # Ensure each null value in the array translates to [null] * feature.length in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = array_cast(
+ array,
+ storage_type,
+ allow_primitive_to_str=allow_primitive_to_str,
+ allow_decimal_to_str=allow_decimal_to_str,
+ )
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array = array_cast(
+ array,
+ array_type,
+ allow_primitive_to_str=allow_primitive_to_str,
+ allow_decimal_to_str=allow_decimal_to_str,
+ )
+ else:
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array_values = array.values
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
+ )
+ else:
+ array_values = array.values[
+ array.offset * feature.length : (array.offset + len(array)) * feature.length
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
+ else:
+ casted_array_values = _c(array.values, feature.feature)
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
+ elif pa.types.is_fixed_size_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ if isinstance(feature, list):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null())
+ elif isinstance(feature, Sequence):
+ if feature.length > -1:
+ if feature.length == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
+ )
+ else:
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null())
+ if pa.types.is_null(array.type):
+ return array_cast(
+ array,
+ get_nested_type(feature),
+ allow_primitive_to_str=allow_primitive_to_str,
+ allow_decimal_to_str=allow_decimal_to_str,
+ )
+ elif not isinstance(feature, (Sequence, dict, list, tuple)):
+ return array_cast(
+ array,
+ feature(),
+ allow_primitive_to_str=allow_primitive_to_str,
+ allow_decimal_to_str=allow_decimal_to_str,
+ )
+ raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
+
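+# A minimal usage sketch (illustrative only), assuming the usual `Sequence` and `Value`
+# feature types from `datasets.features`:
+#
+#     from .features import Sequence, Value
+#     arr = pa.array([[1, 2], None, [3]])                        # list<int64> with a null entry
+#     casted = cast_array_to_feature(arr, Sequence(Value("string")))
+#     # -> list<string>; nulls are preserved via the merged offsets/mask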
+
+@_wrap_for_chunked_arrays
+def embed_array_storage(array: pa.Array, feature: "FeatureType"):
+    """Embed data into an array's storage.
+    For custom features like Audio or Image, it takes into account the "embed_storage" methods
+    they define to embed external data (e.g. an image file) into an array.
+
+ Args:
+ array (`pa.Array`):
+ The PyArrow array in which to embed data.
+ feature (`datasets.features.FeatureType`):
+ Array features.
+
+ Raises:
+        `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+
+ Returns:
+        array (`pyarrow.Array`): the array with embedded data
+ """
+ from .features import Sequence
+
+ _e = embed_array_storage
+
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if hasattr(feature, "embed_storage"):
+ return feature.embed_storage(array)
+ elif pa.types.is_struct(array.type):
+ # feature must be a dict or Sequence(subfeatures_dict)
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+ }
+ if isinstance(feature, dict):
+ arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()]
+ return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ if isinstance(feature, list):
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0]))
+ if isinstance(feature, Sequence) and feature.length == -1:
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature))
+ elif pa.types.is_fixed_size_list(array.type):
+ # feature must be Sequence(subfeature)
+ if isinstance(feature, Sequence) and feature.length > -1:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ embedded_array_values = _e(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[embedded_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null())
+ if not isinstance(feature, (Sequence, dict, list, tuple)):
+ return array
+ raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")
+
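+# A minimal usage sketch (illustrative; assumes a local file "img.png" exists and that the
+# column uses the `Image` feature, whose storage is struct<bytes: binary, path: string>):
+#
+#     from .features import Image
+#     storage = pa.array(
+#         [{"bytes": None, "path": "img.png"}],
+#         type=pa.struct({"bytes": pa.binary(), "path": pa.string()}),
+#     )
+#     embedded = embed_array_storage(storage, Image())
+#     # "bytes" now holds the content read from "img.png", so the array is self-contained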
+
+class CastError(ValueError):
+ """When it's not possible to cast an Arrow table to a specific schema or set of features"""
+
+ def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None:
+ super().__init__(*args)
+ self.table_column_names = table_column_names
+ self.requested_column_names = requested_column_names
+
+ def __reduce__(self):
+ # Fix unpickling: TypeError: __init__() missing 2 required keyword-only arguments: 'table_column_names' and 'requested_column_names'
+ return partial(
+ CastError, table_column_names=self.table_column_names, requested_column_names=self.requested_column_names
+ ), ()
+
+ def details(self):
+ new_columns = set(self.table_column_names) - set(self.requested_column_names)
+ missing_columns = set(self.requested_column_names) - set(self.table_column_names)
+ if new_columns and missing_columns:
+ return f"there are {len(new_columns)} new columns ({', '.join(new_columns)}) and {len(missing_columns)} missing columns ({', '.join(missing_columns)})."
+        elif new_columns:
+            return f"there are {len(new_columns)} new columns ({', '.join(new_columns)})."
+        else:
+            return f"there are {len(missing_columns)} missing columns ({', '.join(missing_columns)})."
+
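+# Typical handling sketch (illustrative): `CastError` keeps both column name lists so callers
+# can build a readable error message, e.g.
+#
+#     try:
+#         casted = cast_table_to_features(table, features)
+#     except CastError as e:
+#         raise ValueError(f"Couldn't cast the table: {e.details()}") from e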
+
+def cast_table_to_features(table: pa.Table, features: "Features"):
+ """Cast a table to the arrow schema that corresponds to the requested features.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to cast.
+ features ([`Features`]):
+ Target features.
+
+ Returns:
+ table (`pyarrow.Table`): the casted table
+ """
+ if sorted(table.column_names) != sorted(features):
+ raise CastError(
+ f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
+ table_column_names=table.column_names,
+ requested_column_names=list(features),
+ )
+ arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+ return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
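+# A minimal usage sketch (illustrative), assuming the `Features` and `Value` types from
+# `datasets.features`:
+#
+#     from .features import Features, Value
+#     table = pa.table({"idx": [1, 2], "text": ["a", "b"]})
+#     features = Features({"idx": Value("string"), "text": Value("string")})
+#     casted = cast_table_to_features(table, features)           # "idx" is cast int64 -> string
+#     # column names must match exactly, otherwise a CastError is raised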
+
+def cast_table_to_schema(table: pa.Table, schema: pa.Schema):
+    """Cast a table to the given arrow schema. Unlike `cast_table_to_features`, this method can preserve nullability.
+
+ Args:
+ table (`pa.Table`):
+ PyArrow table to cast.
+        schema (`pa.Schema`):
+            Target PyArrow schema.
+
+ Returns:
+ `pa.Table`: the casted table
+ """
+ from .features import Features
+
+ features = Features.from_arrow_schema(schema)
+ if sorted(table.column_names) != sorted(features):
+ raise CastError(
+ f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
+ table_column_names=table.column_names,
+ requested_column_names=list(features),
+ )
+ arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+ return pa.Table.from_arrays(arrays, schema=schema)
+
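+# A minimal usage sketch (illustrative): the target schema is converted back to `Features`,
+# so feature types stored in the schema metadata are honored when casting:
+#
+#     table = pa.table({"idx": [1], "text": ["a"]})
+#     schema = pa.schema({"idx": pa.string(), "text": pa.string()})
+#     casted = cast_table_to_schema(table, schema)
+#     assert casted.schema == schema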
+
+def embed_table_storage(table: pa.Table):
+ """Embed external data into a table's storage.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table in which to embed data.
+
+ Returns:
+ table (`pyarrow.Table`): the table with embedded data
+ """
+ from .features.features import Features, require_storage_embed
+
+ features = Features.from_arrow_schema(table.schema)
+ arrays = [
+ embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
+ for name, feature in features.items()
+ ]
+ return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
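+# Usage sketch (illustrative): given a table whose schema metadata describes media features
+# such as `Image` or `Audio` referencing local files, this returns a table whose storage
+# carries the file bytes inline, e.g. before writing a self-contained parquet file:
+#
+#     embedded_table = embed_table_storage(dataset_table)        # `dataset_table` is assumed to
+#                                                                # carry datasets schema metadata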
+
+def table_cast(table: pa.Table, schema: pa.Schema):
+ """Improved version of `pa.Table.cast`.
+
+ It supports casting to feature types stored in the schema metadata.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to cast.
+ schema (`pyarrow.Schema`):
+ Target PyArrow schema.
+
+ Returns:
+ table (`pyarrow.Table`): the casted table
+ """
+ if table.schema != schema:
+ return cast_table_to_schema(table, schema)
+ elif table.schema.metadata != schema.metadata:
+ return table.replace_schema_metadata(schema.metadata)
+ else:
+ return table
+
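+# A minimal usage sketch (illustrative only; the example data is assumed):
+#
+#     table = pa.table({"idx": [1], "text": ["a"]})
+#     assert table_cast(table, table.schema) is table            # no-op when schemas already match
+#     casted = table_cast(table, pa.schema({"idx": pa.string(), "text": pa.string()}))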
+
+def table_flatten(table: pa.Table):
+ """Improved version of `pa.Table.flatten`.
+
+    It behaves like `pa.Table.flatten` in the sense that it does a one-step flattening of the columns with a struct type into one column per struct field,
+    but it also updates the schema metadata and skips decodable features unless the `decode` attribute of these features is set to False.
+
+ Args:
+ table (`pa.Table`):
+ PyArrow table to flatten.
+
+ Returns:
+ `Table`: the flattened table
+ """
+ from .features import Features
+
+ features = Features.from_arrow_schema(table.schema)
+ if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()):
+ flat_arrays = []
+ flat_column_names = []
+ for field in table.schema:
+ array = table.column(field.name)
+ subfeature = features[field.name]
+ if pa.types.is_struct(field.type) and (
+ not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature
+ ):
+ flat_arrays.extend(array.flatten())
+ flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type])
+ else:
+ flat_arrays.append(array)
+ flat_column_names.append(field.name)
+ flat_table = pa.Table.from_arrays(
+ flat_arrays,
+ names=flat_column_names,
+ )
+ else:
+ flat_table = table.flatten()
+ # Preserve complex types in the metadata
+ flat_features = features.flatten(max_depth=2)
+ flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names})
+ return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata)
+
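+# A minimal usage sketch (illustrative): struct columns are split into one column per field,
+# and the flattened feature types are kept in the schema metadata:
+#
+#     table = pa.table({"meta": [{"id": 1, "lang": "en"}], "text": ["hello"]})
+#     flat = table_flatten(table)
+#     # flat.column_names -> ["meta.id", "meta.lang", "text"]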
+
+def table_visitor(table: pa.Table, function: Callable[[pa.Array, "FeatureType"], None]):
+ """Visit all arrays in a table and apply a function to them.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to visit.
+        function (`Callable[[pa.Array, FeatureType], None]`):
+            Function to apply to each array and its corresponding feature.
+ """
+ from .features import Features, Sequence
+
+ features = Features.from_arrow_schema(table.schema)
+
+ def _visit(array, feature):
+ if isinstance(array, pa.ChunkedArray):
+ for chunk in array.chunks:
+ _visit(chunk, feature)
+ else:
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ function(array, feature)
+ if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"):
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length)
+ for name, subfeature in feature.feature.items()
+ }
+ for name, subfeature in feature.items():
+ _visit(array.field(name), subfeature)
+ elif pa.types.is_list(array.type):
+ if isinstance(feature, list):
+ _visit(array.values, feature[0])
+ elif isinstance(feature, Sequence):
+ _visit(array.values, feature.feature)
+
+ for name, feature in features.items():
+ _visit(table[name], feature)
+
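+# A minimal usage sketch (illustrative): the visitor receives each (possibly nested) array
+# together with its feature, so it can e.g. collect all arrow types used in a table:
+#
+#     seen = []
+#     table_visitor(table, lambda arr, feature: seen.append((type(feature).__name__, arr.type)))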
+
+def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to iterate over.
+ batch_size (`int`):
+ Size of each sub-table to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+            Whether to drop the last batch if it is smaller than `batch_size`.
+
+    Yields:
+        `pa.Table`: sub-tables with at most `batch_size` rows each.
+    """
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for chunk in table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ chunks_buffer.append(chunk)
+ yield pa.Table.from_batches(chunks_buffer)
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ yield pa.Table.from_batches(chunks_buffer)
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ yield pa.Table.from_batches(chunks_buffer)
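+# A minimal usage sketch (illustrative only; the example data is assumed):
+#
+#     table = pa.table({"idx": list(range(10))})
+#     sizes = [len(subtable) for subtable in table_iter(table, batch_size=3)]
+#     # sizes -> [3, 3, 3, 1]; with drop_last_batch=True the trailing batch of 1 is dropped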
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda
new file mode 100644
index 0000000000000000000000000000000000000000..527524ed295aba41b9a0448ffd7993c489a2cb99
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary
new file mode 100644
index 0000000000000000000000000000000000000000..f3192156ff043a529461aa9004a8de9dda326f7d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde
new file mode 100644
index 0000000000000000000000000000000000000000..0d0d31a2f092d03f8512ed9c34f36a3f3f21209b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe
new file mode 100644
index 0000000000000000000000000000000000000000..4dab7ef0859c244b916d61b7489d7371881e0ca2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe
new file mode 100644
index 0000000000000000000000000000000000000000..4dab7ef0859c244b916d61b7489d7371881e0ca2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Jan_Mayen b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Jan_Mayen
new file mode 100644
index 0000000000000000000000000000000000000000..7f6d958f8630cba512d8e58ca8edfbd516291522
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Jan_Mayen differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Reykjavik b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Reykjavik
new file mode 100644
index 0000000000000000000000000000000000000000..28b32ab2e0b9053f39a91d9f28b6072e41423954
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Reykjavik differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/South_Georgia b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/South_Georgia
new file mode 100644
index 0000000000000000000000000000000000000000..a2b59a9d1088690cb2f9ad9011bfa59e6cb5c658
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/South_Georgia differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Stanley b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Stanley
new file mode 100644
index 0000000000000000000000000000000000000000..1527d0e1a762674748735a95b6c3034d162f4240
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Stanley differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaNorte b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaNorte
new file mode 100644
index 0000000000000000000000000000000000000000..63dfdf48a68d02240737ecd6af081e02eb0b6317
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaNorte differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaSur b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaSur
new file mode 100644
index 0000000000000000000000000000000000000000..06fa22749d0a3157b5a693f0797086f78e1201c5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/BajaSur differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/General b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/General
new file mode 100644
index 0000000000000000000000000000000000000000..68176daa4976b015fb79026f3053e74e4a7457ab
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Mexico/General differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Apia b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Apia
new file mode 100644
index 0000000000000000000000000000000000000000..e592a68e53f6215de9e1d1ac61ce062a333283c0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Apia differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Auckland b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Auckland
new file mode 100644
index 0000000000000000000000000000000000000000..6575fdce31183d8238b18f2f30ab5b9227c7071c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Auckland differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Bougainville b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Bougainville
new file mode 100644
index 0000000000000000000000000000000000000000..c535acdabda1b6ed96420e589d4f6868d23d8933
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Bougainville differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Chatham b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Chatham
new file mode 100644
index 0000000000000000000000000000000000000000..bde46cf7e4b7909714b93cee39a5d953387d756c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Chatham differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Chuuk b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Chuuk
new file mode 100644
index 0000000000000000000000000000000000000000..7be2474dd91c8a7da181fcda09d838254b890d75
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Chuuk differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Easter b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Easter
new file mode 100644
index 0000000000000000000000000000000000000000..184cb6a83b3392d0492c42297531c85e7e38c4f5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Easter differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Enderbury b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Enderbury
new file mode 100644
index 0000000000000000000000000000000000000000..b1c4b0734483033722c28c8b2884ac9dddd2ab45
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Enderbury differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Fakaofo b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Fakaofo
new file mode 100644
index 0000000000000000000000000000000000000000..4905ea72b1640ca67e35b06395e1c700dffa8d21
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Fakaofo differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Fiji b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Fiji
new file mode 100644
index 0000000000000000000000000000000000000000..acf8091ac85151a6c887e2b5049ee40754b7b0d3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Fiji differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Funafuti b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Funafuti
new file mode 100644
index 0000000000000000000000000000000000000000..47661d40a4188eb39e8d52e5af8ab23ef7f23766
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Funafuti differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Galapagos b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Galapagos
new file mode 100644
index 0000000000000000000000000000000000000000..40051ddf63a32d79e5233b916e3911d6dc3ef759
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Galapagos differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Guadalcanal b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Guadalcanal
new file mode 100644
index 0000000000000000000000000000000000000000..1ab8353464ddb93947f871f07cfd12540373269c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Guadalcanal differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Honolulu b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Honolulu
new file mode 100644
index 0000000000000000000000000000000000000000..c7cd060159bd22fc5e6f10ac5a2089afb2c19c6a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Honolulu differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kanton b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kanton
new file mode 100644
index 0000000000000000000000000000000000000000..b1c4b0734483033722c28c8b2884ac9dddd2ab45
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kanton differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kiritimati b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kiritimati
new file mode 100644
index 0000000000000000000000000000000000000000..b4c6037a2d2a8f89539c3df05c32b6f52b1b1e92
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kiritimati differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kosrae b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kosrae
new file mode 100644
index 0000000000000000000000000000000000000000..0666fb0dd161cb732b29f84d49c49cc985a3559a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kosrae differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Majuro b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Majuro
new file mode 100644
index 0000000000000000000000000000000000000000..47661d40a4188eb39e8d52e5af8ab23ef7f23766
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Majuro differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Marquesas b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Marquesas
new file mode 100644
index 0000000000000000000000000000000000000000..f546c03f96b24859521aab5b9997bfc5dd124ead
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Marquesas differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Midway b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Midway
new file mode 100644
index 0000000000000000000000000000000000000000..cb56709a77dedb471150f4907771bf38f1879ba4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Midway differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Niue b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Niue
new file mode 100644
index 0000000000000000000000000000000000000000..f76972f8849a7d6ed8c3e2649b02f44de891094f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Niue differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Norfolk b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Norfolk
new file mode 100644
index 0000000000000000000000000000000000000000..3b4186d61152629b764efc4222e41647b65f7fbb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Norfolk differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pago_Pago b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pago_Pago
new file mode 100644
index 0000000000000000000000000000000000000000..cb56709a77dedb471150f4907771bf38f1879ba4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pago_Pago differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Palau b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Palau
new file mode 100644
index 0000000000000000000000000000000000000000..1cbebe28afd90c0a2e02786655e18a157284a412
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Palau differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pitcairn b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pitcairn
new file mode 100644
index 0000000000000000000000000000000000000000..5ee90e70203d72484b8752475c86139671bcb102
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pitcairn differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Ponape b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Ponape
new file mode 100644
index 0000000000000000000000000000000000000000..1ab8353464ddb93947f871f07cfd12540373269c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Ponape differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Port_Moresby b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Port_Moresby
new file mode 100644
index 0000000000000000000000000000000000000000..7be2474dd91c8a7da181fcda09d838254b890d75
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Port_Moresby differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Saipan b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Saipan
new file mode 100644
index 0000000000000000000000000000000000000000..66490d25dff9bcc8f710b0141f1a02e64aeb32f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Saipan differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Samoa b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Samoa
new file mode 100644
index 0000000000000000000000000000000000000000..cb56709a77dedb471150f4907771bf38f1879ba4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Samoa differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tarawa b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tarawa
new file mode 100644
index 0000000000000000000000000000000000000000..47661d40a4188eb39e8d52e5af8ab23ef7f23766
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tarawa differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tongatapu b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tongatapu
new file mode 100644
index 0000000000000000000000000000000000000000..c8824ab5434985650f33e12eace1981f8b116207
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tongatapu differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Wallis b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Wallis
new file mode 100644
index 0000000000000000000000000000000000000000..47661d40a4188eb39e8d52e5af8ab23ef7f23766
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Wallis differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Yap b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Yap
new file mode 100644
index 0000000000000000000000000000000000000000..7be2474dd91c8a7da181fcda09d838254b890d75
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Yap differ