Commit 856a31b
Parent(s): 210a627

breaking down download of files
P3.py CHANGED
@@ -41,11 +41,50 @@ _HOMEPAGE = "https://github.com/bigscience-workshop/promptsource"
 _DATA_PATH = "data"


-def load_cached_task(cache_dir, split):
-    # TODO(Victor): this info.*.json is actually done twice... -> factorize
-    with tf.io.gfile.GFile(os.path.join(cache_dir, f"info.{split}.json")) as f:
-        split_info = json.load(f)
-        features = split_info["features"]
+# def load_cached_task(cache_dir, split):
+#     # TODO(Victor): this info.*.json is actually done twice... -> factorize
+#     with tf.io.gfile.GFile(os.path.join(cache_dir, f"info.{split}.json")) as f:
+#         split_info = json.load(f)
+#         features = split_info["features"]
+
+#     # Use `FixedLenSequenceFeature` for sequences with variable length.
+#     def _feature_config(shape, dtype):
+#         if dtype in ("int32", "bool"):
+#             # int32 and bool are stored as int64 in the tf.train.Example protobuf.
+#             dtype = "int64"
+#         if shape and shape[0] is None:
+#             return tf.io.FixedLenSequenceFeature(
+#                 shape[1:], dtype, allow_missing=True
+#             )
+#         return tf.io.FixedLenFeature(shape, dtype)
+
+#     feature_description = {
+#         feat: _feature_config(**desc) for feat, desc in features.items()
+#     }
+
+#     tfrecords = os.path.join(
+#         cache_dir, f"{split}.tfrecord-*-of-*{split_info['num_shards']}"
+#     )
+#     ds = tf.data.TFRecordDataset(tf.io.gfile.glob(tfrecords))
+#     ds = ds.map(
+#         lambda pb: tf.io.parse_single_example(pb, feature_description),
+#         num_parallel_calls=tf.data.experimental.AUTOTUNE
+#     )
+#     # Cast features back to the types from the info JSON since some features
+#     # must be cast for storage (e.g., int32 is stored as int64).
+#     ds = ds.map(
+#         lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
+#         num_parallel_calls=tf.data.experimental.AUTOTUNE
+#     )
+#     return ds
+
+def load_cached_task(features_file, tfrecord, split):
+    # # TODO(Victor): this info.*.json is actually done twice... -> factorize
+    # with tf.io.gfile.GFile(os.path.join(cache_dir, f"info.{split}.json")) as f:
+    #     split_info = json.load(f)
+    #     features = split_info["features"]
+    with tf.io.gfile.GFile(features_file) as f:
+        features = json.load(f)

 # Use `FixedLenSequenceFeature` for sequences with variable length.
 def _feature_config(shape, dtype):
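The refactored load_cached_task no longer globs a cache directory for shards; it receives the features JSON and the single tfrecord shard directly. A minimal usage sketch, assuming hypothetical local paths (real values are produced by dl_manager.download_and_extract(_URLs) in _split_generators below):

    # Hypothetical paths, for illustration only.
    features_file = "data/some_task/info.train.json"
    tfrecord = "data/some_task/train.tfrecord-00000-of-00001"

    ds = load_cached_task(features_file, tfrecord, "train")
    for ex in ds.take(2).as_numpy_iterator():
        print({feat: type(value) for feat, value in ex.items()})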
@@ -62,10 +101,10 @@ def load_cached_task(cache_dir, split):
         feat: _feature_config(**desc) for feat, desc in features.items()
     }

-    tfrecords = os.path.join(
-        cache_dir, f"{split}.tfrecord-*-of-*{split_info['num_shards']}"
-    )
-    ds = tf.data.TFRecordDataset(tf.io.gfile.glob(tfrecords))
+    # tfrecords = os.path.join(
+    #     cache_dir, f"{split}.tfrecord-*-of-*{split_info['num_shards']}"
+    # )
+    ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord]))
     ds = ds.map(
         lambda pb: tf.io.parse_single_example(pb, feature_description),
         num_parallel_calls=tf.data.experimental.AUTOTUNE
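For context on the feature_description built above: _feature_config turns each schema entry from info.<split>.json into a tf.io feature spec. A self-contained sketch of that mapping, using an assumed schema entry rather than P3's actual one:

    import tensorflow as tf

    def _feature_config(shape, dtype):
        # int32 and bool are stored as int64 in the tf.train.Example protobuf.
        if dtype in ("int32", "bool"):
            dtype = "int64"
        if shape and shape[0] is None:
            # A leading None dimension marks a variable-length sequence.
            return tf.io.FixedLenSequenceFeature(shape[1:], dtype, allow_missing=True)
        return tf.io.FixedLenFeature(shape, dtype)

    # Assumed schema entry: a variable-length int32 feature.
    spec = _feature_config(shape=[None], dtype="int32")
    # spec: FixedLenSequenceFeature(shape=[], dtype="int64", allow_missing=True)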
@@ -78,7 +117,6 @@ def load_cached_task(cache_dir, split):
     )
     return ds

-
 def find_task_splits_and_features():
     """Find the available tasks under ./data and their available splits and features."""
     task_and_their_splits = defaultdict(dict)
@@ -100,6 +138,7 @@ def find_task_splits_and_features():
             with open(os.path.join(folder_path, f"info.{split_name}.json")) as f:
                 split_info = json.load(f)
             features = split_info["features"]
+            assert split_info["num_shards"] == 1

             # All splits under the same task have the same features dictionary (and thus the same features list)
             if task_and_their_splits[task_name] == {}:
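The new assert pins every split to exactly one shard, which is what lets the _URLs mapping in the next hunk hardcode the shard suffix instead of globbing. A sketch of the TFRecord shard-naming convention it relies on:

    def shard_filename(split, index, num_shards):
        # TFRecord shards are conventionally named <split>.tfrecord-XXXXX-of-NNNNN.
        return f"{split}.tfrecord-{index:05d}-of-{num_shards:05d}"

    # With num_shards == 1 the only shard is:
    assert shard_filename("train", 0, 1) == "train.tfrecord-00000-of-00001"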
@@ -118,7 +157,16 @@ def find_task_splits_and_features():


 _TASK_SPLITS_AND_FEATURES = find_task_splits_and_features()
-_URLs = {
+_URLs = {
+    task_name: {
+        split_name: {
+            "tfrecord": f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",
+            "features_file": f"{_DATA_PATH}/{task_name}/info.{split_name}.json",
+        }
+        for split_name in splits_and_features["splits"]
+    }
+    for task_name, splits_and_features in _TASK_SPLITS_AND_FEATURES.items()
+}


 class P3Config(datasets.BuilderConfig):
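Spelled out, _URLs now maps task name -> split name -> the two files that split needs. For a hypothetical task with train and validation splits, the comprehension would produce:

    {
        "some_task": {
            "train": {
                "tfrecord": "data/some_task/train.tfrecord-00000-of-00001",
                "features_file": "data/some_task/info.train.json",
            },
            "validation": {
                "tfrecord": "data/some_task/validation.tfrecord-00000-of-00001",
                "features_file": "data/some_task/info.validation.json",
            },
        },
    }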
@@ -184,13 +232,13 @@ class P3(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         split_generators = []
         data_dir = dl_manager.download_and_extract(_URLs)
-        import pdb; pdb.set_trace()
         if "train" in self.config.splits:
             split_generators.append(
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "
+                        "features_file": data_dir["features_file"],
+                        "tfrecord": data_dir["tfrecord"],
                         "split": "train",
                     }
                 )
@@ -200,7 +248,8 @@ class P3(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "
+                        "features_file": data_dir["features_file"],
+                        "tfrecord": data_dir["tfrecord"],
                         "split": "validation",
                     }
                 )
@@ -210,7 +259,8 @@ class P3(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "
+                        "features_file": data_dir["features_file"],
+                        "tfrecord": data_dir["tfrecord"],
                         "split": "test",
                     }
                 )
@@ -222,7 +272,8 @@ class P3(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split(special_split_name),
                     gen_kwargs={
-                        "
+                        "features_file": data_dir["features_file"],
+                        "tfrecord": data_dir["tfrecord"],
                         "split": special_split_name,
                     }
                 )
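All four SplitGenerator branches now pass the same two extra keys. In the datasets library, each SplitGenerator's gen_kwargs dict is unpacked as keyword arguments when the builder later calls _generate_examples, which is why its signature changes in the next hunk. A sketch of that contract, with hypothetical values:

    gen_kwargs = {
        "features_file": "data/some_task/info.test.json",  # hypothetical path
        "tfrecord": "data/some_task/test.tfrecord-00000-of-00001",
        "split": "test",
    }
    # The library effectively performs:
    # for key, example in builder._generate_examples(**gen_kwargs): ...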
@@ -230,7 +281,7 @@ class P3(datasets.GeneratorBasedBuilder):
         return split_generators


-    def _generate_examples(self,
+    def _generate_examples(self, features_file, tfrecord, split):
         """This function returns the examples in the raw (text) form."""
         _FEAT_MAPPING_FUNCTIONS = {
             "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
@@ -244,7 +295,7 @@ class P3(datasets.GeneratorBasedBuilder):
         }

         key = 0
-        ds = load_cached_task(
+        ds = load_cached_task(features_file, tfrecord, split)
         for ex in ds.as_numpy_iterator():
             ex_dict = {}
             for feat_name, feat_value in ex.items():
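The generator then walks the parsed dataset as numpy values, where TensorFlow returns string features as bytes that must be decoded before yielding. A sketch of the decoding pattern this loop applies, with assumed feature names:

    for ex in ds.as_numpy_iterator():
        ex_dict = {}
        for feat_name, feat_value in ex.items():
            if feat_name == "answer_choices":
                # List of byte strings -> list of str.
                ex_dict[feat_name] = [choice.decode("utf-8") for choice in feat_value]
            elif isinstance(feat_value, bytes):
                # Assumed: plain text features arrive as single byte strings.
                ex_dict[feat_name] = feat_value.decode("utf-8")
            else:
                ex_dict[feat_name] = feat_value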