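"""Loading script for the tomatotest dataset: stereo RGB pairs with semantic
segmentation, instance segmentation, and depth maps, stored as h5 files inside
tar archives on the Hugging Face Hub."""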
import io
import os

import h5py
import numpy as np
from PIL import Image

import datasets
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Split, SplitGenerator
from huggingface_hub import HfFileSystem
class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig with extra options controlling download and extraction."""

    def __init__(self, extract=False, remove_tar=False, chunk=1, **kwargs):
        super().__init__(**kwargs)
        self.dataset_type = self.name   # the config name selects which modalities to load
        self.extract = extract          # if True, pre-extract every tar archive to h5 files
        self.remove_tar = remove_tar    # if True, delete each tar (and its cached blob) after extraction
        self.chunk = chunk              # number of tar files to download per extraction batch
_metadata_urls = {
    "train": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/train.txt",
    "val": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/val.txt",
}
class RGBSemanticDepthDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        CustomConfig(name="full", version="1.0.0",
                     description="download and extract the dataset to h5 pairs (all tar files automatically removed by default, 160GB)",
                     extract=True, remove_tar=True, chunk=1),
        CustomConfig(name="sample", version="1.0.0",
                     description="load both segmentation and depth (for 1 tar file, 870MB)",
                     extract=False, remove_tar=False, chunk=1),
        CustomConfig(name="depth", version="1.0.0",
                     description="only load depth (sample)",
                     extract=False, remove_tar=False, chunk=1),
        CustomConfig(name="seg", version="1.0.0",
                     description="only load segmentation (sample)",
                     extract=False, remove_tar=False, chunk=1),
    ]
    BUILDER_CONFIG_CLASS = CustomConfig
    def _info(self):
        # Depth maps are float32 arrays and segmentation maps are integer
        # arrays; all six fields are encoded through the Image feature.
        return DatasetInfo(
            features=Features({
                "left_rgb": datasets.Image(),
                "right_rgb": datasets.Image(),
                "left_semantic": datasets.Image(),
                "left_instance": datasets.Image(),
                "left_depth": datasets.Image(),
                "right_depth": datasets.Image(),
            })
        )
    def _h5_loader(self, fileobj, type_dataset):
        # Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L8-L13
        file_bytes = io.BytesIO(fileobj.read())
        with h5py.File(file_bytes, "r") as h5f:
            left_rgb = self._read_jpg(h5f["rgb_left"][:])
            if type_dataset == "depth":
                right_rgb = self._read_jpg(h5f["rgb_right"][:])
                left_depth = h5f["depth_left"][:].astype(np.float32)
                right_depth = h5f["depth_right"][:].astype(np.float32)
                return left_rgb, right_rgb, np.zeros((1, 1)), np.zeros((1, 1)), left_depth, right_depth
            elif type_dataset == "seg":
                seg_left = h5f["seg_left"][:]
                # Channel 2 holds the semantic class; channels 0 and 1 pack the
                # instance id as a 16-bit integer (low byte, high byte).
                left_semantic = seg_left[:, :, 2]
                left_instance = seg_left[:, :, 0] + seg_left[:, :, 1] * 256
                return left_rgb, np.zeros((1, 1)), left_semantic, left_instance, np.zeros((1, 1)), np.zeros((1, 1))
            else:
                right_rgb = self._read_jpg(h5f["rgb_right"][:])
                seg_left = h5f["seg_left"][:]
                left_semantic = seg_left[:, :, 2]
                left_instance = seg_left[:, :, 0] + seg_left[:, :, 1] * 256
                left_depth = h5f["depth_left"][:].astype(np.float32)
                right_depth = h5f["depth_right"][:].astype(np.float32)
                return left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth
    def _read_jpg(self, bytes_stream):
        # The h5 datasets store JPEG-encoded images as raw byte arrays.
        return Image.open(io.BytesIO(bytes_stream))
    def _split_generators(self, dl_manager):
        if self.config.dataset_type == "full":
            dataset_names = self._get_dataset_filenames()
            if self.config.extract:
                # Download and extract the archives in chunks, so that at most
                # `chunk` tar files occupy the download cache at any one time.
                dataset_chunk_list = [dataset_names[i:i + self.config.chunk]
                                      for i in range(0, len(dataset_names), self.config.chunk)]
                for dataset_chunk in dataset_chunk_list:
                    print(f"Downloading chunk of {self.config.chunk}: {dataset_chunk}")
                    archives = dl_manager.download({"train": dataset_chunk,
                                                    "val": dataset_chunk})
                    for archive in archives["train"]:
                        extracted_dir = dl_manager.extract(archive)
                        print(f"\tExtracted {archive} to {extracted_dir}")
                        if self.config.remove_tar and os.path.exists(archive):
                            os.remove(archive)
                            print(f"\tDeleted tar file {archive}")
                        # The cached snapshot path typically symlinks into the
                        # repo's blobs folder; remove the blobs as well so the
                        # disk space is actually freed.
                        blob_folder = '/'.join(archive.replace("snapshots", "blobs").split('/')[:-3])
                        if self.config.remove_tar and os.path.exists(blob_folder):
                            for filename in os.listdir(blob_folder):
                                os.remove(os.path.join(blob_folder, filename))
                            print(f"\tDeleted blobs in {blob_folder}")
                # With extract=True this config only prepares the local cache.
                print("All extracted. exiting")
                exit()
            archives = dl_manager.download({"train": dataset_names,
                                            "val": dataset_names})
        else:
            # Sample configs only download the first two tar archives.
            sample_names = self._get_dataset_filenames()[0:2]
            archives = dl_manager.download({"train": sample_names,
                                            "val": sample_names})
        split_metadata = dl_manager.download(_metadata_urls)
        train_archives = [dl_manager.iter_archive(archive) for archive in archives["train"]]
        val_archives = [dl_manager.iter_archive(archive) for archive in archives["val"]]
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "archives": train_archives,
                    "split_txt": split_metadata["train"],
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "archives": val_archives,
                    "split_txt": split_metadata["val"],
                },
            ),
        ]
    def _generate_examples(self, archives, split_txt):
        with open(split_txt, encoding="utf-8") as split_f:
            all_splits = set(split_f.read().split('\n'))
        for archive in archives:
            for path, file in archive:
                # Skip image pairs not listed in train.txt / val.txt
                # ([:-3] strips the ".h5" extension from the filename).
                if path.split('/')[-1][:-3] not in all_splits:
                    continue
                left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth = \
                    self._h5_loader(file, self.config.dataset_type)
                yield path, {
                    "left_rgb": left_rgb,
                    "right_rgb": right_rgb,
                    "left_semantic": left_semantic,
                    "left_instance": left_instance,
                    "left_depth": left_depth,
                    "right_depth": right_depth,
                }
    def _get_dataset_filenames(self):
        # List every "data/*.tar" archive in the dataset repo.
        fs = HfFileSystem()
        all_files = fs.ls("datasets/xingjianli/tomatotest/data")
        return sorted('/'.join(f['name'].split('/')[-2:]) for f in all_files)
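

# A minimal usage sketch (an illustrative assumption, not part of the loader):
# the "sample" config streams two tar archives, while "full" downloads and
# extracts the entire 160GB dataset. `trust_remote_code=True` may be required
# on recent versions of `datasets`, since this is a script-based dataset.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("XingjianLi/tomatotest", "sample", trust_remote_code=True)
    example = ds["train"][0]
    # left_rgb decodes to a PIL image; left_depth decodes to a float32 image.
    print(example["left_rgb"].size, np.array(example["left_depth"]).dtype)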