import io
import os

import datasets
import h5py
import numpy as np
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Split, SplitGenerator
from huggingface_hub import HfFileSystem
from PIL import Image


class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the download/extraction options for this dataset."""

    def __init__(self, extract=False, remove_tar=False, chunk=1, **kwargs):
        super().__init__(**kwargs)
        # The config name ("full", "sample", "depth", "seg") selects which
        # modalities are decoded in _generate_examples.
        self.dataset_type = kwargs.get("name", "all")
        self.extract = extract        # extract downloaded tar archives to .h5 files
        self.remove_tar = remove_tar  # delete tar archives (and Hub blobs) after extraction
        self.chunk = chunk            # number of tar files downloaded/extracted per batch


# Per-split lists of sample ids, used in _generate_examples to filter the .h5
# files found inside each tar archive.
_metadata_urls = {
    "train": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/train.txt",
    "val": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/val.txt",
}


class RGBSemanticDepthDataset(GeneratorBasedBuilder):
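    """Builder for a stereo RGB / semantic / instance segmentation / depth dataset.

    Each example is an .h5 file packed inside tar shards hosted on the Hub; the
    selected config name controls which modalities are decoded.
    """
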
    BUILDER_CONFIGS = [
        CustomConfig(name="full", version="1.0.0", description="download and extract the dataset to h5 pairs (all tar files automatically removed by default, 160GB)",
                     extract=True, remove_tar=True, chunk=1),
        CustomConfig(name="sample", version="1.0.0", description="load both segmentation and depth (for 1 tar file, 870MB)",
                     extract=False, remove_tar=False, chunk=1),
        CustomConfig(name="depth", version="1.0.0", description="only load depth (sample)",
                     extract=False, remove_tar=False, chunk=1),
        CustomConfig(name="seg", version="1.0.0", description="only load segmentation (sample)",
                     extract=False, remove_tar=False, chunk=1),
    ]    # Configs initialization
    BUILDER_CONFIG_CLASS = CustomConfig
    def _info(self):
        return DatasetInfo(
            features=Features({
                "left_rgb": datasets.Image(),
                "right_rgb": datasets.Image(),
                "left_semantic": datasets.Image(),
                "left_instance": datasets.Image(),
                "left_depth": datasets.Image(),
                "right_depth": datasets.Image(),
            })
        )
    def _h5_loader(self, fileobj, type_dataset):
        # Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L8-L13
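        # Each .h5 sample holds JPEG-encoded stereo RGB ('rgb_left'/'rgb_right'),
        # a packed segmentation map ('seg_left': channel 2 = semantic class,
        # channels 0 and 1 = low/high byte of the instance id) and float32 depth
        # maps. Modalities not needed by the chosen config are returned as 1x1
        # zero placeholders so the feature schema stays fixed.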
        file_bytes = fileobj.read()
        file_bytes = io.BytesIO(file_bytes)
        with h5py.File(file_bytes, "r") as h5f:
            left_rgb = self._read_jpg(h5f['rgb_left'][:])
            if type_dataset == 'depth':
                right_rgb = self._read_jpg(h5f['rgb_right'][:])
                left_depth = h5f['depth_left'][:].astype(np.float32)
                right_depth = h5f['depth_right'][:].astype(np.float32)
                return left_rgb, right_rgb, np.zeros((1,1)), np.zeros((1,1)), left_depth, right_depth
            elif type_dataset == 'seg':
                seg_left = h5f['seg_left'][:]
                left_semantic = seg_left[:,:,2]
                left_instance = seg_left[:,:,0] + seg_left[:,:,1] * 256
                return left_rgb, np.zeros((1,1)), left_semantic, left_instance, np.zeros((1,1)), np.zeros((1,1))
            else:
                right_rgb = self._read_jpg(h5f['rgb_right'][:])
                seg_left = h5f['seg_left'][:]
                left_semantic = seg_left[:,:,2]
                left_instance = seg_left[:,:,0] + seg_left[:,:,1] * 256
                left_depth = h5f['depth_left'][:].astype(np.float32)
                right_depth = h5f['depth_right'][:].astype(np.float32)
                return left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth
    def _read_jpg(self, bytes_stream):
        img = Image.open(io.BytesIO(bytes_stream))
        return img
    
    def _split_generators(self, dl_manager):
        if self.config.dataset_type == 'full':
            dataset_names = self._get_dataset_filenames()
            if self.config.extract:
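                # Download the tar shards self.config.chunk at a time, extract
                # each one to .h5 files, then optionally delete the tar (and its
                # Hub blob) to keep peak disk usage low.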
                dataset_chunk_list = [dataset_names[i:i+self.config.chunk] for i in range(0, len(dataset_names), self.config.chunk)]
                
                for dataset_chunk in dataset_chunk_list:
                    print(f"Downloading {len(dataset_chunk)} archive(s): {dataset_chunk}")
                    archives = dl_manager.download({"train": dataset_chunk,
                                                    "val": dataset_chunk})
                    for archive in archives["train"]:
                        extracted_dir = dl_manager.extract(archive)
                        print(f"\tExtracted {archive} to {extracted_dir}")
                        if self.config.remove_tar and os.path.exists(archive):
                            os.remove(archive)
                            print(f"\tDeleted tar file {archive}")
                    # The cached download path goes through .../snapshots/...;
                    # the actual bytes live in the matching .../blobs/ folder.
                    blob_folder = '/'.join(archive.replace("snapshots", "blobs").split('/')[:-3])

                    if self.config.remove_tar and os.path.exists(blob_folder):
                        for filename in os.listdir(blob_folder):
                            os.remove(os.path.join(blob_folder, filename))
                        print(f"\tDeleted blob folder {blob_folder}")
                print("All archives extracted; exiting without building splits.")
                exit()
            archives = dl_manager.download({"train":self._get_dataset_filenames(),
                                            "val":self._get_dataset_filenames()})
        else:
            archives = dl_manager.download({"train":self._get_dataset_filenames()[0:2],
                                            "val":self._get_dataset_filenames()[0:2]})
        split_metadata = dl_manager.download(_metadata_urls)

        train_archives = [dl_manager.iter_archive(archive) for archive in archives["train"]]
        val_archives = [dl_manager.iter_archive(archive) for archive in archives["val"]]
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "archives": train_archives,
                    "split_txt": split_metadata["train"]
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "archives": val_archives,
                    "split_txt": split_metadata["val"]
                },
            ),
        ]

    def _generate_examples(self, archives, split_txt):      
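        # split_txt lists the sample ids (h5 file stems) that belong to this
        # split; any .h5 file in the archives that is not listed is skipped.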
        with open(split_txt, encoding="utf-8") as split_f:
            all_splits = set(split_f.read().split('\n'))
        for archive in archives:
            for path, file in archive:
                if path.split('/')[-1][:-3] not in all_splits:
                    # skip the image pairs not in train.txt or val.txt
                    continue
                left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth = self._h5_loader(file, self.config.dataset_type)
                yield path, {
                    "left_rgb": left_rgb,
                    "right_rgb": right_rgb,
                    "left_semantic": left_semantic,
                    "left_instance": left_instance,
                    "left_depth": left_depth,
                    "right_depth": right_depth,
                }
    def _get_dataset_filenames(self):
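        # List the tar shards stored under data/ in the Hub repo and return
        # them as repo-relative paths ("data/<filename>").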
        fs = HfFileSystem()
        all_files = fs.ls("datasets/xingjianli/tomatotest/data")
        filenames = sorted(['/'.join(f['name'].split('/')[-2:]) for f in all_files])
        return filenames
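

# Minimal usage sketch (an assumption, not part of the loading script): with
# this file hosted in the XingjianLi/tomatotest dataset repo, the configs above
# can be loaded through the standard `datasets` API (recent versions require
# trust_remote_code=True for script-based datasets), e.g.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("XingjianLi/tomatotest", "sample", trust_remote_code=True)
#   example = next(iter(ds["train"]))
#   example["left_rgb"]  # PIL image decoded from the .h5 pair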