Datasets:
Tasks:
Image Segmentation
Modalities:
Image
Formats:
parquet
Sub-tasks:
semantic-segmentation
Languages:
English
Size:
10K - 100K
License:
File size: 4,146 Bytes
Commit: 28f8c87
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from typing import List
from cleanvision import Imagelab
from PIL import Image
from datasets import Dataset, Features, Image as ImageFeature
def images_with_deduplication(data_path):
    """Return paths of all PNG images under *data_path*, near-duplicates removed.

    Runs cleanvision's duplicate detection over the directory, then keeps
    only the first image of every reported near-duplicate set.

    Args:
        data_path: Directory containing the image dataset.

    Returns:
        List of absolute image-path strings, one representative per
        near-duplicate set, in ``rglob`` discovery order.
    """
    imagelab = Imagelab(data_path=data_path)
    # Automatically check for a predefined list of issues within the dataset.
    imagelab.find_issues({"near_duplicates": {}, "exact_duplicates": {}})

    # Collect every PNG in the tree as an absolute path string.
    image_paths = [str(path.resolve()) for path in Path(data_path).rglob("*.png")]
    print(f"Number of images before deduplication: {len(image_paths)}")

    duplicate_sets = imagelab.info["near_duplicates"]["sets"]
    num_duplicates = sum(len(duplicate_set) - 1 for duplicate_set in duplicate_sets)

    # Keep the first entry of each duplicate set, drop the rest.  Set-based
    # filtering is O(n) overall and, unlike list.index()/del, does not raise
    # ValueError if a reported duplicate is not in our PNG listing.
    to_remove = {name for dup_set in duplicate_sets for name in dup_set[1:]}
    image_paths = [path for path in image_paths if path not in to_remove]

    print(f"Number of images after deduplication: {len(image_paths)}")
    print(f"Number of images removed: {num_duplicates}")
    return image_paths
def find_closest_pair(ref_timestamp, search_dir, threshold_ms=100):
    """Find the file in *search_dir* whose timestamp is closest to *ref_timestamp*.

    File names are expected to begin with an integer timestamp followed by an
    underscore (e.g. ``1234567890_color.png``).  The ``/ 1e6`` conversion
    suggests timestamps are nanoseconds, compared in milliseconds — assumption
    inferred from the math, confirm against the data producer.

    Args:
        ref_timestamp: Reference timestamp (same unit as the file-name stems).
        search_dir: Directory (``pathlib.Path``) holding candidate files.
        threshold_ms: Maximum allowed time difference in milliseconds.

    Returns:
        Path string of the closest file, or ``None`` when the directory is
        empty or no file falls within the threshold.
    """
    search_files = list(search_dir.glob("*"))
    if not search_files:
        # np.argmin raises on an empty array; treat "no files" as "no match".
        return None
    search_timestamps = [int(f.stem.split("_")[0]) for f in search_files]
    # Absolute difference, converted from the raw unit to milliseconds.
    diffs = np.abs(np.array(search_timestamps) - ref_timestamp) / 1e6
    min_idx = int(np.argmin(diffs))
    if diffs[min_idx] <= threshold_ms:
        return str(search_files[min_idx])
    return None
def find_image_groups(
    base_dir, ref_subdir, search_subdirs: List[str], threshold_ms=100
):
    """Group each deduplicated reference image with its closest timestamped
    counterpart from every search sub-directory.

    Args:
        base_dir: Root directory of the capture session.
        ref_subdir: Sub-directory whose images drive the grouping.
        search_subdirs: Sub-directories searched for timestamp matches.
        threshold_ms: Maximum timestamp difference (ms) for a match.

    Returns:
        List of tuples ``(ref_path, match_1, match_2, ...)``; a match is
        ``None`` when no file in that directory is within the threshold.

    Raises:
        AssertionError: If any search sub-directory does not exist.
    """
    base_path = Path(base_dir)
    ref_dir = base_path / ref_subdir
    search_dirs = [base_path / subdir for subdir in search_subdirs]
    # Validate once up front instead of re-asserting on every reference file.
    for search_dir in search_dirs:
        assert search_dir.exists(), f"{search_dir} does not exist"
    # Deduplicate images from the reference directory.
    ref_dir_files = images_with_deduplication(ref_dir)
    pairs = []
    for ref_file in ref_dir_files:
        # Path(...).name is portable; splitting on "/" breaks on Windows.
        ref_ts = int(Path(ref_file).name.split("_")[0])
        image_group = (ref_file,)
        for search_dir in search_dirs:
            match = find_closest_pair(ref_ts, search_dir, threshold_ms)
            image_group += (match,)  # None when nothing is close enough
        pairs.append(image_group)
    return pairs
def visualize_images(image_tuple):
    """Display a group of related images side by side.

    Colormap is chosen from markers in the file path: "DEPTH" → viridis,
    "THERMAL" → hot, anything else → default.  ``None`` entries (missing
    matches) render as a blank, axis-less panel.

    Args:
        image_tuple: Tuple of image paths (str or None), one per modality.
    """
    n = len(image_tuple)
    fig, axes = plt.subplots(1, n, figsize=(6 * n, 4))
    if n == 1:
        # plt.subplots returns a bare Axes (not an array) when n == 1.
        axes = [axes]
    for ax, img_path in zip(axes, image_tuple):
        if img_path is None:
            ax.axis("off")
            continue
        # Open once; the original re-opened the image in the default branch.
        img = Image.open(img_path)
        if "DEPTH" in str(img_path):
            ax.imshow(img, cmap="viridis")
        elif "THERMAL" in str(img_path):
            ax.imshow(img, cmap="hot")
        else:
            ax.imshow(img)
        # Title with the parent directory name (the modality), portably.
        ax.set_title(Path(img_path).parent.name)
    plt.show()
# prepare the dataset for upload to huggingface
def create_image_dataset(image_tuples):
    """
    Create a HuggingFace dataset from a list of image tuples.

    Args:
        image_tuples: List of tuples, each containing
            (color, depth, depth_16bit, thermal, thermal_rgb) image paths.

    Returns:
        A ``datasets.Dataset`` with one decoded-image column per modality;
        empty (but correctly typed) when *image_tuples* is empty.
    """
    features = Features(
        {
            "color": ImageFeature(decode=True),
            "depth": ImageFeature(decode=True),
            "depth_16bit": ImageFeature(decode=True),
            "thermal": ImageFeature(decode=True),
            "thermal_rgb": ImageFeature(decode=True),
        }
    )
    if not image_tuples:
        # zip(*[]) would raise ValueError on unpacking; build empty columns.
        dataset_dict = {name: [] for name in features}
    else:
        # Unzip the tuples into one list per column.
        color_imgs, depth_imgs, depth_16bit_imgs, thermal_imgs, thermal_rgb_imgs = zip(
            *image_tuples
        )
        dataset_dict = {
            "color": list(color_imgs),
            "depth": list(depth_imgs),
            "depth_16bit": list(depth_16bit_imgs),
            "thermal": list(thermal_imgs),
            "thermal_rgb": list(thermal_rgb_imgs),
        }
    return Dataset.from_dict(dataset_dict, features=features)