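"""Gradio app that converts a Roboflow segmentation dataset into YOLOv8
bounding-box labels.

Given a Roboflow API key and a dataset URL, the app downloads the dataset in
COCO segmentation format, derives axis-aligned bounding boxes from the polygon
annotations, writes YOLO-format label files next to copies of the training
images, and shows before/after example galleries.
"""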
import os
import json
import math
import random
import shutil
import tempfile
from urllib.parse import urlparse
from PIL import Image
import cv2
import numpy as np
import gradio as gr
from roboflow import Roboflow


def parse_roboflow_url(url):
    """Extract workspace, project name, and version from a Roboflow URL."""
    parsed = urlparse(url)
    parts = parsed.path.strip('/').split('/')
    # Expect at least [workspace, project, ..., version]
    workspace = parts[0]
    project = parts[1]
    try:
        version = int(parts[-1])
    except ValueError:
        version = int(parts[-2])
    return workspace, project, version
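
# Illustrative only (hypothetical URL): a Roboflow Universe link such as
#   https://universe.roboflow.com/my-workspace/my-project/dataset/3
# parses to ("my-workspace", "my-project", 3).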


def convert_seg_to_bbox(api_key, dataset_url):
    # Initialize Roboflow client
    rf = Roboflow(api_key=api_key)
    workspace, project_name, version = parse_roboflow_url(dataset_url)
    project = rf.workspace(workspace).project(project_name)
    version_obj = project.version(version)
    # Download the segmentation dataset in COCO format
    dataset = version_obj.download("coco-segmentation")
    root = dataset.location  # root of downloaded dataset
    # Locate the COCO train annotations; the exact layout can vary between
    # exports, so try the per-split layout first, then the flat one.
    candidate_files = [
        os.path.join(root, "train", "_annotations.coco.json"),
        os.path.join(root, "coco-annotations", "train.json"),
    ]
    ann_file = next((p for p in candidate_files if os.path.isfile(p)), candidate_files[-1])
    with open(ann_file, 'r') as f:
        coco = json.load(f)
    images_info = {img['id']: img for img in coco['images']}
    # Map original category IDs to contiguous YOLO class indices
    categories = coco.get('categories', [])
    cat_ids = sorted(cat['id'] for cat in categories)
    id_to_index = {cid: idx for idx, cid in enumerate(cat_ids)}
    # Prepare output directories for YOLOv8 dataset
    out_root = tempfile.mkdtemp(prefix="yolov8_")
    img_out = os.path.join(out_root, "images")
    lbl_out = os.path.join(out_root, "labels")
    os.makedirs(img_out, exist_ok=True)
    os.makedirs(lbl_out, exist_ok=True)
    # Build YOLO annotation strings grouped by image
    annos = {}
    for anno in coco['annotations']:
        img_id = anno['image_id']
        segs = anno.get('segmentation')
        if not segs or not isinstance(segs, list):
            # Skip annotations without polygon segmentations (e.g. RLE masks)
            continue
        # The bounding box must cover every polygon part of the instance
        xs = [x for poly in segs for x in poly[0::2]]
        ys = [y for poly in segs for y in poly[1::2]]
        x_min, x_max = min(xs), max(xs)
        y_min, y_max = min(ys), max(ys)
        width = x_max - x_min
        height = y_max - y_min
        cx = x_min + width / 2
        cy = y_min + height / 2
        info = images_info[img_id]
        img_w, img_h = info['width'], info['height']
        cxn = cx / img_w
        cyn = cy / img_h
        wnorm = width / img_w
        hnorm = height / img_h
        cls_idx = id_to_index[anno['category_id']]
        line = f"{cls_idx} {cxn:.6f} {cyn:.6f} {wnorm:.6f} {hnorm:.6f}"
        annos.setdefault(img_id, []).append(line)
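    # Each label line follows the YOLO detection format:
    #   <class-index> <x-center> <y-center> <width> <height>
    # with all coordinates normalized to [0, 1], e.g. (hypothetical values):
    #   0 0.512345 0.431200 0.210000 0.150000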
    # Determine train images directory
    train_img_dir = os.path.join(root, "train", "images")
    if not os.path.isdir(train_img_dir):
        train_img_dir = os.path.join(root, "train")
    # Map filenames to image IDs
    name_to_id = {img['file_name']: img['id'] for img in coco['images']}
    # Copy images and write YOLO label files
    for fname, img_id in name_to_id.items():
        src_img = os.path.join(train_img_dir, fname)
        if not os.path.isfile(src_img):
            continue
        dst_img = os.path.join(img_out, fname)
        shutil.copy(src_img, dst_img)
        lbl_path = os.path.join(lbl_out, os.path.splitext(fname)[0] + ".txt")
        with open(lbl_path, 'w') as lf:
            for line in annos.get(img_id, []):
                lf.write(line + '\n')
    # Prepare before/after example galleries
    before_imgs, after_imgs = [], []
    # Sample only from images that were actually found on disk
    available = [f for f in name_to_id if os.path.isfile(os.path.join(train_img_dir, f))]
    example_files = random.sample(available, min(5, len(available)))
    for fname in example_files:
        src_img = os.path.join(train_img_dir, fname)
        img = cv2.cvtColor(cv2.imread(src_img), cv2.COLOR_BGR2RGB)
        # Overlay segmentation polygons
        seg_vis = img.copy()
        img_id = name_to_id[fname]
        for anno in coco['annotations']:
            if anno['image_id'] != img_id:
                continue
            segs = anno.get('segmentation')
            if not segs or not isinstance(segs, list):
                continue
            for poly in segs:
                pts = np.array(poly, dtype=np.int32).reshape(-1, 2)
                cv2.polylines(seg_vis, [pts], True, (255, 0, 0), 2)
        # Overlay bounding boxes
        box_vis = img.copy()
        for line in annos.get(img_id, []):
            _, cxn, cyn, wnorm, hnorm = line.split()
            cxn, cyn, wnorm, hnorm = map(float, (cxn, cyn, wnorm, hnorm))
            iw, ih = images_info[img_id]['width'], images_info[img_id]['height']
            w0 = int(wnorm * iw)
            h0 = int(hnorm * ih)
            x0 = int(cxn * iw - w0 / 2)
            y0 = int(cyn * ih - h0 / 2)
            cv2.rectangle(box_vis, (x0, y0), (x0 + w0, y0 + h0), (0, 255, 0), 2)
        before_imgs.append(Image.fromarray(seg_vis))
        after_imgs.append(Image.fromarray(box_vis))
    return before_imgs, after_imgs


# Build Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# Segmentation → YOLOv8 Converter")
    api_input = gr.Textbox(label="Roboflow API Key", type="password")
    url_input = gr.Textbox(label="Roboflow Dataset URL (Segmentation)")
    run_btn = gr.Button("Convert")
    # Gallery.style() is removed in recent Gradio releases; pass layout options directly
    before_gallery = gr.Gallery(label="Before (Segmentation)", columns=5, height="auto")
    after_gallery = gr.Gallery(label="After (Bounding Boxes)", columns=5, height="auto")
    run_btn.click(convert_seg_to_bbox, inputs=[api_input, url_input], outputs=[before_gallery, after_gallery])

if __name__ == "__main__":
    app.launch()
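
# Note: to train a YOLOv8 model on the converted folder, a dataset YAML is
# still required. A minimal sketch (the path and class names below are
# placeholders, not produced by this script):
#
#   path: /tmp/yolov8_xxxxxx   # value of out_root
#   train: images
#   val: images
#   names:
#     0: class_a
#     1: class_b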