wuhp committed on
Commit
b506212
·
verified ·
1 Parent(s): 371a532

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -27
app.py CHANGED
@@ -24,7 +24,7 @@ def parse_roboflow_url(url: str):
24
  return workspace, project, version
25
 
26
 
27
- def convert_seg_to_bbox(api_key: str, dataset_url: str):
28
  rf = Roboflow(api_key=api_key)
29
  ws, proj_name, ver = parse_roboflow_url(dataset_url)
30
  version_obj = rf.workspace(ws).project(proj_name).version(ver)
@@ -56,14 +56,14 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
56
  cat_ids = sorted(c['id'] for c in coco.get('categories', []))
57
  id_to_index = {cid: idx for idx, cid in enumerate(cat_ids)}
58
 
59
- # prepare YOLO dirs
60
  out_root = tempfile.mkdtemp(prefix="yolov8_")
61
  img_out = os.path.join(out_root, "images")
62
  lbl_out = os.path.join(out_root, "labels")
63
  os.makedirs(img_out, exist_ok=True)
64
  os.makedirs(lbl_out, exist_ok=True)
65
 
66
- # convert seg→bbox
67
  annos = {}
68
  for anno in coco['annotations']:
69
  img_id = anno['image_id']
@@ -72,7 +72,7 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
72
  x_min, x_max = min(xs), max(xs)
73
  y_min, y_max = min(ys), max(ys)
74
  w, h = x_max - x_min, y_max - y_min
75
- cx, cy = x_min + w/2, y_min + h/2
76
 
77
  iw, ih = images_info[img_id]['width'], images_info[img_id]['height']
78
  line = (
@@ -81,26 +81,52 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
81
  )
82
  annos.setdefault(img_id, []).append(line)
83
 
84
- # locate images
85
  train_img_dir = None
86
  for dp, _, files in os.walk(root):
87
- if any(f.lower().endswith(('.jpg','.png','.jpeg')) for f in files):
88
  train_img_dir = dp
89
  break
90
  if not train_img_dir:
91
  raise FileNotFoundError(f"No images under {root}")
92
 
93
- # copy images + write labels
94
  name_to_id = {img['file_name']: img['id'] for img in coco['images']}
95
  for fname, img_id in name_to_id.items():
96
  src = os.path.join(train_img_dir, fname)
97
  if not os.path.isfile(src):
98
  continue
99
  shutil.copy(src, os.path.join(img_out, fname))
100
- with open(os.path.join(lbl_out, fname.rsplit('.',1)[0] + ".txt"), 'w') as lf:
101
  lf.write("\n".join(annos.get(img_id, [])))
102
 
103
- # build before/after samples
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  before, after = [], []
105
  sample = random.sample(list(name_to_id.keys()), min(5, len(name_to_id)))
106
  for fname in sample:
@@ -111,17 +137,17 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
111
  for anno in coco['annotations']:
112
  if anno['image_id'] != name_to_id[fname]:
113
  continue
114
- pts = np.array(anno['segmentation'][0], np.int32).reshape(-1,2)
115
- cv2.polylines(seg_vis, [pts], True, (255,0,0), 2)
116
 
117
  box_vis = img.copy()
118
  for line in annos.get(name_to_id[fname], []):
119
  _, cxn, cyn, wnorm, hnorm = map(float, line.split())
120
  iw, ih = images_info[name_to_id[fname]]['width'], images_info[name_to_id[fname]]['height']
121
- w0, h0 = int(wnorm*iw), int(hnorm*ih)
122
- x0 = int(cxn*iw - w0/2)
123
- y0 = int(cyn*ih - h0/2)
124
- cv2.rectangle(box_vis, (x0,y0), (x0+w0,y0+h0), (0,255,0), 2)
125
 
126
  before.append(Image.fromarray(seg_vis))
127
  after.append(Image.fromarray(box_vis))
@@ -140,7 +166,7 @@ def upload_and_train_detection(
140
  rf = Roboflow(api_key=api_key)
141
  ws = rf.workspace()
142
 
143
- # get-or-create detection project
144
  try:
145
  proj = ws.project(project_slug)
146
  except Exception:
@@ -151,7 +177,7 @@ def upload_and_train_detection(
151
  project_license=project_license
152
  )
153
 
154
- # upload the YOLO dataset
155
  ws.upload_dataset(
156
  dataset_path,
157
  project_slug,
@@ -159,16 +185,16 @@ def upload_and_train_detection(
159
  project_type=project_type
160
  )
161
 
162
- # generate a new version (must pass both keys—even if empty)
163
  version_num = proj.generate_version(settings={
164
  "augmentation": {},
165
  "preprocessing": {},
166
  })
167
 
168
- # train it
169
  proj.version(str(version_num)).train()
170
 
171
- # return hosted endpoint
172
  m = proj.version(str(version_num)).model
173
  return f"{m['base_url']}{m['id']}?api_key={api_key}"
174
 
@@ -177,13 +203,13 @@ def upload_and_train_detection(
177
  with gr.Blocks() as app:
178
  gr.Markdown("## 🔄 Seg→BBox + Auto‐Upload/Train")
179
 
180
- api_input = gr.Textbox(label="Roboflow API Key", type="password")
181
- url_input = gr.Textbox(label="Segmentation Dataset URL")
182
- run_btn = gr.Button("Convert to BBoxes")
183
- before_g = gr.Gallery(columns=5, label="Before")
184
- after_g = gr.Gallery(columns=5, label="After")
185
- ds_state = gr.Textbox(visible=False)
186
- slug_state = gr.Textbox(visible=False)
187
 
188
  run_btn.click(
189
  convert_seg_to_bbox,
 
24
  return workspace, project, version
25
 
26
 
27
+ def convert_seg_to_bbox(api_key: str, dataset_url: str, split_ratios=(0.8, 0.1, 0.1)):
28
  rf = Roboflow(api_key=api_key)
29
  ws, proj_name, ver = parse_roboflow_url(dataset_url)
30
  version_obj = rf.workspace(ws).project(proj_name).version(ver)
 
56
  cat_ids = sorted(c['id'] for c in coco.get('categories', []))
57
  id_to_index = {cid: idx for idx, cid in enumerate(cat_ids)}
58
 
59
+ # prepare flat YOLO dirs
60
  out_root = tempfile.mkdtemp(prefix="yolov8_")
61
  img_out = os.path.join(out_root, "images")
62
  lbl_out = os.path.join(out_root, "labels")
63
  os.makedirs(img_out, exist_ok=True)
64
  os.makedirs(lbl_out, exist_ok=True)
65
 
66
+ # convert segmentation → bounding-box labels
67
  annos = {}
68
  for anno in coco['annotations']:
69
  img_id = anno['image_id']
 
72
  x_min, x_max = min(xs), max(xs)
73
  y_min, y_max = min(ys), max(ys)
74
  w, h = x_max - x_min, y_max - y_min
75
+ cx, cy = x_min + w / 2, y_min + h / 2
76
 
77
  iw, ih = images_info[img_id]['width'], images_info[img_id]['height']
78
  line = (
 
81
  )
82
  annos.setdefault(img_id, []).append(line)
83
 
84
+ # locate raw images folder
85
  train_img_dir = None
86
  for dp, _, files in os.walk(root):
87
+ if any(f.lower().endswith(('.jpg', '.png', '.jpeg')) for f in files):
88
  train_img_dir = dp
89
  break
90
  if not train_img_dir:
91
  raise FileNotFoundError(f"No images under {root}")
92
 
93
+ # copy images + write flat labels
94
  name_to_id = {img['file_name']: img['id'] for img in coco['images']}
95
  for fname, img_id in name_to_id.items():
96
  src = os.path.join(train_img_dir, fname)
97
  if not os.path.isfile(src):
98
  continue
99
  shutil.copy(src, os.path.join(img_out, fname))
100
+ with open(os.path.join(lbl_out, fname.rsplit('.', 1)[0] + ".txt"), 'w') as lf:
101
  lf.write("\n".join(annos.get(img_id, [])))
102
 
103
+ # split into train/valid/test
104
+ all_images = sorted([f for f in os.listdir(img_out) if f.lower().endswith(('.jpg', '.png', '.jpeg'))])
105
+ random.shuffle(all_images)
106
+ n = len(all_images)
107
+ n_train = int(n * split_ratios[0])
108
+ n_valid = int(n * split_ratios[1])
109
+ splits = {
110
+ "train": all_images[:n_train],
111
+ "valid": all_images[n_train:n_train + n_valid],
112
+ "test": all_images[n_train + n_valid:]
113
+ }
114
+
115
+ for split_name, files in splits.items():
116
+ img_dir = os.path.join(out_root, split_name, "images")
117
+ lbl_dir = os.path.join(out_root, split_name, "labels")
118
+ os.makedirs(img_dir, exist_ok=True)
119
+ os.makedirs(lbl_dir, exist_ok=True)
120
+ for fname in files:
121
+ shutil.move(os.path.join(img_out, fname), os.path.join(img_dir, fname))
122
+ lbl_fname = fname.rsplit(".", 1)[0] + ".txt"
123
+ shutil.move(os.path.join(lbl_out, lbl_fname), os.path.join(lbl_dir, lbl_fname))
124
+
125
+ # remove flat dirs
126
+ shutil.rmtree(img_out)
127
+ shutil.rmtree(lbl_out)
128
+
129
+ # build before/after galleries for a few samples
130
  before, after = [], []
131
  sample = random.sample(list(name_to_id.keys()), min(5, len(name_to_id)))
132
  for fname in sample:
 
137
  for anno in coco['annotations']:
138
  if anno['image_id'] != name_to_id[fname]:
139
  continue
140
+ pts = np.array(anno['segmentation'][0], np.int32).reshape(-1, 2)
141
+ cv2.polylines(seg_vis, [pts], True, (255, 0, 0), 2)
142
 
143
  box_vis = img.copy()
144
  for line in annos.get(name_to_id[fname], []):
145
  _, cxn, cyn, wnorm, hnorm = map(float, line.split())
146
  iw, ih = images_info[name_to_id[fname]]['width'], images_info[name_to_id[fname]]['height']
147
+ w0, h0 = int(wnorm * iw), int(hnorm * ih)
148
+ x0 = int(cxn * iw - w0 / 2)
149
+ y0 = int(cyn * ih - h0 / 2)
150
+ cv2.rectangle(box_vis, (x0, y0), (x0 + w0, y0 + h0), (0, 255, 0), 2)
151
 
152
  before.append(Image.fromarray(seg_vis))
153
  after.append(Image.fromarray(box_vis))
 
166
  rf = Roboflow(api_key=api_key)
167
  ws = rf.workspace()
168
 
169
+ # get or create project
170
  try:
171
  proj = ws.project(project_slug)
172
  except Exception:
 
177
  project_license=project_license
178
  )
179
 
180
+ # upload folder with train/valid/test
181
  ws.upload_dataset(
182
  dataset_path,
183
  project_slug,
 
185
  project_type=project_type
186
  )
187
 
188
+ # create new version
189
  version_num = proj.generate_version(settings={
190
  "augmentation": {},
191
  "preprocessing": {},
192
  })
193
 
194
+ # enqueue training (now sees splits)
195
  proj.version(str(version_num)).train()
196
 
197
+ # return endpoint
198
  m = proj.version(str(version_num)).model
199
  return f"{m['base_url']}{m['id']}?api_key={api_key}"
200
 
 
203
  with gr.Blocks() as app:
204
  gr.Markdown("## 🔄 Seg→BBox + Auto‐Upload/Train")
205
 
206
+ api_input = gr.Textbox(label="Roboflow API Key", type="password")
207
+ url_input = gr.Textbox(label="Segmentation Dataset URL")
208
+ run_btn = gr.Button("Convert to BBoxes")
209
+ before_g = gr.Gallery(columns=5, label="Before")
210
+ after_g = gr.Gallery(columns=5, label="After")
211
+ ds_state = gr.Textbox(visible=False)
212
+ slug_state = gr.Textbox(visible=False)
213
 
214
  run_btn.click(
215
  convert_seg_to_bbox,