wuhp committed
Commit d77f034 · verified · 1 Parent(s): 2d88615

Update app.py

Files changed (1)
  1. app.py +39 -63
app.py CHANGED
@@ -13,7 +13,6 @@ from roboflow import Roboflow
 
 
 def parse_roboflow_url(url: str):
-    """Extract workspace, project name, and version from a Roboflow URL."""
     parsed = urlparse(url)
     parts = parsed.path.strip('/').split('/')
     workspace = parts[0]
@@ -26,19 +25,13 @@ def parse_roboflow_url(url: str):
 
 
 def convert_seg_to_bbox(api_key: str, dataset_url: str):
-    """
-    1) Download a segmentation dataset
-    2) Convert all masks → YOLO‐style bboxes
-    3) Write out a temp YOLO dataset and return its path
-    4) Return before/after galleries + the dataset path + an auto slug
-    """
     rf = Roboflow(api_key=api_key)
     ws, proj_name, ver = parse_roboflow_url(dataset_url)
     version_obj = rf.workspace(ws).project(proj_name).version(ver)
     dataset = version_obj.download("coco-segmentation")
     root = dataset.location
 
-    # Find the annotation JSON
+    # find annotation JSON
     ann_file = None
     for dp, _, files in os.walk(root):
         for f in files:
@@ -58,20 +51,19 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
     if not ann_file:
         raise FileNotFoundError(f"No JSON annotations under {root}")
 
-    with open(ann_file, 'r') as f:
-        coco = json.load(f)
+    coco = json.load(open(ann_file, 'r'))
     images_info = {img['id']: img for img in coco['images']}
    cat_ids = sorted(c['id'] for c in coco.get('categories', []))
    id_to_index = {cid: idx for idx, cid in enumerate(cat_ids)}
 
-    # Prepare YOLO folders
+    # prepare YOLO dirs
     out_root = tempfile.mkdtemp(prefix="yolov8_")
     img_out = os.path.join(out_root, "images")
     lbl_out = os.path.join(out_root, "labels")
     os.makedirs(img_out, exist_ok=True)
     os.makedirs(lbl_out, exist_ok=True)
 
-    # Convert seg→bbox
+    # convert seg→bbox
     annos = {}
     for anno in coco['annotations']:
         img_id = anno['image_id']
@@ -89,54 +81,51 @@
         )
         annos.setdefault(img_id, []).append(line)
 
-    # Find the image folder
+    # locate images
     train_img_dir = None
     for dp, _, files in os.walk(root):
-        if any(f.lower().endswith(('.jpg', '.png', '.jpeg')) for f in files):
+        if any(f.lower().endswith(('.jpg','.png','.jpeg')) for f in files):
             train_img_dir = dp
             break
     if not train_img_dir:
         raise FileNotFoundError(f"No images under {root}")
 
-    # Copy images + write labels
+    # copy images + write labels
     name_to_id = {img['file_name']: img['id'] for img in coco['images']}
     for fname, img_id in name_to_id.items():
         src = os.path.join(train_img_dir, fname)
         if not os.path.isfile(src):
             continue
         shutil.copy(src, os.path.join(img_out, fname))
-        with open(os.path.join(lbl_out, fname.rsplit('.', 1)[0] + ".txt"), 'w') as lf:
+        with open(os.path.join(lbl_out, fname.rsplit('.',1)[0]+".txt"), 'w') as lf:
             lf.write("\n".join(annos.get(img_id, [])))
 
-    # Build before/after sample galleries
+    # build before/after samples
     before, after = [], []
     sample = random.sample(list(name_to_id.keys()), min(5, len(name_to_id)))
     for fname in sample:
         src = os.path.join(train_img_dir, fname)
         img = cv2.cvtColor(cv2.imread(src), cv2.COLOR_BGR2RGB)
 
-        # segmentation overlay
         seg_vis = img.copy()
         for anno in coco['annotations']:
             if anno['image_id'] != name_to_id[fname]:
                 continue
-            pts = np.array(anno['segmentation'][0], np.int32).reshape(-1, 2)
-            cv2.polylines(seg_vis, [pts], True, (255, 0, 0), 2)
+            pts = np.array(anno['segmentation'][0], np.int32).reshape(-1,2)
+            cv2.polylines(seg_vis, [pts], True, (255,0,0), 2)
 
-        # bbox overlay
         box_vis = img.copy()
         for line in annos.get(name_to_id[fname], []):
             _, cxn, cyn, wnorm, hnorm = map(float, line.split())
             iw, ih = images_info[name_to_id[fname]]['width'], images_info[name_to_id[fname]]['height']
-            w0, h0 = int(wnorm * iw), int(hnorm * ih)
-            x0 = int(cxn * iw - w0/2)
-            y0 = int(cyn * ih - h0/2)
-            cv2.rectangle(box_vis, (x0, y0), (x0 + w0, y0 + h0), (0, 255, 0), 2)
+            w0, h0 = int(wnorm*iw), int(hnorm*ih)
+            x0 = int(cxn*iw - w0/2)
+            y0 = int(cyn*ih - h0/2)
+            cv2.rectangle(box_vis, (x0,y0), (x0+w0,y0+h0), (0,255,0), 2)
 
         before.append(Image.fromarray(seg_vis))
         after.append(Image.fromarray(box_vis))
 
-    # auto-generated detection project slug
     project_slug = f"{proj_name}-detection"
     return before, after, out_root, project_slug
 
@@ -148,20 +137,13 @@ def upload_and_train_detection(
     project_license: str = "MIT",
     project_type: str = "object-detection"
 ):
-    """
-    1) (re)create a Detection project
-    2) upload the YOLO dataset
-    3) generate & train a new version
-    4) return the hosted endpoint URL
-    """
     rf = Roboflow(api_key=api_key)
     ws = rf.workspace()
 
-    # 1) get-or-create project (need annotation arg)
+    # get-or-create detection project
     try:
         proj = ws.project(project_slug)
     except Exception:
-        # annotation must be provided as the 2nd positional arg
         proj = ws.create_project(
             project_slug,
             annotation=project_type,
@@ -169,54 +151,48 @@
             project_license=project_license
         )
 
-    # 2) upload dataset
-    ws.upload_dataset(
-        dataset_path,
-        project_slug,
-        project_license=project_license,
-        project_type=project_type
-    )
+    # upload the YOLO dataset
+    ws.upload_dataset(dataset_path, project_slug,
+                      project_license=project_license,
+                      project_type=project_type)
+
+    # generate a new version (must pass settings arg—even if empty)
+    version_num = proj.generate_version(settings={})
 
-    # 3) generate a new version (no args = default preprocessing/augmentation)
-    version_num = proj.generate_version()
-    # 4) train it
+    # train it
     proj.version(str(version_num)).train()
 
-    # 5) grab its hosted endpoint
+    # return hosted endpoint
     m = proj.version(str(version_num)).model
-    endpoint = f"{m['base_url']}{m['id']}?api_key={api_key}"
-    return endpoint
+    return f"{m['base_url']}{m['id']}?api_key={api_key}"
 
 
-# --- Gradio app ---
+# --- Gradio UI ---
 with gr.Blocks() as app:
-    gr.Markdown("## 🔄 Segmentation YOLOv8 Converter + Auto‐Upload")
+    gr.Markdown("## 🔄 SegBBox + Auto‐Upload/Train")
 
-    # single API key input
     api_input = gr.Textbox(label="Roboflow API Key", type="password")
     url_input = gr.Textbox(label="Segmentation Dataset URL")
     run_btn = gr.Button("Convert to BBoxes")
-    before_g = gr.Gallery(label="Before (Segmentation)", columns=5, height="auto")
-    after_g = gr.Gallery(label="After (BBoxes)", columns=5, height="auto")
-
-    # hidden states for the YOLO dataset path & auto‐slug
-    dataset_path_state = gr.Textbox(visible=False)
-    project_slug_state = gr.Textbox(visible=False)
+    before_g = gr.Gallery(columns=5, label="Before")
+    after_g = gr.Gallery(columns=5, label="After")
+    ds_state = gr.Textbox(visible=False)
+    slug_state = gr.Textbox(visible=False)
 
     run_btn.click(
-        fn=convert_seg_to_bbox,
+        convert_seg_to_bbox,
         inputs=[api_input, url_input],
-        outputs=[before_g, after_g, dataset_path_state, project_slug_state]
+        outputs=[before_g, after_g, ds_state, slug_state]
     )
 
     gr.Markdown("## 🚀 Upload & Train Detection Model")
-    train_btn = gr.Button("Upload & Train Detection Model")
-    url_output = gr.Textbox(label="Model Endpoint URL")
+    train_btn = gr.Button("Upload & Train")
+    url_out = gr.Textbox(label="Hosted Model Endpoint URL")
 
     train_btn.click(
-        fn=upload_and_train_detection,
-        inputs=[api_input, project_slug_state, dataset_path_state],
-        outputs=[url_output]
+        upload_and_train_detection,
+        inputs=[api_input, slug_state, ds_state],
+        outputs=[url_out]
     )
 
 if __name__ == "__main__":
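
The overlay lines added in this commit read labels back as `class cx cy w h` with coordinates normalized to [0, 1] and denormalize them before drawing. A minimal standalone sketch of that arithmetic, assuming the converter writes standard YOLO-style lines from COCO `[x, y, w, h]` boxes; the helper names below are hypothetical and not part of app.py:

```python
# Illustrative only: these helpers are not in app.py; the arithmetic mirrors
# the label format the converter writes and the overlay code reads back.

def coco_to_yolo_line(cls_idx: int, x: float, y: float, w: float, h: float,
                      img_w: int, img_h: int) -> str:
    """COCO-style [x, y, w, h] (top-left corner, pixels) -> 'cls cx cy w h' in [0, 1]."""
    cx = (x + w / 2) / img_w
    cy = (y + h / 2) / img_h
    return f"{cls_idx} {cx:.6f} {cy:.6f} {w / img_w:.6f} {h / img_h:.6f}"


def yolo_to_pixel_box(cxn: float, cyn: float, wnorm: float, hnorm: float,
                      img_w: int, img_h: int):
    """Inverse step used for the 'After' gallery: normalized values -> pixel corner box."""
    w0, h0 = int(wnorm * img_w), int(hnorm * img_h)
    x0 = int(cxn * img_w - w0 / 2)
    y0 = int(cyn * img_h - h0 / 2)
    return x0, y0, x0 + w0, y0 + h0
```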
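The Gradio wiring also shows how the two functions chain outside the UI: `convert_seg_to_bbox` returns the temp dataset path and an auto slug, which feed `upload_and_train_detection` in the positional order implied by the `click` inputs. A hedged usage sketch, assuming app.py is importable and using placeholder credentials and URL:

```python
# Hypothetical non-UI usage mirroring the Gradio wiring in this commit.
from app import convert_seg_to_bbox, upload_and_train_detection  # assumes app.py is on the path

API_KEY = "YOUR_ROBOFLOW_API_KEY"                                          # placeholder
DATASET_URL = "https://app.roboflow.com/<workspace>/<project>/<version>"  # placeholder; only the path segments are parsed

before, after, dataset_path, slug = convert_seg_to_bbox(API_KEY, DATASET_URL)
endpoint_url = upload_and_train_detection(API_KEY, slug, dataset_path)
print(endpoint_url)  # hosted model endpoint of the form "{base_url}{id}?api_key=..."
```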