wuhp committed on
Commit ac01980 · verified · 1 Parent(s): 99c4ece

Update app.py

Files changed (1)
  1. app.py +97 -75
app.py CHANGED
@@ -26,66 +26,46 @@ def parse_roboflow_url(url: str):
 
 
 def convert_seg_to_bbox(api_key: str, dataset_url: str):
-    # 1) download the dataset
     rf = Roboflow(api_key=api_key)
     ws, proj, ver = parse_roboflow_url(dataset_url)
     version_obj = rf.workspace(ws).project(proj).version(ver)
     dataset = version_obj.download("coco-segmentation")
-    root = dataset.location  # e.g. "/home/user/app/ds-2"
-
-    # --- DEBUG: print out the downloaded directory tree ---
-    print(f"\n=== Downloaded dataset root: {root} ===")
-    for dirpath, dirnames, filenames in os.walk(root):
-        print(f"\nDirectory: {dirpath}")
-        for d in dirnames:
-            print(f" [DIR ] {d}")
-        for f in filenames:
-            print(f" [FILE] {f}")
-    print("=== end tree dump ===\n")
-
-    # 2) search for any JSON file with "train" in its name
     ann_file = None
-    for dirpath, _, filenames in os.walk(root):
-        for fname in filenames:
-            if 'train' in fname.lower() and fname.lower().endswith('.json'):
-                ann_file = os.path.join(dirpath, fname)
-                print(f"Found TRAIN annotation file: {ann_file}")
                 break
         if ann_file:
             break
-
-    # 2b) fallback: first .json anywhere
-    if ann_file is None:
-        for dirpath, _, filenames in os.walk(root):
-            for fname in filenames:
-                if fname.lower().endswith('.json'):
-                    ann_file = os.path.join(dirpath, fname)
-                    print(f"No TRAIN file—falling back to first JSON: {ann_file}")
                     break
             if ann_file:
                 break
 
-    if ann_file is None:
-        raise FileNotFoundError(f"No JSON annotations found under {root}")
-
-    # 3) load COCO annotations
-    with open(ann_file, 'r') as f:
-        coco = json.load(f)
     images_info = {img['id']: img for img in coco['images']}
-
-    # 4) build category→index map
     cat_ids = sorted(c['id'] for c in coco.get('categories', []))
     id_to_index = {cid: idx for idx, cid in enumerate(cat_ids)}
 
-    # 5) prepare YOLO output directories
     out_root = tempfile.mkdtemp(prefix="yolov8_")
     img_out = os.path.join(out_root, "images")
     lbl_out = os.path.join(out_root, "labels")
     os.makedirs(img_out, exist_ok=True)
     os.makedirs(lbl_out, exist_ok=True)
-    print(f"Preparing YOLOv8 output in: {out_root}")
 
-    # 6) convert each segmentation annotation to a YOLO bbox line
     annos = {}
     for anno in coco['annotations']:
         img_id = anno['image_id']
@@ -94,59 +74,53 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
         x_min, x_max = min(xs), max(xs)
         y_min, y_max = min(ys), max(ys)
         w, h = x_max - x_min, y_max - y_min
-        cx, cy = x_min + w/2, y_min + h/2
-
-        info = images_info[img_id]
-        iw, ih = info['width'], info['height']
-        line = (
-            f"{id_to_index[anno['category_id']]} "
-            f"{cx/iw:.6f} {cy/ih:.6f} {w/iw:.6f} {h/ih:.6f}"
-        )
         annos.setdefault(img_id, []).append(line)
 
-    # 7) locate your image folder (first with any .jpg/.png)
     train_img_dir = None
-    for dirpath, _, files in os.walk(root):
         if any(f.lower().endswith(('.jpg', '.png', '.jpeg')) for f in files):
-            train_img_dir = dirpath
-            print(f"Found image directory: {train_img_dir}")
             break
 
-    if train_img_dir is None:
-        raise FileNotFoundError(f"No image files found under {root}")
-
-    # 8) copy images + write labels
     name_to_id = {img['file_name']: img['id'] for img in coco['images']}
     for fname, img_id in name_to_id.items():
         src = os.path.join(train_img_dir, fname)
         if not os.path.isfile(src):
             continue
         shutil.copy(src, os.path.join(img_out, fname))
-        lbl_path = os.path.join(lbl_out, fname.rsplit('.', 1)[0] + ".txt")
-        with open(lbl_path, 'w') as lf:
             lf.write("\n".join(annos.get(img_id, [])))
 
-    # 9) build before/after galleries
     before, after = [], []
     sample = random.sample(list(name_to_id.keys()), min(5, len(name_to_id)))
     for fname in sample:
         src = os.path.join(train_img_dir, fname)
         img = cv2.cvtColor(cv2.imread(src), cv2.COLOR_BGR2RGB)
 
         seg_vis = img.copy()
-        img_id = name_to_id[fname]
         for anno in coco['annotations']:
-            if anno['image_id'] != img_id: continue
-            pts = np.array(anno['segmentation'][0], dtype=np.int32).reshape(-1, 2)
             cv2.polylines(seg_vis, [pts], True, (255, 0, 0), 2)
 
         box_vis = img.copy()
-        for line in annos.get(img_id, []):
             _, cxn, cyn, wnorm, hnorm = map(float, line.split())
-            iw, ih = images_info[img_id]['width'], images_info[img_id]['height']
             w0, h0 = int(wnorm * iw), int(hnorm * ih)
-            x0 = int(cxn * iw - w0/2)
-            y0 = int(cyn * ih - h0/2)
             cv2.rectangle(box_vis, (x0, y0), (x0 + w0, y0 + h0), (0, 255, 0), 2)
 
         before.append(Image.fromarray(seg_vis))
@@ -155,19 +129,67 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
     return before, after
 
 
 # --- Gradio app ---
 with gr.Blocks() as app:
-    gr.Markdown("# Segmentation → YOLOv8 Converter")
-    api_input = gr.Textbox(label="Roboflow API Key", type="password")
-    url_input = gr.Textbox(label="Roboflow Dataset URL (Segmentation)")
-    run_btn = gr.Button("Convert")
-    before_gallery = gr.Gallery(label="Before (Segmentation)", columns=5, height="auto")
-    after_gallery = gr.Gallery(label="After (Bounding Boxes)", columns=5, height="auto")
-
-    run_btn.click(
-        fn=convert_seg_to_bbox,
-        inputs=[api_input, url_input],
-        outputs=[before_gallery, after_gallery]
     )
 
 if __name__ == "__main__":
 
@@ -26,66 +26,46 @@ def parse_roboflow_url(url: str):
 
 
 def convert_seg_to_bbox(api_key: str, dataset_url: str):
+    """Download a segmentation dataset from Roboflow, convert to YOLO bboxes, and return before/after galleries."""
     rf = Roboflow(api_key=api_key)
     ws, proj, ver = parse_roboflow_url(dataset_url)
     version_obj = rf.workspace(ws).project(proj).version(ver)
     dataset = version_obj.download("coco-segmentation")
+    root = dataset.location
+
+    # 1) Find annotation JSON
     ann_file = None
+    for dp, _, files in os.walk(root):
+        for f in files:
+            if 'train' in f.lower() and f.lower().endswith('.json'):
+                ann_file = os.path.join(dp, f)
                 break
         if ann_file:
             break
+    if not ann_file:
+        for dp, _, files in os.walk(root):
+            for f in files:
+                if f.lower().endswith('.json'):
+                    ann_file = os.path.join(dp, f)
                     break
            if ann_file:
                break
+    if not ann_file:
+        raise FileNotFoundError("No JSON annotations found under %s" % root)
 
+    coco = json.load(open(ann_file, 'r'))
     images_info = {img['id']: img for img in coco['images']}
     cat_ids = sorted(c['id'] for c in coco.get('categories', []))
     id_to_index = {cid: idx for idx, cid in enumerate(cat_ids)}
 
+    # 2) Prepare YOLO folders
     out_root = tempfile.mkdtemp(prefix="yolov8_")
     img_out = os.path.join(out_root, "images")
     lbl_out = os.path.join(out_root, "labels")
     os.makedirs(img_out, exist_ok=True)
     os.makedirs(lbl_out, exist_ok=True)
 
+    # 3) Convert seg→bbox
     annos = {}
     for anno in coco['annotations']:
         img_id = anno['image_id']
@@ -94,59 +74,53 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
         x_min, x_max = min(xs), max(xs)
         y_min, y_max = min(ys), max(ys)
         w, h = x_max - x_min, y_max - y_min
+        cx, cy = x_min + w / 2, y_min + h / 2
+
+        iw, ih = images_info[img_id]['width'], images_info[img_id]['height']
+        line = f"{id_to_index[anno['category_id']]} {cx/iw:.6f} {cy/ih:.6f} {w/iw:.6f} {h/ih:.6f}"
         annos.setdefault(img_id, []).append(line)
 
+    # 4) Find images and write labels
     train_img_dir = None
+    for dp, _, files in os.walk(root):
         if any(f.lower().endswith(('.jpg', '.png', '.jpeg')) for f in files):
+            train_img_dir = dp
            break
+    if not train_img_dir:
+        raise FileNotFoundError("No image files found under %s" % root)
 
     name_to_id = {img['file_name']: img['id'] for img in coco['images']}
     for fname, img_id in name_to_id.items():
         src = os.path.join(train_img_dir, fname)
         if not os.path.isfile(src):
             continue
         shutil.copy(src, os.path.join(img_out, fname))
+        with open(os.path.join(lbl_out, fname.rsplit('.', 1)[0] + ".txt"), 'w') as lf:
            lf.write("\n".join(annos.get(img_id, [])))
 
+    # 5) Build galleries
     before, after = [], []
     sample = random.sample(list(name_to_id.keys()), min(5, len(name_to_id)))
     for fname in sample:
         src = os.path.join(train_img_dir, fname)
         img = cv2.cvtColor(cv2.imread(src), cv2.COLOR_BGR2RGB)
 
+        # draw seg polygons
         seg_vis = img.copy()
         for anno in coco['annotations']:
+            if anno['image_id'] != name_to_id[fname]:
+                continue
+            pts = np.array(anno['segmentation'][0], np.int32).reshape(-1, 2)
            cv2.polylines(seg_vis, [pts], True, (255, 0, 0), 2)
 
+        # draw boxes
        box_vis = img.copy()
+        for line in annos.get(name_to_id[fname], []):
            _, cxn, cyn, wnorm, hnorm = map(float, line.split())
+            iw, ih = images_info[name_to_id[fname]]['width'], images_info[name_to_id[fname]]['height']
            w0, h0 = int(wnorm * iw), int(hnorm * ih)
+            x0 = int(cxn * iw - w0 / 2)
+            y0 = int(cyn * ih - h0 / 2)
            cv2.rectangle(box_vis, (x0, y0), (x0 + w0, y0 + h0), (0, 255, 0), 2)
 
        before.append(Image.fromarray(seg_vis))
@@ -155,19 +129,67 @@ def convert_seg_to_bbox(api_key: str, dataset_url: str):
     return before, after
 
 
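For readers skimming the new step 3 (`# 3) Convert seg→bbox`): each COCO polygon is reduced to its axis-aligned extremes, converted to a center/size box, and normalized by the image dimensions into YOLO's `class cx cy w h` text format. A minimal standalone sketch of that arithmetic follows; the helper name and sample numbers are illustrative only, not part of this commit.

```python
# Sketch of the polygon -> YOLO bbox arithmetic used in step 3 above.
# `poly` is a flat COCO polygon [x1, y1, x2, y2, ...]; `img_w`/`img_h` are the
# image dimensions; `cls_idx` is the remapped category index.
def polygon_to_yolo_line(poly, img_w, img_h, cls_idx):
    xs, ys = poly[0::2], poly[1::2]
    x_min, x_max = min(xs), max(xs)
    y_min, y_max = min(ys), max(ys)
    w, h = x_max - x_min, y_max - y_min
    cx, cy = x_min + w / 2, y_min + h / 2
    # YOLO labels are "class cx cy w h", all normalized to [0, 1]
    return f"{cls_idx} {cx/img_w:.6f} {cy/img_h:.6f} {w/img_w:.6f} {h/img_h:.6f}"

# A 100x50 box with top-left corner (10, 20) in a 640x480 image:
print(polygon_to_yolo_line([10, 20, 110, 20, 110, 70, 10, 70], 640, 480, 0))
# -> 0 0.093750 0.093750 0.156250 0.104167
```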
+def upload_and_train_detection(
+    api_key: str,
+    project_id: str,
+    dataset_path: str,
+    project_license: str = "MIT",
+    project_type: str = "object-detection",
+    preprocessing: dict = None,
+    augmentation: dict = None,
+    speed: str = "fast"
+):
+    """
+    Upload a local detection dataset to Roboflow, generate+train a new version,
+    and return the hosted inference endpoint URL.
+    """
+    rf = Roboflow(api_key=api_key)
+    ws = rf.workspace()
+
+    # 1) upload
+    ws.upload_dataset(
+        dataset_path,
+        project_id,
+        project_license=project_license,
+        project_type=project_type
+    )
+
+    # 2) generate version
+    proj = ws.project(project_id)
+    version_number = proj.generate_version(
+        preprocessing=preprocessing or {},
+        augmentation=augmentation or {}
+    )
+
+    # 3) train
+    proj.version(version_number).train(speed=speed)
+
+    # 4) fetch model endpoint info
+    m = proj.version(str(version_number)).model
+    endpoint = f"{m['base_url']}{m['id']}?api_key={api_key}"
+    return endpoint
+
+
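A quick usage sketch for the helper added above. The API key, project slug, and dataset path are placeholders, and the dataset folder is assumed to already be in a format that `workspace.upload_dataset` accepts; the keyword arguments simply mirror the signature in the diff.

```python
# Usage sketch for the helper added in this commit. All values are placeholders,
# not real credentials, project slugs, or paths.
from app import upload_and_train_detection  # the function defined in app.py above

endpoint = upload_and_train_detection(
    api_key="YOUR_ROBOFLOW_API_KEY",        # placeholder
    project_id="my-detection-project",      # placeholder project slug
    dataset_path="/path/to/local_dataset",  # assumed upload-ready detection dataset
    speed="fast",                           # forwarded to version.train()
)
# The helper returns the hosted inference URL assembled from the trained
# model's base_url and id, with the API key appended as a query parameter.
print("Hosted endpoint:", endpoint)
```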
 # --- Gradio app ---
 with gr.Blocks() as app:
+    gr.Markdown("## 🔄 Segmentation → YOLOv8 Converter")
+    api_input1 = gr.Textbox(label="Roboflow API Key", type="password")
+    url_input = gr.Textbox(label="Segmentation Dataset URL")
+    run_btn = gr.Button("Convert to BBoxes")
+    before_g = gr.Gallery(label="Before (Segmentation)", columns=5)
+    after_g = gr.Gallery(label="After (BBoxes)", columns=5)
+    run_btn.click(fn=convert_seg_to_bbox, inputs=[api_input1, url_input], outputs=[before_g, after_g])
+
+    gr.Markdown("## 🚀 Upload & Train Detection Model")
+    api_input2 = gr.Textbox(label="Roboflow API Key", type="password")
+    project_input = gr.Textbox(label="Project ID (slug)")
+    path_input = gr.Textbox(label="Local Dataset Path")
+    train_btn = gr.Button("Upload & Train")
+    url_output = gr.Textbox(label="Hosted Model Endpoint URL")
+    train_btn.click(
+        fn=upload_and_train_detection,
+        inputs=[api_input2, project_input, path_input],
+        outputs=[url_output],
+    )
 
 if __name__ == "__main__":