wuhp committed
Commit 24fabae · verified · 1 Parent(s): 6f41ebf

Update app.py

Files changed (1):
  1. app.py +226 -20

app.py CHANGED

@@ -7,19 +7,23 @@ import shutil
 from ultralytics import YOLO
 import requests
 
+# Directory and file configurations
 MODELS_DIR = "models"
 MODELS_INFO_FILE = "models_info.json"
 TEMP_DIR = "temp"
 OUTPUT_DIR = "outputs"
 
+# New files for storing ratings, detections, and recommended datasets
+RATINGS_FILE = "ratings.json"
+DETECTIONS_FILE = "detections.json"
+RECOMMENDED_DATASETS_FILE = "recommended_datasets.json"
+
 def download_file(url, dest_path):
     """
     Download a file from a URL to the destination path.
-
     Args:
         url (str): The URL to download from.
         dest_path (str): The local path to save the file.
-
     Returns:
         bool: True if download succeeded, False otherwise.
     """
@@ -39,11 +43,9 @@ def load_models(models_dir=MODELS_DIR, info_file=MODELS_INFO_FILE):
     """
     Load YOLO models and their information from the specified directory and JSON file.
     Downloads models if they are not already present.
-
     Args:
        models_dir (str): Path to the models directory.
        info_file (str): Path to the JSON file containing model info.
-
     Returns:
        dict: A dictionary of models and their associated information.
     """
@@ -67,7 +69,6 @@ def load_models(models_dir=MODELS_DIR, info_file=MODELS_INFO_FILE):
             continue
 
         try:
-
            model = YOLO(model_path)
            models[model_name] = {
                'display_name': display_name,
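
The hunk above truncates the per-model dictionary after `display_name`. For orientation, a sketch of the structure `load_models` appears to build, with placeholder values and an assumed `model` field (only `display_name` and `info` are confirmed by this diff):

```python
from ultralytics import YOLO

# Illustrative only: "models/example.pt" and the metadata values are placeholders,
# not files or fields taken from this repository.
model_path = "models/example.pt"
model = YOLO(model_path)

models = {
    "example": {
        "display_name": "Example Detector",
        "model": model,                       # assumed field holding the loaded YOLO object
        "info": {"architecture": "YOLOv11"},  # field later read as models[name]['info']
    }
}
```
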
@@ -80,15 +81,14 @@ def load_models(models_dir=MODELS_DIR, info_file=MODELS_INFO_FILE):
 
     return models
 
-def get_model_info(model_info):
+def get_model_info(model_info, ratings_info):
     """
-    Retrieve formatted model information for display.
-
+    Retrieve formatted model information for display, including average rating.
     Args:
         model_info (dict): The model's information dictionary.
-
+        ratings_info (dict): The ratings information for the model.
     Returns:
-        str: A formatted string containing model details.
+        str: A formatted string containing model details and average rating.
     """
     info = model_info
     class_ids = info.get('class_ids', {})
@@ -99,6 +99,11 @@ def get_model_info(model_info):
     class_image_counts_formatted = "\n".join([f"{cname}: {count}" for cname, count in class_image_counts.items()])
     datasets_used_formatted = "\n".join([f"- {dataset}" for dataset in datasets_used])
 
+    # Calculate average rating
+    total_rating = ratings_info.get('total', 0)
+    count_rating = ratings_info.get('count', 0)
+    average_rating = (total_rating / count_rating) if count_rating > 0 else "No ratings yet"
+
     info_text = (
         f"**{info.get('display_name', 'Model Name')}**\n\n"
         f"**Architecture:** {info.get('architecture', 'N/A')}\n\n"
@@ -111,20 +116,19 @@ def get_model_info(model_info):
         f"**Number of Images Trained On:** {info.get('num_images', 'N/A')}\n\n"
         f"**Class IDs:**\n{class_ids_formatted}\n\n"
         f"**Datasets Used:**\n{datasets_used_formatted}\n\n"
-        f"**Class Image Counts:**\n{class_image_counts_formatted}"
+        f"**Class Image Counts:**\n{class_image_counts_formatted}\n\n"
+        f"**Average Rating:** {average_rating} ⭐"
     )
     return info_text
 
 def predict_image(model_name, image, confidence, models):
     """
     Perform prediction on an uploaded image using the selected YOLO model.
-
     Args:
         model_name (str): The name of the selected model.
         image (PIL.Image.Image): The uploaded image.
         confidence (float): The confidence threshold for detections.
         models (dict): The dictionary containing models and their info.
-
     Returns:
         tuple: A status message, the processed image, and the path to the output image.
     """
@@ -133,7 +137,6 @@ def predict_image(model_name, image, confidence, models):
     if not model:
         return "Error: Model not found.", None, None
     try:
-
         os.makedirs(TEMP_DIR, exist_ok=True)
         os.makedirs(OUTPUT_DIR, exist_ok=True)
 
@@ -145,7 +148,6 @@ def predict_image(model_name, image, confidence, models):
         latest_run = sorted(Path("runs/detect").glob("predict*"), key=os.path.getmtime)[-1]
         output_image_path = os.path.join(latest_run, Path(input_image_path).name)
         if not os.path.isfile(output_image_path):
-
             output_image_path = results[0].save()[0]
 
         final_output_path = os.path.join(OUTPUT_DIR, f"{model_name}_output_image.jpg")
@@ -153,17 +155,86 @@ def predict_image(model_name, image, confidence, models):
 
         output_image = Image.open(final_output_path)
 
-        return "✅ Prediction completed successfully.", output_image, final_output_path
+        # Calculate number of detections
+        detections = len(results[0].boxes)
+        return f"✅ Prediction completed successfully. **Detections:** {detections}", output_image, final_output_path
     except Exception as e:
         return f"❌ Error during prediction: {str(e)}", None, None
 
-def main():
+def load_or_initialize_json(file_path, default_data):
+    """
+    Load JSON data from a file or initialize it with default data if the file doesn't exist.
+    Args:
+        file_path (str): Path to the JSON file.
+        default_data (dict or list): Default data to initialize if file doesn't exist.
+    Returns:
+        dict or list: The loaded or initialized data.
+    """
+    if os.path.isfile(file_path):
+        with open(file_path, 'r') as f:
+            return json.load(f)
+    else:
+        with open(file_path, 'w') as f:
+            json.dump(default_data, f, indent=4)
+        return default_data
+
+def save_json(file_path, data):
+    """
+    Save data to a JSON file.
+    Args:
+        file_path (str): Path to the JSON file.
+        data (dict or list): Data to save.
+    """
+    with open(file_path, 'w') as f:
+        json.dump(data, f, indent=4)
+
+def is_valid_roboflow_url(url):
+    """
+    Validate if the provided URL is a Roboflow URL.
+    Args:
+        url (str): The URL to validate.
+    Returns:
+        bool: True if valid, False otherwise.
+    """
+    return url.startswith("https://roboflow.com/") or url.startswith("http://roboflow.com/")
+
+def get_top_model(detections_per_model, models):
+    """
+    Determine the top model based on the highest number of detections.
+    Args:
+        detections_per_model (dict): Dictionary with model names as keys and detection counts as values.
+        models (dict): Dictionary of loaded models.
+    Returns:
+        str: The display name of the top model or a message if no detections exist.
+    """
+    if not detections_per_model:
+        return "No detections yet."
+    top_model_name = max(detections_per_model, key=detections_per_model.get)
+    top_model_display = models[top_model_name]['display_name']
+    top_detections = detections_per_model[top_model_name]
+    return f"**Top Model:** {top_model_display} with **{top_detections}** detections."
 
+def main():
+    # Load models
     models = load_models()
     if not models:
         print("No models loaded. Please check your models_info.json and model URLs.")
         return
 
+    # Load or initialize ratings
+    ratings_data = load_or_initialize_json(RATINGS_FILE, {})
+    # Initialize ratings for each model if not present
+    for model_name in models:
+        if model_name not in ratings_data:
+            ratings_data[model_name] = {"total": 0, "count": 0}
+    save_json(RATINGS_FILE, ratings_data)
+
+    # Load or initialize detections counter
+    detections_data = load_or_initialize_json(DETECTIONS_FILE, {"total_detections": 0, "detections_per_model": {}})
+
+    # Load or initialize recommended datasets
+    recommended_datasets = load_or_initialize_json(RECOMMENDED_DATASETS_FILE, [])
+
     with gr.Blocks() as demo:
         gr.Markdown("# 🧪 YOLOv11 Model Tester")
         gr.Markdown(
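
The new persistence helpers introduced above are plain JSON read/write wrappers. A minimal usage sketch, assuming they can be imported from `app.py` (which presumes the module does not launch the interface at import time; that guard is not shown in this diff):

```python
# Usage sketch for the new helpers; model names and counts below are hypothetical.
from app import get_top_model, load_or_initialize_json, save_json

# Ratings: one {"total", "count"} entry per model, as initialized in main().
ratings = load_or_initialize_json("ratings.json", {})
ratings.setdefault("example_model", {"total": 0, "count": 0})
ratings["example_model"]["total"] += 5
ratings["example_model"]["count"] += 1
save_json("ratings.json", ratings)

# Top-model summary from a hypothetical detections-per-model mapping.
detections_per_model = {"model_a": 12, "model_b": 30}
models = {
    "model_a": {"display_name": "Model A"},
    "model_b": {"display_name": "Model B"},
}
print(get_top_model(detections_per_model, models))
# **Top Model:** Model B with **30** detections.
```
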
@@ -172,6 +243,15 @@ def main():
             """
         )
 
+        # Display total detections counter and top model
+        with gr.Row():
+            detections_counter = gr.Markdown(
+                f"**Total Detections Across All Users:** {detections_data.get('total_detections', 0)}"
+            )
+            top_model_display = gr.Markdown(
+                get_top_model(detections_data.get('detections_per_model', {}), models)
+            )
+
         with gr.Row():
             model_dropdown = gr.Dropdown(
                 choices=[models[m]['display_name'] for m in models],
@@ -189,7 +269,8 @@ def main():
             if not model_name:
                 return "Model information not available."
             model_entry = models[model_name]['info']
-            return get_model_info(model_entry)
+            ratings_info = ratings_data.get(model_name, {"total": 0, "count": 0})
+            return get_model_info(model_entry, ratings_info)
 
         model_dropdown.change(
             fn=update_model_info,
@@ -212,7 +293,6 @@ def main():
                     image_input = gr.Image(
                         type='pil',
                         label="Upload Image for Prediction"
-
                     )
                     image_predict_btn = gr.Button("🔍 Predict on Image")
                     image_status = gr.Markdown("**Status will appear here.**")
@@ -223,7 +303,32 @@ def main():
                         if not selected_display_name:
                             return "❌ Please select a model.", None, None
                         model_name = display_to_name.get(selected_display_name)
-                        return predict_image(model_name, image, confidence, models)
+                        status, output_img, output_path = predict_image(model_name, image, confidence, models)
+
+                        # Extract number of detections from the status message
+                        detections = 0
+                        if "Detections:" in status:
+                            try:
+                                detections = int(status.split("Detections:")[1].strip())
+                            except:
+                                pass
+
+                        # Update detections counter
+                        try:
+                            detections_data['total_detections'] += detections
+                            if model_name in detections_data['detections_per_model']:
+                                detections_data['detections_per_model'][model_name] += detections
+                            else:
+                                detections_data['detections_per_model'][model_name] = detections
+                            save_json(DETECTIONS_FILE, detections_data)
+                        except Exception as e:
+                            print(f"Error updating detections counter: {e}")
+
+                        # Update detections and top model display
+                        detections_counter.value = f"**Total Detections Across All Users:** {detections_data.get('total_detections', 0)}"
+                        top_model_display.value = get_top_model(detections_data.get('detections_per_model', {}), models)
+
+                        return status, output_img, output_path
 
                     image_predict_btn.click(
                         fn=process_image,
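
Note that `predict_image` formats the count as `**Detections:** N`, so `status.split("Detections:")[1]` yields `"** N"` and the bare `int(...)` above falls back to 0 through the `except`. If the count is to be recovered from the status text rather than returned directly, a more tolerant parse might look like this sketch (illustrative, not part of the commit):

```python
import re


def detections_from_status(status):
    """Illustrative helper: pull the detection count out of a status string such as
    '✅ ... **Detections:** 5', tolerating the Markdown bold markers around the label."""
    match = re.search(r"Detections:\**\s*(\d+)", status)
    return int(match.group(1)) if match else 0


print(detections_from_status("✅ Prediction completed successfully. **Detections:** 5"))  # 5
```
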
@@ -231,6 +336,107 @@ def main():
                         outputs=[image_status, image_output, image_download_btn]
                     )
 
+            with gr.Tab("⭐ Rate Model"):
+                with gr.Column():
+                    selected_model = gr.Dropdown(
+                        choices=[models[m]['display_name'] for m in models],
+                        label="Select Model to Rate",
+                        value=None
+                    )
+                    rating = gr.Slider(
+                        minimum=1,
+                        maximum=5,
+                        step=1,
+                        label="Rate the Model (1-5 Stars)",
+                        info="Select a star rating between 1 and 5."
+                    )
+                    submit_rating_btn = gr.Button("Submit Rating")
+                    rating_status = gr.Markdown("**Your rating will be submitted here.**")
+
+                    def submit_rating(selected_display_name, user_rating):
+                        if not selected_display_name:
+                            return "❌ Please select a model to rate."
+                        if not user_rating:
+                            return "❌ Please provide a rating."
+                        model_name = display_to_name.get(selected_display_name)
+                        if not model_name:
+                            return "❌ Invalid model selected."
+
+                        # Update ratings data
+                        ratings_info = ratings_data.get(model_name, {"total": 0, "count": 0})
+                        ratings_info['total'] += user_rating
+                        ratings_info['count'] += 1
+                        ratings_data[model_name] = ratings_info
+                        save_json(RATINGS_FILE, ratings_data)
+
+                        # Update model info display if the rated model is currently selected
+                        if model_dropdown.value == selected_display_name:
+                            updated_info = get_model_info(models[model_name]['info'], ratings_info)
+                            model_info.value = updated_info
+
+                        average = (ratings_info['total'] / ratings_info['count'])
+                        return f"✅ Thank you for rating! Current Average Rating: {average:.2f} ⭐"
+
+                    submit_rating_btn.click(
+                        fn=submit_rating,
+                        inputs=[selected_model, rating],
+                        outputs=rating_status
+                    )
+
+            with gr.Tab("💡 Recommend Dataset"):
+                with gr.Column():
+                    dataset_name = gr.Textbox(
+                        label="Dataset Name",
+                        placeholder="Enter the name of the dataset"
+                    )
+                    dataset_url = gr.Textbox(
+                        label="Dataset URL",
+                        placeholder="Enter the Roboflow dataset URL"
+                    )
+                    recommend_btn = gr.Button("Recommend Dataset")
+                    recommend_status = gr.Markdown("**Your recommendation status will appear here.**")
+
+                    def recommend_dataset(name, url):
+                        if not name or not url:
+                            return "❌ Please provide both the dataset name and URL."
+
+                        if not is_valid_roboflow_url(url):
+                            return "❌ Invalid URL. Please provide a valid Roboflow dataset URL."
+
+                        # Check for duplicates
+                        for dataset in recommended_datasets:
+                            if dataset['name'].lower() == name.lower() or dataset['url'] == url:
+                                return "❌ This dataset has already been recommended."
+
+                        # Add to recommended datasets
+                        recommended_datasets.append({"name": name, "url": url})
+                        save_json(RECOMMENDED_DATASETS_FILE, recommended_datasets)
+
+                        return f"✅ Thank you for recommending the dataset **{name}**!"
+
+                    recommend_btn.click(
+                        fn=recommend_dataset,
+                        inputs=[dataset_name, dataset_url],
+                        outputs=recommend_status
+                    )
+
+            with gr.Tab("📄 Recommended Datasets"):
+                with gr.Column():
+                    recommended_display = gr.Markdown("### Recommended Roboflow Datasets\n")
+
+                    def display_recommended_datasets():
+                        if not recommended_datasets:
+                            return "No datasets have been recommended yet."
+                        dataset_md = "\n".join([f"- [{dataset['name']}]({dataset['url']})" for dataset in recommended_datasets])
+                        return dataset_md
+
+                    # Display the recommended datasets
+                    recommended_display.value = display_recommended_datasets()
+
+            with gr.Tab("🏆 Top Model"):
+                with gr.Column():
+                    top_model_md = gr.Markdown(get_top_model(detections_data.get('detections_per_model', {}), models))
+
         gr.Markdown(
             """
             ---
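
The rating handler above assigns to `model_info.value` and, earlier, `detections_counter.value`. In Gradio Blocks the usual way to refresh components from an event is to return the new values through the event's `outputs`; a standalone sketch of that pattern (component names here are illustrative, not the ones in `app.py`):

```python
import gradio as gr

# Standalone sketch (not the commit's code): refresh both the status line and a
# model-info panel by returning new values through `outputs`.
ratings = {"Example Detector": {"total": 0, "count": 0}}  # stand-in for ratings_data


def submit_rating(display_name, user_rating):
    info = ratings.setdefault(display_name, {"total": 0, "count": 0})
    info["total"] += int(user_rating)
    info["count"] += 1
    average = info["total"] / info["count"]
    status = f"✅ Thank you for rating! Current Average Rating: {average:.2f} ⭐"
    details = f"**{display_name}**\n\n**Average Rating:** {average:.2f} ⭐"
    return status, details


with gr.Blocks() as demo:
    model_box = gr.Dropdown(choices=["Example Detector"], value="Example Detector", label="Model")
    stars = gr.Slider(minimum=1, maximum=5, step=1, label="Rate the Model (1-5 Stars)")
    submit_btn = gr.Button("Submit Rating")
    status_md = gr.Markdown("**Your rating will be submitted here.**")
    details_md = gr.Markdown()
    submit_btn.click(fn=submit_rating, inputs=[model_box, stars], outputs=[status_md, details_md])

# demo.launch()
```
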