Update app.py
app.py CHANGED

@@ -2,41 +2,48 @@ import gradio as gr
 import cv2
 import requests
 import os
-
+import numpy as np
+
 from ultralytics import YOLO
-
+
 file_urls = [
     'https://www.dropbox.com/s/b5g97xo901zb3ds/pothole_example.jpg?dl=1',
     'https://www.dropbox.com/s/86uxlxxlm1iaexa/pothole_screenshot.png?dl=1',
     'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
 ]
-
+
 def download_file(url, save_name):
-    url = url
     if not os.path.exists(save_name):
         file = requests.get(url)
         open(save_name, 'wb').write(file.content)
-
+
 for i, url in enumerate(file_urls):
     if 'mp4' in file_urls[i]:
-        download_file(
-            file_urls[i],
-            f"video.mp4"
-        )
+        download_file(file_urls[i], f"video.mp4")
     else:
-        download_file(
-            file_urls[i],
-            f"image_{i}.jpg"
-        )
+        download_file(file_urls[i], f"image_{i}.jpg")
 
 model = YOLO('best.pt')
-path
+path = [['image_0.jpg'], ['image_1.jpg']]
 video_path = [['video.mp4']]
 
+def save_annotation(image_path, results):
+    height, width, _ = cv2.imread(image_path).shape
+    annotation_txt = ""
+    for i, det in enumerate(results.boxes.xyxy):
+        # YOLO format: class x_center y_center width height
+        class_id = int(results.names[int(det[5])])
+        x_center, y_center, bbox_width, bbox_height = det[0], det[1], det[2] - det[0], det[3] - det[1]
+        annotation_txt += f"{class_id} {x_center / width:.6f} {y_center / height:.6f} {bbox_width / width:.6f} {bbox_height / height:.6f}\n"
+    return annotation_txt
+
 def show_preds_image(image_path):
     image = cv2.imread(image_path)
     outputs = model.predict(source=image_path)
     results = outputs[0].cpu().numpy()
+
+    annotation_txt = save_annotation(image_path, results)
+
     for i, det in enumerate(results.boxes.xyxy):
         cv2.rectangle(
             image,
@@ -46,14 +53,16 @@ def show_preds_image(image_path):
         thickness=2,
         lineType=cv2.LINE_AA
     )
+
+    # Save YOLO format annotation to a txt file
+    annotation_filename = f"annotation_{os.path.basename(image_path).split('.')[0]}.txt"
+    with open(annotation_filename, 'w') as f:
+        f.write(annotation_txt)
+
     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-inputs_image = [
-    gr.components.Image(type="filepath", label="Input Image"),
-]
-outputs_image = [
-    gr.components.Image(type="numpy", label="Output Image"),
-]
+
+inputs_image = [gr.components.Image(type="filepath", label="Input Image"),]
+outputs_image = [gr.components.Image(type="numpy", label="Output Image"),]
 interface_image = gr.Interface(
     fn=show_preds_image,
     inputs=inputs_image,