Spaces · Runtime error
Commit 978e03b · Parent(s): 47a7b1d
Update app.py

app.py CHANGED
@@ -1,11 +1,14 @@
 import gradio as gr
 
+from ultralytics import YOLO
+model = YOLO('./best.pt')  # load your custom trained model
 
 import torch
-from ultralyticsplus import
+from ultralyticsplus import render_result
 
 torch.hub.download_url_to_file(
-    'https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg',
+    'https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg',
+    'one.jpg')
 torch.hub.download_url_to_file(
     'https://www.pearsonkoutcherlaw.com/wp-content/uploads/2020/06/Construction-Workers.jpg', 'two.jpg')
 torch.hub.download_url_to_file(
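Note on the import changes above: the model is now constructed once at module level with the ultralytics package, while ultralyticsplus is kept only for render_result. A minimal sketch of that loading and rendering pattern, assuming only the './best.pt' checkpoint and the 'one.jpg' sample downloaded in this diff (everything else is the standard ultralytics / ultralyticsplus API):

from ultralytics import YOLO
from ultralyticsplus import render_result

model = YOLO('./best.pt')            # custom-trained YOLOv8 checkpoint from the repo
results = model.predict('one.jpg')   # sample image downloaded at the top of app.py
render = render_result(model=model, image='one.jpg', result=results[0])
render.save('one_annotated.jpg')     # render_result returns a PIL image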
@@ -25,8 +28,8 @@ def yoloV8_func(image: gr.Image = None,
         iou_threshold (float, optional): Intersection over Union threshold for object detection. Defaults to 0.50.
     """
     # Load the YOLOv8 model from the 'best.pt' checkpoint
-    model_path = "
-    model =
+    model_path = "yolov8n.pt"
+    # model = torch.hub.load('ultralytics/yolov8', 'custom', path='/content/best.pt', force_reload=True, trust_repo=True)
 
     # Perform object detection on the input image using the YOLOv8 model
     results = model.predict(image,
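After this change, model_path = "yolov8n.pt" is assigned but no longer used inside yoloV8_func, and the torch.hub.load fallback stays commented out, so prediction relies on the module-level model loaded from './best.pt'. The remainder of the model.predict(...) call sits outside this hunk; a plausible completion, assuming the slider values are passed through under names suggested by the docstring (image_size, conf_threshold, iou_threshold are assumptions here) and the standard ultralytics keyword arguments:

    results = model.predict(
        image,                 # filepath string provided by the gr.Image input
        imgsz=image_size,      # assumed name, fed by the "Image Size" slider
        conf=conf_threshold,   # assumed name, fed by the confidence slider
        iou=iou_threshold,     # named in the docstring above
    )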
@@ -43,6 +46,8 @@ def yoloV8_func(image: gr.Image = None,
     # Render the output image with bounding boxes around detected objects
     render = render_result(model=model, image=image, result=results[0])
     return render
+
+
 inputs = [
     gr.Image(type="filepath", label="Input Image"),
     gr.Slider(minimum=320, maximum=1280, step=32, label="Image Size", value=640),
@@ -50,15 +55,13 @@ inputs = [
     gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="IOU Threshold"),
 ]
 
-
-
 outputs = gr.Image(type="filepath", label="Output Image")
 
 title = "YOLOv8 101: Custom Object Detection on Construction Workers"
 
-examples = [['
-    ['
-    ['
+examples = [['img1.jpg', 640, 0.5, 0.7],
+            ['img2.jpg', 800, 0.5, 0.6],
+            ['img3.jpg', 900, 0.5, 0.8]]
 
 yolo_app = gr.Interface(
     fn=yoloV8_func,
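The new examples rows are positional: each inner list must follow the same order as the inputs list (image path, image size, and the two threshold values), and img1.jpg, img2.jpg and img3.jpg have to exist in the Space repo for the examples to load. The gr.Interface(...) arguments between this hunk and the next are not visible in the diff; a sketch of the typical wiring, where everything beyond fn=yoloV8_func is an assumption:

yolo_app = gr.Interface(
    fn=yoloV8_func,
    inputs=inputs,          # image, image size, and the two threshold sliders
    outputs=outputs,
    title=title,
    examples=examples,      # each row matches the inputs order above
)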
@@ -70,4 +73,4 @@ yolo_app = gr.Interface(
 )
 
 # Launch the Gradio interface in debug mode with queue enabled
-yolo_app.launch(debug=True).queue()
+yolo_app.launch(debug=True, share=True).queue()
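This commit keeps .queue() chained after .launch(). In Gradio, launch() typically returns the app/URL tuple rather than the Interface, so chaining .queue() onto it is likely to fail at startup, which may be related to the "Runtime error" badge on this Space; share=True is also generally ignored on Hugging Face Spaces, which handle hosting themselves. The conventional ordering, as a sketch:

# Enable the request queue on the Interface first, then launch it.
yolo_app.queue().launch(debug=True)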