youssefboutaleb committed
Commit 4e87a32 · Parent(s): 457705a

Create app.py

Files changed (1):
  1. app.py  +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
import shutil

import gradio as gr
from ultralyticsplus import YOLO, render_result

# The sample images live at local paths, so copy them into the working
# directory (torch.hub.download_url_to_file only handles URLs).
shutil.copy("../app/static/src/img1.jpg", "one.jpg")
shutil.copy("../app/static/src/img2.jpg", "two.jpg")
shutil.copy("../app/static/src/img3.jpg", "three.jpg")


def yoloV8_func(image=None,
                image_size: int = 640,
                conf_threshold: float = 0.4,
                iou_threshold: float = 0.50):
    """Perform YOLOv8 object detection on the given image.

    Args:
        image: Path of the input image to detect objects on. Defaults to None.
        image_size (int): Inference image size for the model. Defaults to 640.
        conf_threshold (float): Confidence threshold for detections. Defaults to 0.4.
        iou_threshold (float): Intersection-over-Union threshold for NMS. Defaults to 0.50.
    """
    # Load the YOLOv8 model from the 'best.pt' checkpoint
    model_path = "best.pt"
    model = YOLO(model_path)

    # Perform object detection on the input image using the YOLOv8 model
    results = model.predict(image,
                            conf=conf_threshold,
                            iou=iou_threshold,
                            imgsz=image_size)

    # Print the detected objects' information (class, coordinates, and confidence)
    box = results[0].boxes
    print("Object type:", box.cls)
    print("Coordinates:", box.xyxy)
    print("Probability:", box.conf)

    # Render the output image with bounding boxes around detected objects
    render = render_result(model=model, image=image, result=results[0])
    return render


inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640,
              step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25,
              step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45,
              step=0.05, label="IOU Threshold"),
]

outputs = gr.Image(type="filepath", label="Output Image")

title = "YOLOv8 101: Custom Object Detection on Construction Workers"

examples = [['one.jpg', 640, 0.5, 0.7],
            ['two.jpg', 800, 0.5, 0.6],
            ['three.jpg', 900, 0.5, 0.8]]

yolo_app = gr.Interface(
    fn=yoloV8_func,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
)

# Launch the Gradio interface in debug mode with the request queue enabled
yolo_app.queue().launch(debug=True)
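
For a quick sanity check of the checkpoint outside the Gradio UI, the same ultralyticsplus calls can be run as a short script. This is a minimal sketch, assuming best.pt and one of the copied example images (one.jpg) are present in the working directory:

from ultralyticsplus import YOLO, render_result

# Assumes 'best.pt' and 'one.jpg' exist locally (the same files the app uses).
model = YOLO("best.pt")
results = model.predict("one.jpg", conf=0.25, iou=0.45, imgsz=640)
print(results[0].boxes)  # classes, xyxy coordinates, and confidence scores
render_result(model=model, image="one.jpg", result=results[0]).save("prediction.jpg")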