youssefboutaleb committed
Commit b08e5f0 · 1 Parent(s): 5eeb136

Update app.py

Files changed (1): app.py (+26 -29)
app.py CHANGED
@@ -1,26 +1,31 @@
 import gradio as gr
 
+
+import torch
 from ultralyticsplus import YOLO, render_result
 
+torch.hub.download_url_to_file(
+    'https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg', 'one.jpg')
+torch.hub.download_url_to_file(
+    'https://www.pearsonkoutcherlaw.com/wp-content/uploads/2020/06/Construction-Workers.jpg', 'two.jpg')
+torch.hub.download_url_to_file(
+    'https://nssgroup.com/wp-content/uploads/2019/02/Building-maintenance-blog.jpg', 'three.jpg')
 
-#torch.hub.download_url_to_file("img1.jpg", 'one.jpg')
-#torch.hub.download_url_to_file("img2.jpg", 'two.jpg')
-#torch.hub.download_url_to_file("img3.jpg", 'three.jpg')
 
 def yoloV8_func(image: gr.Image = None,
-                image_size: gr.Slider = 640,
-                conf_threshold: gr.Slider = 0.4,
-                iou_threshold: gr.Slider = 0.50):
+                image_size: int = 640,
+                conf_threshold: float = 0.4,
+                iou_threshold: float = 0.5):
     """This function performs YOLOv8 object detection on the given image.
 
     Args:
-        image (gr.inputs.Image, optional): Input image to detect objects on. Defaults to None.
-        image_size (gr.inputs.Slider, optional): Desired image size for the model. Defaults to 640.
-        conf_threshold (gr.inputs.Slider, optional): Confidence threshold for object detection. Defaults to 0.4.
-        iou_threshold (gr.inputs.Slider, optional): Intersection over Union threshold for object detection. Defaults to 0.50.
+        image (gr.Image, optional): Input image to detect objects on. Defaults to None.
+        image_size (int, optional): Desired image size for the model. Defaults to 640.
+        conf_threshold (float, optional): Confidence threshold for object detection. Defaults to 0.4.
+        iou_threshold (float, optional): Intersection over Union threshold for object detection. Defaults to 0.50.
     """
     # Load the YOLOv8 model from the 'best.pt' checkpoint
-    model_path = "best.pt"
+    model_path = "./best.pt.pt"
     model = YOLO(model_path)
 
     # Perform object detection on the input image using the YOLOv8 model
@@ -38,39 +43,31 @@ def yoloV8_func(image: gr.Image = None,
     # Render the output image with bounding boxes around detected objects
     render = render_result(model=model, image=image, result=results[0])
     return render
-
-
-
 inputs = [
     gr.Image(type="filepath", label="Input Image"),
-    gr.Slider(minimum=320, maximum=1280, value=640,  # Changed 'default' to 'value'
-              step=32, label="Image Size"),
-    gr.Slider(minimum=0.0, maximum=1.0, value=0.25,  # Changed 'default' to 'value'
-              step=0.05, label="Confidence Threshold"),
-    gr.Slider(minimum=0.0, maximum=1.0, value=0.45,  # Changed 'default' to 'value'
-              step=0.05, label="IOU Threshold"),
+    gr.Slider(minimum=320, maximum=1280, step=32, label="Image Size", value=640),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="Confidence Threshold"),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="IOU Threshold"),
 ]
 
 
 
-output_image = gr.Image(label="Output Image")
-
+outputs = gr.Image(type="filepath", label="Output Image")
 
 title = "YOLOv8 101: Custom Object Detection on Construction Workers"
 
-
-examples = [['img1.jpg', 640, 0.5, 0.7],
-            ['img2.jpg', 800, 0.5, 0.6],
-            ['img3.jpg', 900, 0.5, 0.8]]
+examples = [['one.jpg', 640, 0.5, 0.7],
+            ['two.jpg', 800, 0.5, 0.6],
+            ['three.jpg', 900, 0.5, 0.8]]
 
 yolo_app = gr.Interface(
     fn=yoloV8_func,
     inputs=inputs,
-    outputs=output_image,
+    outputs=outputs,
     title=title,
     examples=examples,
-    cache_examples=True,
+    cache_examples=False,
 )
 
 # Launch the Gradio interface in debug mode with queue enabled
-yolo_app.launch(debug=True, enable_queue=True)
+yolo_app.launch(debug=True).queue()
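The diff elides the middle of yoloV8_func (old lines 27-37 / new lines 32-42), so the actual detection call is not shown. For orientation, here is a minimal sketch of how the full function typically looks with ultralyticsplus; the predict() keyword arguments (conf, iou, imgsz) and the "./best.pt" checkpoint path are assumptions, not the committed code (the commit itself writes "./best.pt.pt").

# Sketch only: the elided hunk is assumed to call the standard Ultralytics
# predict() with the slider values; this is not the committed implementation.
import gradio as gr
from ultralyticsplus import YOLO, render_result


def yoloV8_func(image: gr.Image = None,
                image_size: int = 640,
                conf_threshold: float = 0.4,
                iou_threshold: float = 0.5):
    """Run YOLOv8 object detection on the given image and return a rendered result."""
    # Load the fine-tuned checkpoint (path assumed; the commit uses "./best.pt.pt")
    model = YOLO("./best.pt")

    # Run inference with the user-selected thresholds and image size
    results = model.predict(image,
                            conf=conf_threshold,
                            iou=iou_threshold,
                            imgsz=image_size)

    # Draw bounding boxes for the detected objects on the input image
    render = render_result(model=model, image=image, result=results[0])
    return render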
 
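On the last changed line, .queue() is chained onto whatever launch(debug=True) returns rather than onto yolo_app itself. If the intent behind the comment "with queue enabled" is to keep request queueing now that the old enable_queue flag is no longer accepted by launch() in newer Gradio releases, the common pattern is to enable the queue on the app before launching. A minimal sketch, assuming a recent Gradio version where queue() returns the app for chaining:

# Sketch: enable queueing first, then launch (assumes a recent Gradio release)
yolo_app.queue().launch(debug=True)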