HemaAM committed on
Commit b8d52e9 · Parent: 8771fa1

Modified app

Files changed (1)
  1. app.py +68 -15
app.py CHANGED
@@ -1,8 +1,57 @@
+from typing import List
+import cv2
+import torch
 import numpy as np
 import gradio as gr
-from detection import detect_objects
-from config import PASCAL_CLASSES
+import config as modelConfig
 
+from pytorch_grad_cam.utils.image import show_cam_on_image
+
+from yolov3 import YOLOv3
+import utils
+from utils import cells_to_bboxes, non_max_suppression, draw_bounding_boxes, YoloGradCAM
+
+
+model = YOLOv3(num_classes=len(modelConfig.PASCAL_CLASSES))
+optimizer = torch.optim.Adam(model.parameters(), lr=0.00072/100, weight_decay=1e-4)
+
+utils.load_checkpoint("checkpoint.pth.tar", model, optimizer, 0.00072/100)
+
+scaled_anchors = (
+    torch.tensor(modelConfig.ANCHORS)
+    * torch.tensor(modelConfig.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+).to(modelConfig.DEVICE)
+
+yolo_grad_cam = YoloGradCAM(model=model, target_layers=[model.layers[-2]], use_cuda=False)
+
+@torch.inference_mode()
+def detect_objects(image: np.ndarray, iou_thresh: float = 0.5, thresh: float = 0.4, enable_grad_cam: bool = False, transparency: float = 0.5) -> List[np.ndarray]:
+    transformed_image = modelConfig.transforms(image=image)["image"].unsqueeze(0)
+    transformed_image = transformed_image.to(modelConfig.DEVICE)
+    output = model(transformed_image)
+
+    bboxes = [[] for _ in range(1)]
+    for i in range(3):
+        batch_size, A, S, _, _ = output[i].shape
+        anchor = scaled_anchors[i]
+        boxes_scale_i = cells_to_bboxes(
+            output[i], anchor, S=S, is_preds=True
+        )
+        for idx, box in enumerate(boxes_scale_i):
+            bboxes[idx] += box
+
+    nms_boxes = non_max_suppression(
+        bboxes[0], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
+    )
+    plot_img_with_bboxes = draw_bounding_boxes(image.copy(), nms_boxes, class_labels=modelConfig.PASCAL_CLASSES)
+    if not enable_grad_cam:
+        return [plot_img_with_bboxes]
+
+    grayscale_cam = yolo_grad_cam(transformed_image, scaled_anchors)[0, :, :]
+    img = cv2.resize(image, (416, 416))
+    img = np.float32(img) / 255
+    grad_cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True, image_weight=transparency)
+    return [plot_img_with_bboxes, grad_cam_image]
 
 def inference(
     image: np.ndarray,
@@ -10,18 +59,22 @@ def inference(
     enable_grad_cam: str,
     transparency: float,
 ):
-    infer_output = detect_objects(image, iou_thresh, thresh, enable_grad_cam, transparency)
-    return infer_output
-
-
-title = "YoloV3 for Pascal VOC Dataset"
-description = f"Pytorch Implementation of YoloV3 model trained on Pascal VOC dataset with GradCAM \n Classes in pascol voc are: {', '.join(PASCAL_CLASSES)}"
-example_images = [
-    ["images/001114.jpg", 0.7, 0.5, True, 0.6],
-    ["images/001133.jpg", 0.6, 0.5, True, 0.6],
-    ["images/001142.jpg", 0.65, 0.45, True, 0.6],
-    ["images/001147.jpg", 0.6, 0.5, True, 0.6],
-    ["images/001155.jpg", 0.7, 0.7, True, 0.6],
+    results = detect_objects(image, iou_thresh, thresh, enable_grad_cam, transparency)
+    return results
+
+
+title = "Object detection application using a YoloV3 model"
+description = f"Object detection with a pre-trained YoloV3 model on the Pascal VOC dataset, with an optional GradCAM overlay. \n Classes in the Pascal VOC dataset are: {', '.join(modelConfig.PASCAL_CLASSES)}"
+examples = [
+    ["images_2/000811.jpg", 0.6, 0.6, True, 0.6],
+    ["images_2/000830.jpg", 0.5, 0.5, True, 0.6],
+    ["images_2/000842.jpg", 0.6, 0.6, True, 0.6],
+    ["images_2/001114.jpg", 0.4, 0.5, True, 0.6],
+    ["images_2/001133.jpg", 0.7, 0.7, True, 0.6],
+    ["images_2/001155.jpg", 0.7, 0.69, True, 0.6],
+    ["images_2/000008.jpg", 0.66, 0.69, True, 0.6],
+    ["images_2/000031.jpg", 0.6, 0.6, True, 0.6],
+    ["images_2/000175.jpg", 0.6, 0.6, True, 0.6],
 ]
 
 demo = gr.Interface(
@@ -38,7 +91,7 @@ demo = gr.Interface(
     ],
     title=title,
     description=description,
-    examples=example_images,
+    examples=examples,
 )
 
 demo.launch(debug=True)
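A note on the `scaled_anchors` tensor introduced above: it rescales the normalized anchor boxes in `config.ANCHORS` from image-relative units into grid-cell units for each of YOLOv3's three detection scales, which `cells_to_bboxes` then uses to decode the raw head outputs. A minimal sketch of the same broadcast, assuming the nine-anchor setup and grid sizes S = [13, 26, 52] commonly used for Pascal VOC (the concrete values are assumptions, not read from this repo's config.py):

```python
import torch

# Assumed config values (typical YOLOv3 Pascal VOC setup; not taken from this repo).
ANCHORS = [
    [(0.28, 0.22), (0.38, 0.48), (0.90, 0.78)],  # anchors for the 13x13 scale
    [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],  # anchors for the 26x26 scale
    [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],  # anchors for the 52x52 scale
]
S = [13, 26, 52]  # grid sizes of the three detection heads

anchors = torch.tensor(ANCHORS)                    # shape (3, 3, 2), in [0, 1] image units
grids = torch.tensor(S).unsqueeze(1).unsqueeze(1)  # shape (3, 1, 1)
scaled = anchors * grids.repeat(1, 3, 2)           # shape (3, 3, 2), now in grid-cell units

print(scaled.shape)  # torch.Size([3, 3, 2])
print(scaled[0, 0])  # tensor([3.6400, 2.8600]) -> 0.28*13, 0.22*13
```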
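The decode-and-suppress step in `detect_objects` follows the usual YOLOv3 pattern: `cells_to_bboxes` flattens each scale's grid predictions into per-image box lists, then `non_max_suppression` first drops boxes below the objectness `threshold` and greedily suppresses same-class boxes whose IoU exceeds `iou_threshold`. A self-contained sketch of that suppression logic, assuming boxes laid out as `[class_pred, score, x, y, w, h]` in midpoint format (a layout used by many YOLOv3 reference utils; this repo's exact layout is an assumption):

```python
from typing import List

def iou_midpoint(a: List[float], b: List[float]) -> float:
    """IoU of two [x, y, w, h] boxes given as center + size (midpoint format)."""
    ax1, ay1 = a[0] - a[2] / 2, a[1] - a[3] / 2
    ax2, ay2 = a[0] + a[2] / 2, a[1] + a[3] / 2
    bx1, by1 = b[0] - b[2] / 2, b[1] - b[3] / 2
    bx2, by2 = b[0] + b[2] / 2, b[1] + b[3] / 2
    inter = max(0.0, min(ax2, bx2) - max(ax1, bx1)) * max(0.0, min(ay2, by2) - max(ay1, by1))
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / (union + 1e-6)

def nms_sketch(boxes: List[List[float]], iou_threshold: float, threshold: float) -> List[List[float]]:
    """Greedy per-class NMS over [class_pred, score, x, y, w, h] boxes."""
    boxes = [b for b in boxes if b[1] > threshold]           # drop low-confidence boxes
    boxes = sorted(boxes, key=lambda b: b[1], reverse=True)  # highest score first
    kept = []
    while boxes:
        best = boxes.pop(0)
        kept.append(best)
        boxes = [
            b for b in boxes
            if b[0] != best[0]                                # different class: keep
            or iou_midpoint(best[2:], b[2:]) < iou_threshold  # low overlap: keep
        ]
    return kept
```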
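The hunks above elide the unchanged body of the `gr.Interface(...)` call (file lines 28-37 old / 81-90 new). For readers without the full file, here is a hypothetical reconstruction of that block, inferred only from `inference()`'s signature and the five-element example rows; every component choice and label is an assumption, not the repo's actual code:

```python
import gradio as gr

# Hypothetical reconstruction of the unchanged gr.Interface body (not shown in
# the diff). Components are inferred from inference()'s parameters and the
# [image, iou_thresh, thresh, enable_grad_cam, transparency] example rows.
demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(label="Input image"),
        gr.Slider(0, 1, value=0.5, label="IoU threshold"),
        gr.Slider(0, 1, value=0.4, label="Objectness threshold"),
        gr.Checkbox(value=True, label="Enable GradCAM"),
        gr.Slider(0, 1, value=0.5, label="GradCAM transparency"),
    ],
    outputs=gr.Gallery(label="Detections (and GradCAM overlay)"),
    title=title,
    description=description,
    examples=examples,
)
```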