azeemkhan417 committed
Commit a24a8b9 · verified · 1 Parent(s): 73729bf

Upload 9 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ demo.mp4 filter=lfs diff=lfs merge=lfs -text
+ temp/output_demo.mp4 filter=lfs diff=lfs merge=lfs -text
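
The two new rules route demo.mp4 and temp/output_demo.mp4 through Git LFS, so a fresh clone that has not run git lfs pull holds only small pointer stubs for them (and for best.pt below). A minimal check, not part of the commit, to confirm the tracked binaries were actually materialized before launching the app:

    # Hedged sketch: an un-pulled LFS file begins with the pointer header,
    # exactly as shown in the ADDED blocks below.
    for name in ("best.pt", "demo.mp4", "temp/output_demo.mp4"):
        with open(name, "rb") as f:
            head = f.read(24)
        if head.startswith(b"version https://git-lfs"):
            print(f"{name}: still an LFS pointer, run 'git lfs pull'")
        else:
            print(f"{name}: binary present")
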
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca30bc0b183d5932931b21c53b2e3f6ab8a773e26b0e737d36066da195507b05
+ size 6217113
demo.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e707d2ae1c912beb3e661a4c9f2b1587250e0abaa34bd524a7ceef0cdd26e93d
+ size 9563349
images.jpeg ADDED
main.py ADDED
@@ -0,0 +1,128 @@
+ import streamlit as st
+ import cv2
+ import numpy as np
+ from ultralytics import YOLO
+ from PIL import Image
+ import os
+ st.title("YOLO Image and Video Processing")
+
+ # Allow users to upload images or videos
+ uploaded_file = st.file_uploader("Upload an image or video", type=["jpg", "jpeg", "png", "bmp", "mp4", "avi", "mov", "mkv"])
+ try:
+     model = YOLO('best.pt')  # trained YOLO weights committed at the repo root
+ except Exception as e:
+     st.error(f"Error loading YOLO model: {e}")
+     st.stop()  # halt the script run so later code never touches an undefined model
+
+
+ def predict_and_save_image(path_test_car, output_image_path):
+     """
+     Predicts and saves the bounding boxes on the given test image using the trained YOLO model.
+
+     Parameters:
+     path_test_car (str): Path to the test image file.
+     output_image_path (str): Path to save the output image file.
+
+     Returns:
+     str: The path to the saved output image file.
+     """
+     try:
+         results = model.predict(path_test_car, device='cpu')
+         image = cv2.imread(path_test_car)
+         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+         for result in results:
+             for box in result.boxes:
+                 x1, y1, x2, y2 = map(int, box.xyxy[0])
+                 confidence = box.conf[0]
+                 cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+                 cv2.putText(image, f'{confidence * 100:.2f}%', (x1, y1 - 10),
+                             cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
+         image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+         cv2.imwrite(output_image_path, image)
+         return output_image_path
+     except Exception as e:
+         st.error(f"Error processing image: {e}")
+         return None
+
+
+ def predict_and_plot_video(video_path, output_path):
+     """
+     Predicts and saves the bounding boxes on the given test video using the trained YOLO model.
+
+     Parameters:
+     video_path (str): Path to the test video file.
+     output_path (str): Path to save the output video file.
+
+     Returns:
+     str: The path to the saved output video file.
+     """
+     try:
+         cap = cv2.VideoCapture(video_path)
+         if not cap.isOpened():
+             st.error(f"Error opening video file: {video_path}")
+             return None
+         frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+         frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+         fps = int(cap.get(cv2.CAP_PROP_FPS))
+         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+         out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+             rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             results = model.predict(rgb_frame, device='cpu')
+             for result in results:
+                 for box in result.boxes:
+                     x1, y1, x2, y2 = map(int, box.xyxy[0])
+                     confidence = box.conf[0]
+                     cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+                     cv2.putText(frame, f'{confidence * 100:.2f}%', (x1, y1 - 10),
+                                 cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
+             out.write(frame)
+         cap.release()
+         out.release()
+         return output_path
+     except Exception as e:
+         st.error(f"Error processing video: {e}")
+         return None
+
+
+ def process_media(input_path, output_path):
+     """
+     Processes the uploaded media file (image or video) and returns the path to the saved output file.
+
+     Parameters:
+     input_path (str): Path to the input media file.
+     output_path (str): Path to save the output media file.
+
+     Returns:
+     str: The path to the saved output media file.
+     """
+     file_extension = os.path.splitext(input_path)[1].lower()
+     if file_extension in ['.mp4', '.avi', '.mov', '.mkv']:
+         return predict_and_plot_video(input_path, output_path)
+     elif file_extension in ['.jpg', '.jpeg', '.png', '.bmp']:
+         return predict_and_save_image(input_path, output_path)
+     else:
+         st.error(f"Unsupported file type: {file_extension}")
+         return None
+
+
+ if uploaded_file is not None:
+     os.makedirs("temp", exist_ok=True)  # scratch directory for uploads and results
+     input_path = os.path.join("temp", uploaded_file.name)
+     output_path = os.path.join("temp", f"output_{uploaded_file.name}")
+     try:
+         with open(input_path, "wb") as f:
+             f.write(uploaded_file.getbuffer())
+         st.write("Processing...")
+         result_path = process_media(input_path, output_path)
+         if result_path:
+             if input_path.endswith(('.mp4', '.avi', '.mov', '.mkv')):
+                 with open(result_path, 'rb') as video_file:
+                     st.video(video_file.read())
+             else:
+                 st.image(result_path)
+     except Exception as e:
+         st.error(f"Error uploading or processing file: {e}")
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ pandas
+ matplotlib
+ streamlit
+ opencv-python
+ ultralytics
+ numpy
+ pillow
+
temp/demo.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e707d2ae1c912beb3e661a4c9f2b1587250e0abaa34bd524a7ceef0cdd26e93d
+ size 9563349
temp/images.jpeg ADDED
temp/output_demo.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:099c667151f6e6cdf3f0615b185e227bcd55076e0a7437f68c6a24129c4b1709
+ size 16161833
temp/output_images.jpeg ADDED