nagasurendra committed on
Commit b291ae2 · verified · 1 Parent(s): 57d54ad

Create app.py

Files changed (1)
  1. app.py +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
+ import cv2
+ import torch
+ from ultralytics import YOLO
+ import gradio as gr
+
+ # Load the pre-trained YOLO model (best.pt is a custom-trained Ultralytics checkpoint)
+ model = YOLO("./data/best.pt")
+
+ # Process a single video frame and count detected wine bottles
+ def process_frame(frame):
+     # Perform inference on the frame
+     results = model(frame)
+
+     # Count detections whose class id is 0 (assumed to be the wine-bottle class)
+     classes = results[0].boxes.cls
+     bottle_count = int((classes == 0).sum())
+     return bottle_count
+
+ # Classify stock level based on the bottle count
+ def classify_stock(bottle_count):
+     if bottle_count > 50:
+         return "Full"
+     elif 20 <= bottle_count <= 50:
+         return "Medium"
+     else:
+         return "Low"
+
+ # Process the video frame by frame and track the stock level
+ def classify_video(video):
+     # gr.Video passes the uploaded file as a path string
+     cap = cv2.VideoCapture(video)
+     stock_status = None
+
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         bottle_count = process_frame(frame)
+         stock_status = classify_stock(bottle_count)
+
+     cap.release()
+     # The returned status reflects the last frame that was processed
+     return stock_status
+
+ # Gradio handler: upload a video and classify its stock level
+ def main(video_input):
+     return classify_video(video_input)
+
+ # Creating the Gradio interface
+ iface = gr.Interface(fn=main, inputs=gr.Video(), outputs="text")
+
+ if __name__ == "__main__":
+     iface.launch()
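
A quick way to check the class-id assumption baked into process_frame (a minimal sketch, assuming the same ./data/best.pt checkpoint used above) is to print the model's class-name map:

from ultralytics import YOLO

model = YOLO("./data/best.pt")  # path taken from app.py above
print(model.names)  # dict mapping class ids to names; confirm which id is the wine-bottle class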