import cv2
import torch
from ultralytics import YOLO
import gradio as gr

# Load the pre-trained YOLOv8 model
model = YOLO("./data/best.pt")  # Replace with path to your trained YOLOv8 model
# Function to process a video frame and count wine bottles
def process_frame(frame):
    # Perform inference on the frame
    results = model(frame)
    # Extract detections: results[0].boxes.cls holds the class id of each box
    # (assuming class '0' is the wine-bottle class)
    classes = results[0].boxes.cls
    # Count the number of wine bottles detected
    bottle_count = int((classes == 0).sum())
    return bottle_count
# Classify stock based on bottle count
def classify_stock(bottle_count):
    if bottle_count > 50:
        return "Full"
    elif 20 <= bottle_count <= 50:
        return "Medium"
    else:
        return "Low"
# Video processing function to classify each frame and track the stock level
def classify_video(video):
    # gr.Video passes the uploaded video to the function as a file path
    cap = cv2.VideoCapture(video)
    stock_status = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        bottle_count = process_frame(frame)
        stock_status = classify_stock(bottle_count)
    cap.release()
    return stock_status
# Gradio interface to upload a video and classify stock
def main(video_input):
    return classify_video(video_input)

# Creating the Gradio interface
iface = gr.Interface(fn=main, inputs=gr.Video(), outputs="text")
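
# Optional local sanity check: a minimal sketch that reads a single image with
# OpenCV and prints the bottle count from process_frame. The image path
# "./data/sample_frame.jpg" is a hypothetical placeholder, not part of the app;
# this only confirms the model loads and returns a count before launching the
# Gradio interface.
def sanity_check(image_path="./data/sample_frame.jpg"):
    img = cv2.imread(image_path)  # cv2.imread returns None if the file is missing
    if img is None:
        print(f"Could not read {image_path}")
        return
    print("Bottles detected:", process_frame(img))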

if __name__ == "__main__":
    iface.launch()