File size: 4,547 Bytes
c05d727
 
 
 
55c29e2
c05d727
 
55c29e2
 
 
 
 
 
 
c05d727
970733c
 
35cc57d
c05d727
 
35cc57d
c05d727
35cc57d
 
 
 
 
 
 
 
 
 
c05d727
55c29e2
c05d727
35cc57d
c05d727
35cc57d
c05d727
55c29e2
c05d727
55c29e2
c05d727
4b5873a
55c29e2
 
 
4b5873a
55c29e2
c05d727
 
 
 
4b5873a
 
55c29e2
4b5873a
 
 
 
 
55c29e2
 
 
 
 
 
4b5873a
55c29e2
 
c05d727
55c29e2
 
 
 
4b5873a
55c29e2
 
 
 
 
 
 
 
 
 
 
 
 
c05d727
55c29e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c05d727
 
 
 
55c29e2
c05d727
 
 
 
55c29e2
 
 
c05d727
 
 
 
55c29e2
c05d727
 
 
 
55c29e2
c05d727
 
55c29e2
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import gradio as gr
import cv2
import requests
import os
import random
from ultralytics import YOLO

# Mapping from YOLO class index to human-readable waste-category label.
class_names = {0: 'AluCan', 1: 'Glass', 2: 'PET', 3: 'HDPEM'}

# Assign each class a random BGR color used when drawing its boxes/labels.
class_colors = {}
for cls in class_names:
    class_colors[cls] = (
        random.randint(0, 255),
        random.randint(0, 255),
        random.randint(0, 255),
    )

# Remote sample assets hosted on Hugging Face: two images and one video.
file_urls = [
    'https://huggingface.co/spaces/iamsuman/waste-detection/resolve/main/samples/mix2.jpg?download=true',
    'https://huggingface.co/spaces/iamsuman/waste-detection/resolve/main/samples/mix11.jpg?download=true',
    'https://huggingface.co/spaces/iamsuman/waste-detection/resolve/main/samples/sample_waste.mp4?download=true',
]

# Function to download files (always overwrites existing ones)
def download_file(url, save_name, timeout=30):
    """Download *url* to *save_name*, overwriting any existing file.

    Args:
        url: HTTP(S) URL to fetch.
        save_name: Local path the downloaded bytes are written to.
        timeout: Seconds to wait for the server before giving up.
            Without a timeout, requests can block indefinitely on a
            stalled connection and hang app startup.

    Network/HTTP errors are logged and swallowed so a failed sample
    download does not abort the whole app.
    """
    print(f"Downloading from: {url}")  # Log the URL
    try:
        # stream=True avoids holding the whole file in memory at once.
        response = requests.get(url, stream=True, timeout=timeout)
        response.raise_for_status()  # Raise on 4xx/5xx status codes
        with open(save_name, 'wb') as file:
            for chunk in response.iter_content(1024):
                file.write(chunk)
        print(f"Downloaded and overwritten: {save_name}")
    except requests.exceptions.RequestException as e:
        print(f"Error downloading {url}: {e}")

# Download sample images and the sample video; the local filename is
# chosen from the URL's media type (video vs. image).
for i, url in enumerate(file_urls):
    print(i, url)
    if 'mp4' in url:
        download_file(url, "video.mp4")
    else:
        download_file(url, f"image_{i}.jpg")

# Load the trained YOLO waste-detection weights from the working directory.
model = YOLO('best.pt')

# Example inputs for the Gradio interfaces (files downloaded above).
path = [['image_0.jpg'], ['image_1.jpg']]
video_path = [['video.mp4']]

# Function to process and display predictions on images
def show_preds_image(image_path):
    """Run YOLO detection on an image file and return the annotated frame.

    Args:
        image_path: Path to the image file on disk.

    Returns:
        The image as an RGB numpy array with bounding boxes and
        "Class: confidence" labels drawn on it.

    Raises:
        ValueError: If the image cannot be read from *image_path*.
    """
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None (rather than raising) for missing or
        # unreadable files; fail here with a clear message instead of
        # crashing later inside the drawing calls.
        raise ValueError(f"Could not read image: {image_path}")

    outputs = model.predict(source=image_path)
    results = outputs[0].cpu().numpy()

    boxes = results.boxes
    names = model.model.names

    for box, conf, cls in zip(boxes.xyxy, boxes.conf, boxes.cls):
        x1, y1, x2, y2 = map(int, box)

        class_name = names[int(cls)]
        color = class_colors.get(int(cls), (255, 255, 255))  # Default to white if class is unknown

        # Draw bounding box
        cv2.rectangle(image, (x1, y1), (x2, y2), color=color, thickness=2, lineType=cv2.LINE_AA)

        # Display class label just above the box
        label = f"{class_name.capitalize()}: {conf:.2f}"
        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2, cv2.LINE_AA)

    # OpenCV works in BGR; Gradio expects RGB.
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Function to process and display predictions on video
def show_preds_video(video_path):
    """Stream YOLO detections over a video file, frame by frame.

    Args:
        video_path: Path to the video file on disk.

    Yields:
        Each frame as an RGB numpy array annotated with bounding boxes
        and "Class: confidence" labels.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Annotate a copy so the raw frame fed to the model is untouched.
            frame_copy = frame.copy()
            outputs = model.predict(source=frame)
            results = outputs[0].cpu().numpy()

            boxes = results.boxes
            confidences = boxes.conf
            classes = boxes.cls
            names = model.model.names

            for box, conf, cls in zip(boxes.xyxy, confidences, classes):
                x1, y1, x2, y2 = map(int, box)

                class_name = names[int(cls)]
                color = class_colors.get(int(cls), (255, 255, 255))  # Default to white if class is unknown

                # Draw bounding box
                cv2.rectangle(frame_copy, (x1, y1), (x2, y2), color=color, thickness=2, lineType=cv2.LINE_AA)

                # Display class label just above the box
                label = f"{class_name.capitalize()}: {conf:.2f}"
                cv2.putText(frame_copy, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)

            # OpenCV works in BGR; Gradio expects RGB.
            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
    finally:
        # Guarantee the capture is released even if the consumer stops
        # iterating early (generator closed) or an error occurs mid-stream;
        # previously an abandoned generator leaked the VideoCapture handle.
        cap.release()

# Gradio image-inference tab: takes an image path, shows the annotated frame.
inputs_image = [gr.Image(type="filepath", label="Input Image")]
outputs_image = [gr.Image(type="numpy", label="Output Image")]
image_interface_config = dict(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs_image,
    title="Waste Detection",
    examples=path,
    cache_examples=False,
)
interface_image = gr.Interface(**image_interface_config)

# Gradio video-inference tab: takes a video, streams annotated frames back.
inputs_video = [gr.Video(label="Input Video")]
outputs_video = [gr.Image(type="numpy", label="Output Image")]
video_interface_config = dict(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Waste Detection",
    examples=video_path,
    cache_examples=False,
)
interface_video = gr.Interface(**video_interface_config)

# Combine both interfaces into a tabbed app and start the server.
demo = gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image Inference', 'Video Inference'],
)
demo.queue().launch()