# NOTE(review): the lines below were residue from a Hugging Face Spaces page
# ("Spaces:" / "Runtime error") pasted along with the code. The runtime error
# was most likely the invalid gr.Interface keyword arguments fixed below.
"""YOLOv5 object-detection demo served through a Gradio interface."""
import torch
import cv2
import numpy as np
import gradio as gr
from PIL import Image

# Load the small YOLOv5 model from the Ultralytics hub.
# NOTE: downloads weights over the network on first run.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Inference settings applied to the AutoShape wrapper returned by torch.hub.
model.conf = 0.25          # confidence threshold: discard detections below this score
model.iou = 0.45           # IoU threshold used by non-maximum suppression
model.agnostic = False     # class-agnostic NMS disabled
model.multi_label = False  # at most one label per box
model.max_det = 1000       # hard cap on detections per image
def detect(img):
    """Run YOLOv5 on an image and return the annotated, downscaled result.

    Parameters
    ----------
    img : numpy.ndarray
        Input image as an H x W x C array (as delivered by the Gradio
        Image input component).

    Returns
    -------
    numpy.ndarray
        The image with detection boxes rendered onto it, resized to 60%
        of the original width and height.
    """
    results = model(img, size=640)

    # render() draws the predictions onto the image(s) in place; squeeze
    # drops the leading batch dimension for the single-image case.
    annotated = np.squeeze(results.render())

    # Downscale to 60% of the original size for a lighter-weight display.
    # INTER_AREA is the recommended interpolation for shrinking.
    scale_percent = 60
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    return cv2.resize(annotated, (width, height), interpolation=cv2.INTER_AREA)
# Build and launch the Gradio UI.
# gr.Interface does not accept output_width/output_height keyword arguments
# (passing them raises TypeError at startup), and the gr.inputs namespace is
# deprecated/removed in Gradio 3+ — use the top-level Image component instead.
img_input = gr.Image()
intf = gr.Interface(fn=detect, inputs=img_input, outputs='image')
intf.launch(inline=False)