Spaces:
Runtime error
Runtime error
File size: 998 Bytes
339d133 4d78218 339d133 03a517d 339d133 03a517d 339d133 4d78218 339d133 03a517d 4d78218 2642a92 4d78218 539c230 3e1b9a6 004d756 4d78218 f1fe0df 4d78218 f312c0e 339d133 4d78218 339d133 63e0b63 d74af8d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
import torch
import cv2
import numpy as np
import gradio as gr
from PIL import Image
# Load a small pretrained YOLOv5 model from the official PyTorch Hub repo
# (downloads weights on first run).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Inference settings applied to every call of the model.
_inference_settings = {
    'conf': 0.25,         # confidence threshold
    'iou': 0.45,          # NMS IoU threshold
    'agnostic': False,    # class-agnostic NMS disabled
    'multi_label': False, # one label per box
    'max_det': 1000,      # cap on detections per image
}
for _name, _value in _inference_settings.items():
    setattr(model, _name, _value)
def detect(img):
    """Run YOLOv5 inference on *img* and return the annotated image, downscaled.

    Parameters
    ----------
    img : numpy.ndarray
        Input image as an H x W x C array (as delivered by the Gradio
        image component).

    Returns
    -------
    numpy.ndarray
        The image with detection boxes rendered on it, resized to 60% of
        the original width/height.
    """
    results = model(img, size=640)  # letterboxed inference at 640 px

    # results.render() draws the boxes in place and returns a list of
    # annotated images; squeeze unwraps the single-image batch.
    annotated = np.squeeze(results.render())

    # Downscale the annotated image to 60% of the input's dimensions.
    scale_percent = 60
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    annotated = cv2.resize(annotated, (width, height),
                           interpolation=cv2.INTER_AREA)
    return annotated
# Build and launch the Gradio demo.
# NOTE: the old `gr.inputs.Image(shape=...)` namespace was removed in
# Gradio 3.x, and `output_width`/`output_height` are not valid
# `gr.Interface` arguments (they caused a runtime error). The resize to
# display size is already handled inside `detect`.
img = gr.Image()
intf = gr.Interface(fn=detect, inputs=img, outputs='image')
intf.launch(inline=False)
|