import torch
import cv2
import numpy as np
import gradio as gr
# Load a pretrained YOLOv5s model from PyTorch Hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
model.conf = 0.25           # NMS confidence threshold
model.iou = 0.45            # NMS IoU threshold
model.agnostic = False      # class-agnostic NMS
model.multi_label = False   # multiple labels per box
model.max_det = 1000        # maximum number of detections per image
def detect(img):
    # Run inference at a 640 px inference size
    results = model(img, size=640)
    predictions = results.pred[0]
    boxes = predictions[:, :4]      # x1, y1, x2, y2
    scores = predictions[:, 4]      # confidence scores
    categories = predictions[:, 5]  # class indices
    # Draw the detections onto the image
    new_image = np.squeeze(results.render())
    # Resize for display: 600 px tall (matching the CSS below), with the
    # width scaled to preserve the aspect ratio
    height = 600
    width = int(new_image.shape[1] * height / new_image.shape[0])
    new_image = cv2.resize(new_image, (width, height), interpolation=cv2.INTER_AREA)
    return new_image
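
# For reference, a hedged sketch of reading the raw detections instead of the
# rendered image; results.pandas() is part of the YOLOv5 hub API and its xyxy
# DataFrame has the columns used below:
#   df = results.pandas().xyxy[0]  # xmin, ymin, xmax, ymax, confidence, class, name
#   for _, row in df.iterrows():
#       print(f"{row['name']}: {row['confidence']:.2f}")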
css = ".output-image, .input-image, .image-preview {height: 600px !important}"
iface = gr.Interface(fn=detect,
                     inputs=gr.Image(type="numpy"),
                     outputs=gr.Image(type="numpy"),
                     css=css)
iface.queue()
iface.launch(debug=True)
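
# To try this locally (assuming the file is saved as app.py, the usual entry
# point for a Gradio Space; the package list below is an assumption, and
# YOLOv5 pulls in further dependencies on first load):
#   pip install torch gradio opencv-python
#   python app.py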