import gradio as gr
import torch
from PIL import Image

# Load the custom-trained YOLOv5 model via torch.hub
model = torch.hub.load('ultralytics/yolov5', 'custom', 'customModel/model.pt')


def yolo(im, size=640):
    g = size / max(im.size)  # gain to scale the longest side to `size`
    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize (Image.ANTIALIAS was removed in Pillow 10)
    results = model(im)  # inference
    results.render()  # draws boxes and labels onto results.ims
    return Image.fromarray(results.ims[0])

inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")
title = "Custom YOLOv5"
description = "Custom YOLOv5 Gradio demo for object detection. Upload an image or click an example image to use it."
examples = [['c.jpg'], ['t.jpg']]
gr.Interface(yolo, inputs, outputs, title=title, description=description,
             examples=examples, theme="huggingface").launch(debug=True)
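
# Note: this script targets the Gradio 2.x API. Newer Gradio releases deprecate
# and eventually remove the gr.inputs / gr.outputs namespaces and the old string
# themes such as "huggingface", so a rough, untested sketch of the same interface
# on current Gradio would pass the component classes directly:
#
#   demo = gr.Interface(
#       fn=yolo,
#       inputs=gr.Image(type="pil", label="Original Image"),
#       outputs=gr.Image(type="pil", label="Output Image"),
#       title=title,
#       description=description,
#       examples=examples,
#   )
#   demo.launch(debug=True)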