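# Gradio demo for Baybayin character detection: a webcam frame is passed to one
# of three YOLO checkpoints (M-Raw, N-Raw, S-Raw) and the detections are drawn
# onto the frame before it is returned to the browser.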
import numpy as np
from PIL import Image
import gradio as gr
from ultralytics import YOLO
import cv2
# Load the three YOLO model checkpoints (one per variant offered in the UI)
m_raw_model = YOLO("M-Raw.pt")
n_raw_model = YOLO("N-Raw.pt")
s_raw_model = YOLO("S-Raw.pt")
def snap(image, model, conf, iou):
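    """Run the selected YOLO variant on a frame and draw its detections.

    `image` is the numpy array supplied by the Gradio webcam component,
    `model` is the name chosen in the radio group, and `conf`/`iou` are the
    thresholds passed straight through to the YOLO predictor.
    """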
    # Run the selected model
    results = None
    if model == "M-Raw":
        results = m_raw_model(image, conf=conf, iou=iou)
    elif model == "N-Raw":
        results = n_raw_model(image, conf=conf, iou=iou)
    elif model == "S-Raw":
        results = s_raw_model(image, conf=conf, iou=iou)
    else:
        # No model selected in the radio group: return the frame unchanged
        # instead of crashing on `results[0]` below.
        return image

    # Take the first (and only) result for this single frame
    result = results[0]
    if result.boxes.cls.cpu().numpy().size == 0:
        # No detections: return the frame unchanged
        return image
    # Per-detection class ids, confidences, and xyxy boxes
    classes = result.boxes.cls.cpu().numpy()
    probs = result.boxes.conf.cpu().numpy()
    boxes = result.boxes.xyxy.cpu().numpy()
print("-------------------")
print(classes)
print("-------------------")
print(probs)
print("-------------------")
print(boxes)
print(image)
    # Draw each box with its class id and confidence
    for i in range(len(boxes)):
        x1, y1, x2, y2 = boxes[i]
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(image, f"{int(classes[i])} {probs[i]:.2f}", (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    # The boxes were drawn in place on the input array, so it can be returned as-is
    resulting_image = image

    # Get the labels
    # labels = results.pandas().xyxy[0]["name"].values
    # Sort the labels by their x-value first and then by their y-value
    # print(labels)

    return resulting_image
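# Interface wiring: a streaming webcam image, a radio group to pick the YOLO
# variant, and two sliders for the confidence and IoU thresholds; the single
# output is the annotated frame.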
demo = gr.Interface(
    snap,
    [
        gr.Image(source="webcam", tool=None, streaming=True),
        gr.Radio(["M-Raw", "S-Raw", "N-Raw"], label="Model"),
        gr.Slider(0, 1, value=0.6, label="Classifier Confidence Threshold"),
        gr.Slider(0, 1, value=0.7, label="IoU Threshold"),
    ],
    ["image"],
    title="Baybayin Instance Detection",
)
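# To try this locally (assuming the script is saved as app.py and the three .pt
# checkpoints sit next to it): run `python app.py`, then open the local URL that
# Gradio prints. Note that `source=`/`tool=` on gr.Image are Gradio 3.x arguments.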
if __name__ == "__main__":
    demo.launch()