import ast

import gradio as gr
import numpy as np
import onnxruntime
from huggingface_hub import ModelCard, hf_hub_download

# Load the model card for the repository (kept from the original, not used below)
card = ModelCard.load('mkhug98/Echo-Yolo')
# Download the ONNX model from Hugging Face
model_path = hf_hub_download(repo_id="mkhug98/Echo-Yolo", filename="best.onnx")
# Load the ONNX model
session = onnxruntime.InferenceSession(model_path)

# YOLO-style ONNX exports typically store class names as a stringified dict
# in the model metadata; parse it once so class IDs can be mapped to labels
names = ast.literal_eval(session.get_modelmeta().custom_metadata_map["names"])
# Function to perform object detection
def detect_objects(image):
    # Preprocess the image: resize to the expected input size and convert to
    # a normalized float32 NCHW tensor with a batch dimension
    image = image.convert("RGB").resize((640, 640))
    input_data = np.array(image).transpose(2, 0, 1).astype("float32") / 255.0
    input_data = np.expand_dims(input_data, axis=0)

    # Perform inference with the ONNX model (the export is assumed to take an
    # input named "images" and return boxes, scores, and class IDs)
    outputs = session.run(None, {"images": input_data})
    bboxes, scores, class_ids = outputs

    # Create a list of dictionaries for each detected object
    detections = []
    for bbox, score, class_id in zip(bboxes[0], scores[0], class_ids[0]):
        x1, y1, x2, y2 = bbox
        label = names[int(class_id)]
        detections.append({
            'label': label,
            'confidence': float(score),
            'x1': float(x1),
            'y1': float(y1),
            'x2': float(x2),
            'y2': float(y2)
        })
    return detections
# Create the Gradio app; the function returns a list of detection dicts,
# so a JSON output component is used to display them
app = gr.Interface(detect_objects, gr.Image(type="pil"), "json", examples=[
    ["example_image.jpg"]  # Replace with your own example image
])
# Run the app
app.launch()