# 📷 Object Detection Demo | CPU-only HF Space
import gradio as gr
from transformers import pipeline
from PIL import Image, ImageDraw
# Load the DETR object-detection pipeline (CPU)
detector = pipeline("object-detection", model="facebook/detr-resnet-50", device=-1)
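
# Each detection returned by the pipeline is a dict shaped roughly like:
#   {"score": 0.998, "label": "cat",
#    "box": {"xmin": 12, "ymin": 34, "xmax": 210, "ymax": 180}}
# (the label, score, and coordinates above are illustrative values only).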
def detect_objects(image: Image.Image):
    # Guard against an empty upload
    if image is None:
        return None, []

    # Run object detection
    outputs = detector(image)

    # Draw bounding boxes on a copy of the input
    annotated = image.convert("RGB")
    draw = ImageDraw.Draw(annotated)

    table = []
    for obj in outputs:
        # The pipeline returns each box as a dict with xmin/ymin/xmax/ymax keys
        box = obj["box"]
        xmin, ymin, xmax, ymax = box["xmin"], box["ymin"], box["xmax"], box["ymax"]
        label = obj["label"]
        score = round(obj["score"], 3)

        # Draw the box and its label (keep the label inside the image bounds)
        draw.rectangle([xmin, ymin, xmax, ymax], outline="red", width=2)
        draw.text((xmin, max(ymin - 10, 0)), f"{label} ({score})", fill="red")
        table.append([label, score])

    # Return the annotated image and a table of detections
    return annotated, table
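
# The pipeline call also accepts a confidence threshold, e.g.
# detector(image, threshold=0.5) to keep lower-confidence detections;
# the 0.5 value above is only an example, not what this demo uses.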
with gr.Blocks(title="📷✨ Object Detection Demo") as demo:
    gr.Markdown(
        """
        # 📷✨ Object Detection
        Upload an image and let DETR (a Transformer-based detection model) find the objects it contains.
        """
    )

    with gr.Row():
        img_in = gr.Image(type="pil", label="Upload Image")
        detect_btn = gr.Button("Detect Objects 🔍", variant="primary")

    img_out = gr.Image(label="Annotated Image")
    table_out = gr.Dataframe(
        headers=["Label", "Score"],
        datatype=["str", "number"],
        wrap=True,
        interactive=False,
        label="Detections",
    )

    detect_btn.click(detect_objects, inputs=img_in, outputs=[img_out, table_out])
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0")
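
# Deployment note: on a Hugging Face Space this file is typically paired with a
# requirements.txt listing at least gradio, transformers, torch, and Pillow;
# Gradio serves on port 7860 by default, which is what Spaces expect.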