File size: 1,753 Bytes
92d6fec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
# 📷 Object Detection Demo | CPU-only HF Space

import gradio as gr
from transformers import pipeline
from PIL import Image, ImageDraw

# Load the DETR object-detection pipeline once at import time so the model
# weights are downloaded/initialized a single time, not per request.
# device=-1 forces CPU inference (this Space has no GPU).
detector = pipeline("object-detection", model="facebook/detr-resnet-50", device=-1)

def detect_objects(image: Image.Image):
    """Run DETR object detection on *image* and annotate the results.

    Args:
        image: Input picture as a PIL image (any mode; converted to RGB).

    Returns:
        A tuple ``(annotated, table)`` where ``annotated`` is a new RGB copy
        of the image with red bounding boxes and labels drawn on it, and
        ``table`` is a list of ``[label, score]`` rows for the Dataframe.
    """
    # Run object detection; each result is a dict with "label", "score",
    # and "box" keys.
    outputs = detector(image)

    # Draw on a converted copy so the caller's image is left untouched.
    annotated = image.convert("RGB")
    draw = ImageDraw.Draw(annotated)
    table = []
    for obj in outputs:
        # BUG FIX: the HF object-detection pipeline returns "box" as a dict
        # {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}, not a list.
        # Tuple-unpacking the dict yields its *keys* (strings), which then
        # crashes ImageDraw.rectangle. Extract the coordinates by name.
        box = obj["box"]
        xmin, ymin, xmax, ymax = box["xmin"], box["ymin"], box["xmax"], box["ymax"]
        label = obj["label"]
        score = round(obj["score"], 3)

        # Draw the box; clamp the label's y-position so text stays inside
        # the canvas when a box touches the top edge of the image.
        draw.rectangle([xmin, ymin, xmax, ymax], outline="red", width=2)
        draw.text((xmin, max(ymin - 10, 0)), f"{label} ({score})", fill="red")

        table.append([label, score])

    # Return the annotated image and a table of detections
    return annotated, table

# Build the Gradio UI. All components are declared inside the Blocks context;
# event wiring (the .click handler) connects them to detect_objects.
with gr.Blocks(title="📷✨ Object Detection Demo") as demo:
    gr.Markdown(
        """
        # 📷✨ Object Detection  
        Upload an image and let DETR (a Transformer-based model) identify objects in real time.
        """
    )

    # Input row: image uploader next to the trigger button.
    with gr.Row():
        img_in  = gr.Image(type="pil", label="Upload Image")
        detect_btn = gr.Button("Detect Objects 🔍", variant="primary")
    # Outputs: annotated image plus a read-only table of (label, score) rows,
    # matching the (annotated, table) tuple returned by detect_objects.
    img_out   = gr.Image(label="Annotated Image")
    table_out = gr.Dataframe(
        headers=["Label", "Score"],
        datatype=["str", "number"],
        wrap=True,
        interactive=False,
        label="Detections"
    )

    # Clicking the button runs detection and fills both outputs.
    detect_btn.click(detect_objects, inputs=img_in, outputs=[img_out, table_out])

if __name__ == "__main__":
    # Bind to 0.0.0.0 so the server is reachable from outside the container
    # (required when hosted on Hugging Face Spaces / Docker).
    demo.launch(server_name="0.0.0.0")