Upload 4 files

- README.md +19 -13
- app.py +57 -0
- huggingface.yml +9 -0
- requirements.txt +5 -0
README.md
CHANGED
@@ -1,13 +1,19 @@
````markdown
# YOLOS Object Detection with Gradio

This Gradio demo uses the pretrained YOLOS transformer (`hustvl/yolos-base`) from Hugging Face Transformers to detect objects in uploaded images.

## Features

- Upload any image
- Detect objects with YOLOS
- See bounding boxes and object labels
- Adjustable confidence threshold

## Run Locally

```bash
pip install -r requirements.txt
python app.py
```

## Powered By

- [Hugging Face Transformers](https://huggingface.co/transformers/)
- [Gradio](https://gradio.app/)
````
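Once the demo is running locally, it can also be queried programmatically. A minimal sketch using `gradio_client` (the local URL, image path, and `/predict` endpoint name are assumptions, not part of this upload; older `gradio_client` versions take a plain file path instead of `handle_file`):

```python
# Hypothetical client sketch -- assumes the app is already running locally
# and a recent gradio_client is installed (pip install gradio_client).
from gradio_client import Client, handle_file

client = Client("http://127.0.0.1:7860/")   # default local Gradio URL (assumption)
annotated_path, labels = client.predict(
    handle_file("example.jpg"),             # placeholder input image path
    0.5,                                    # confidence threshold
    api_name="/predict",                    # default endpoint name (assumption)
)
# The image output comes back as a local file path to the annotated image.
print(annotated_path, labels)
```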
app.py
ADDED
@@ -0,0 +1,57 @@
```python
# STEP 1: Install dependencies
# Note: use requirements.txt when deploying
import torch
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# STEP 2: Load the YOLOS model & processor
model_name = "hustvl/yolos-base"
processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModelForObjectDetection.from_pretrained(model_name)
model.eval()

# Move to GPU if available. The model stays in float32 so it matches the
# float32 tensors the processor produces (casting only the model to
# float16 would raise a dtype mismatch at inference time).
if torch.cuda.is_available():
    model.to("cuda")

# STEP 3: Detection function that also returns the detected object names
def detect_yolos(image, threshold=0.5):
    image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model(**inputs)

    # target_sizes expects (height, width); PIL's image.size is (width, height)
    target_sizes = torch.tensor([image.size[::-1]], device=model.device)
    results = processor.post_process_object_detection(
        outputs, threshold=threshold, target_sizes=target_sizes
    )[0]

    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    detected_labels = []

    for score, label_idx, box in zip(results["scores"], results["labels"], results["boxes"]):
        label = model.config.id2label[label_idx.item()]
        detected_labels.append(label)
        box = [round(i, 2) for i in box.tolist()]
        draw.rectangle(box, outline="green", width=2)
        # Clamp the label position so text stays visible for boxes at the top edge
        draw.text((box[0], max(0, box[1] - 10)), f"{label}: {score:.2f}", fill="green", font=font)

    label_summary = ", ".join(set(detected_labels)) if detected_labels else "No objects detected."
    return image, label_summary

# STEP 4: Gradio UI
demo = gr.Interface(
    fn=detect_yolos,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Slider(0, 1, value=0.5, label="Confidence Threshold"),
    ],
    outputs=[
        gr.Image(type="pil", label="Image with Detections"),
        gr.Textbox(label="Detected Object Names"),
    ],
    title="📦 YOLOS Object Detection + Label List",
    description="Detects objects using YOLOS and lists all detected object names in a textbox.",
)

demo.launch()
```
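For a quick check without the UI, `detect_yolos` can be exercised directly; a minimal sketch (the image path is a placeholder, and note that importing `app.py` as-is would block on `demo.launch()`, so run this in a session where the function is already defined):

```python
# Hypothetical smoke test: run detect_yolos on a local image without launching Gradio.
from PIL import Image

img = Image.open("example.jpg")   # placeholder test image path (assumption)
annotated, labels = detect_yolos(img, threshold=0.7)
annotated.save("detections.jpg")  # save the annotated copy for inspection
print(labels)
```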
huggingface.yml
ADDED
@@ -0,0 +1,9 @@
```yaml
sdk: gradio
sdk_version: 4.27.0
python_version: 3.10
app_file: app.py
title: YOLOS Object Detection
emoji: 📦
colorFrom: green
colorTo: blue
license: mit
```
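Worth noting: Hugging Face Spaces normally reads this configuration from YAML front matter at the top of README.md rather than from a standalone huggingface.yml. A sketch of the equivalent front matter, if the Space does not pick up the file above:

```yaml
---
title: YOLOS Object Detection
emoji: 📦
colorFrom: green
colorTo: blue
sdk: gradio
sdk_version: 4.27.0
python_version: 3.10
app_file: app.py
license: mit
---
```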
requirements.txt
ADDED
@@ -0,0 +1,5 @@
```text
gradio
torch
torchvision
transformers
pillow
```
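Since huggingface.yml pins `sdk_version: 4.27.0` while requirements.txt leaves gradio unpinned, local runs can drift from the deployed Space. A possible pinned variant (only the gradio pin is taken from the config above; the other packages are left open deliberately):

```text
gradio==4.27.0
torch
torchvision
transformers
pillow
```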