Spaces:
Sleeping
Sleeping
push app.py
Browse files- app.py +47 -0
- butterfly.jpg +0 -0
- cat.jpeg +0 -0
- chicken1.jpeg +0 -0
- chicken2.jpeg +0 -0
- elefant.jpg +0 -0
- spider.jpg +0 -0
app.py
CHANGED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from transformers import pipeline
|
3 |
+
|
4 |
+
# Fine-tuned ViT checkpoint trained on the Animals-10 dataset.
vit_classifier = pipeline(
    "image-classification",
    model="MichaelMM2000/vit-base-animals10",
)

# Zero-shot CLIP baseline, used for comparison against the fine-tuned ViT.
clip_detector = pipeline(
    model="openai/clip-vit-base-patch32",
    task="zero-shot-image-classification",
)

# Animals-10 class names
labels_animals10 = [
    "butterfly",
    "cat",
    "chicken",
    "cow",
    "dog",
    "elephant",
    "horse",
    "sheep",
    "spider",
    "squirrel",
]
|
13 |
+
|
14 |
+
def classify_animal(image):
    """Classify *image* with both models and return their score maps.

    Args:
        image: Path to the input image (the interface passes a filepath).

    Returns:
        dict: Two entries — "ViT Classification" and
        "CLIP Zero-Shot Classification" — each mapping label -> score.
    """
    # Fine-tuned ViT predictions.
    vit_scores = {pred['label']: pred['score'] for pred in vit_classifier(image)}

    # Zero-shot CLIP predictions, prompted with a natural-language template.
    prompts = [f"a photo of a {label}" for label in labels_animals10]
    clip_preds = clip_detector(image, candidate_labels=prompts)
    clip_scores = {pred['label']: pred['score'] for pred in clip_preds}

    return {
        "ViT Classification": vit_scores,
        "CLIP Zero-Shot Classification": clip_scores,
    }
|
27 |
+
|
28 |
+
# Example images for the Gradio UI.
# NOTE(review): this commit uploads the image files at the repository root
# (butterfly.jpg, cat.jpeg, chicken1.jpeg, chicken2.jpeg, elefant.jpg,
# spider.jpg), but the original example list pointed at an
# "example_images/" directory and at "spider.jpeg" (wrong extension) —
# paths that do not exist in the repo, so example loading would fail.
# Paths below match the uploaded files exactly.
example_images = [
    ["cat.jpeg"],
    ["chicken1.jpeg"],
    ["chicken2.jpeg"],
    ["elefant.jpg"],   # filename as uploaded (sic)
    ["spider.jpg"],    # was "spider.jpeg" — the uploaded file is .jpg
    ["butterfly.jpg"],
]

# Single image input, JSON output containing both models' label->score maps.
iface = gr.Interface(
    fn=classify_animal,
    inputs=gr.Image(type="filepath"),  # pipelines accept a file path directly
    outputs=gr.JSON(),
    title="Animals-10 Classification: ViT vs CLIP",
    description="Upload an animal image to compare predictions from your trained ViT model and a zero-shot CLIP model.",
    examples=example_images,
)

iface.launch()
|
butterfly.jpg
ADDED
![]() |
cat.jpeg
ADDED
![]() |
chicken1.jpeg
ADDED
![]() |
chicken2.jpeg
ADDED
![]() |
elefant.jpg
ADDED
![]() |
spider.jpg
ADDED
![]() |