Create app.py
app.py
ADDED
import gradio as gr
from transformers import pipeline

# Zero-shot image-classification pipelines, keyed by the model names shown in the UI.
pipes = {
    "ViT/B-16": pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch16"),
    "ViT/L-14": pipeline("zero-shot-image-classification", model="openai/clip-vit-large-patch14"),
}

inputs = [
    gr.Image(type="pil", label="Image"),
    gr.Textbox(lines=1, label="Candidate Labels"),
    # Offer only the checkpoints actually loaded in `pipes`; the original choices
    # "ViT/L-14@336px" and "ViT/H-14" had no pipeline and raised a KeyError when selected.
    gr.Radio(choices=list(pipes.keys()), value="ViT/B-16", label="Model"),
    gr.Textbox(lines=1, label="Prompt Template", value="a photo of a {}"),
]

def shot(image, labels_text, model_name, hypothesis_template):
    # Split the comma-separated label string into a clean list of candidates.
    labels = [label.strip() for label in labels_text.strip().split(",")]
    res = pipes[model_name](images=image,
                            candidate_labels=labels,
                            hypothesis_template=hypothesis_template)
    # The pipeline returns a list of {"label": ..., "score": ...} dicts;
    # gr.Label expects a {label: score} mapping.
    return {dic["label"]: dic["score"] for dic in res}

iface = gr.Interface(shot,
                     inputs,
                     "label",
                     examples=[["festival.jpg", "lantern, firecracker, couplet", "ViT/B-16", "a photo of a {}"]],
                     # Examples left over from the original Chinese-CLIP demo (labels translated from Chinese):
                     # ["cat-dog-music.png", "musical performance, sports", "ViT/B-16", "a photo of a {}"],
                     # ["football-match.jpg", "Messi, Ronaldo, Maguire", "ViT/B-16", "a photo of a {}"],
                     description="""<p>Chinese CLIP is a contrastive-learning-based vision-language foundation model pretrained on large-scale Chinese data. For more information, please refer to the paper and the official GitHub repository. Chinese CLIP has also been merged into Hugging Face Transformers! <br><br>
                     Paper: <a href='https://arxiv.org/abs/2211.01335'>https://arxiv.org/abs/2211.01335</a> <br>
                     Github: <a href='https://github.com/OFA-Sys/Chinese-CLIP'>https://github.com/OFA-Sys/Chinese-CLIP</a> (Welcome to star! 🔥🔥) <br><br>
                     To play with this demo, add a picture and a list of candidate labels separated by commas. You can click the examples at the bottom of the page for reference.<br>
                     You can duplicate this space and run it privately: <a href='https://huggingface.co/spaces/OFA-Sys/chinese-clip-zero-shot-image-classification?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>""",
                     title="Zero-shot Image Classification")

iface.launch()
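For a quick sanity check outside the Gradio UI, the same Transformers pipeline can be called directly. Below is a minimal sketch assuming the openai/clip-vit-base-patch16 checkpoint used by the app and a hypothetical local image file test.jpg:

from transformers import pipeline

# Same zero-shot classifier the Space loads for "ViT/B-16".
clf = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch16")

# "test.jpg" is a placeholder; any local image path or URL works.
results = clf("test.jpg",
              candidate_labels=["lantern", "firecracker", "couplet"],
              hypothesis_template="a photo of a {}")

# Each result is a {"label": ..., "score": ...} dict, matching what `shot`
# converts into the {label: score} mapping that gr.Label renders.
for r in results:
    print(f"{r['label']}: {r['score']:.3f}")

The hypothesis_template is the sentence each candidate label is substituted into before CLIP scores image-text similarity, so changing the template can noticeably shift the scores.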