import io
import math

import gradio as gr
import matplotlib

matplotlib.use("Agg")  # non-interactive backend; no display is available on Spaces

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    AutoModelForCausalLM,
)

# Load the FG-CLIP checkpoint; trust_remote_code is required because the
# model class lives in the model repository, not in transformers itself.
model_root = "qihoo360/fg-clip-base"
model = AutoModelForCausalLM.from_pretrained(model_root, trust_remote_code=True)
device = model.device
tokenizer = AutoTokenizer.from_pretrained(model_root)
image_processor = AutoImageProcessor.from_pretrained(model_root)
def postprocess_result(probs, labels):
    pro_output = {labels[i]: probs[i] for i in range(len(labels))}
    return pro_output
def Retrieval(image, candidate_labels):
    """
    Takes a PIL image and a list of candidate labels, and returns the
    softmax-normalized image-text similarity scores.
    """
    image_size = 224
    image = image.convert("RGB")
    image = image.resize((image_size, image_size))
    image_input = image_processor.preprocess(image, return_tensors="pt")["pixel_values"].to(device)

    # walk_short_pos=True selects FG-CLIP's short-caption positional
    # embeddings, which suit label-style prompts.
    walk_short_pos = True
    caption_input = torch.tensor(
        tokenizer(candidate_labels, max_length=77, padding="max_length", truncation=True).input_ids,
        dtype=torch.long,
        device=device,
    )

    with torch.no_grad():
        image_feature = model.get_image_features(image_input)
        text_feature = model.get_text_features(caption_input, walk_short_pos=walk_short_pos)

    # L2-normalize both embeddings so the dot product is a cosine similarity,
    # then scale by the learned temperature before the softmax.
    image_feature = image_feature / image_feature.norm(p=2, dim=-1, keepdim=True)
    text_feature = text_feature / text_feature.norm(p=2, dim=-1, keepdim=True)
    logits_per_image = image_feature @ text_feature.T
    logits_per_image = model.logit_scale.exp() * logits_per_image
    probs = logits_per_image.softmax(dim=1)
    results = probs[0].tolist()
    return results
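# A minimal sketch of calling Retrieval directly, outside the UI. The image
# path is one of the bundled example files; the scores shown are illustrative,
# not guaranteed outputs:
#
#   img = Image.open("./cat.jpg")
#   scores = Retrieval(img, ["two sleeping cats", "two cats playing"])
#   # scores is a list of floats summing to 1, e.g. [0.9, 0.1]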
def infer(image, candidate_labels):
    # Split the comma-separated label string and drop empty/whitespace-only entries.
    candidate_labels = [label.strip() for label in candidate_labels.split(",") if label.strip()]
    fg_probs = Retrieval(image, candidate_labels)
    return postprocess_result(fg_probs, candidate_labels)
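# For example (hypothetical call), infer(img, "a red car, a blue car") returns
# a dict such as {"a red car": 0.93, "a blue car": 0.07}, which is the format
# the gr.Label component expects.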
with gr.Blocks() as demo:
    gr.Markdown("# FG-CLIP Retrieval")
    gr.Markdown(
        "This app uses the FG-CLIP model (qihoo360/fg-clip-base) for retrieval on CPU."
    )
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil")
            text_input = gr.Textbox(label="Input a list of labels (comma separated)")
            run_button = gr.Button("Run Retrieval", visible=True)
        with gr.Column():
            fg_output = gr.Label(label="FG-CLIP Output", num_top_classes=11)
    examples = [
        ["./Landscape.jpg", "red grass, yellow grass, green grass"],
        ["./cat.jpg", "two sleeping cats, two cats playing, three cats laying down"],
    ]
    gr.Examples(
        examples=examples,
        inputs=[image_input, text_input],
        outputs=fg_output,
        fn=infer,
    )
    run_button.click(fn=infer, inputs=[image_input, text_input], outputs=fg_output)

demo.launch()
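# demo.launch() serves the app locally; on a Hugging Face Space the platform
# handles hosting. When running the script elsewhere, passing share=True to
# launch() would create a temporary public link.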