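# Gradio Space: zero-shot image classification with CLIP, plus an M2M100
# English-to-Russian translation helper kept from the original code (unused).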
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
from transformers import pipeline
import numpy as np
from PIL import Image
import gradio as gr

# English -> Russian translation model and tokenizer
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

# Zero-shot image classification pipeline backed by CLIP
pipe = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
def shot(image, labels_text):
    # Convert the numpy array supplied by Gradio into a PIL RGB image
    PIL_image = Image.fromarray(np.uint8(image)).convert('RGB')
    # Comma-separated text -> list of candidate labels
    labels = [label.strip() for label in labels_text.split(",")]
    res = pipe(images=PIL_image,
               candidate_labels=labels,
               hypothesis_template="This is a photo of a {}")
    # The pipeline returns a list of {"label", "score"} dicts; map label -> score
    return {dic["label"]: dic["score"] for dic in res}

def translate(text):
    # Translation step kept from the original file, where it sat after the
    # return above and was unreachable; it is not called by the interface.
    tokenizer.src_lang = "en"
    encodedText = tokenizer(text, return_tensors="pt")
    generatedTokens = model.generate(**encodedText, forced_bos_token_id=tokenizer.get_lang_id("ru"))
    return tokenizer.batch_decode(generatedTokens, skip_special_tokens=True)[0]
iface = gr.Interface(shot,
                     ["image", "text"],
                     "label",
                     examples=[["dog.jpg", "dog,cat,bird"]],
                     description="Add a picture and a list of labels separated by commas",
                     title="Zero-shot Image Classification")
iface.launch()