from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
from transformers import pipeline
import numpy as np
from PIL import Image
import gradio as gr

# M2M100 translation model and tokenizer (English -> Russian for label text).
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

# CLIP pipeline for zero-shot image classification.
pipe = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")


def translate_to_russian(text):
    """Translate English text to Russian with M2M100 (e.g. to localize the labels)."""
    tokenizer.src_lang = "en"
    encoded_text = tokenizer(text, return_tensors="pt")
    generated_tokens = model.generate(
        **encoded_text, forced_bos_token_id=tokenizer.get_lang_id("ru")
    )
    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]


def shot(image, labels_text):
    # Gradio hands the image over as a numpy array; convert it to a PIL RGB image.
    pil_image = Image.fromarray(np.uint8(image)).convert("RGB")
    labels = labels_text.split(",")
    res = pipe(
        images=pil_image,
        candidate_labels=labels,
        hypothesis_template="This is a photo of a {}",
    )
    # Map each candidate label to its score, the format the "label" output expects.
    return {dic["label"]: dic["score"] for dic in res}


iface = gr.Interface(
    shot,
    ["image", "text"],
    "label",
    examples=[["dog.jpg", "dog,cat,bird"]],
    description="Add a picture and a list of labels separated by commas",
    title="Zero-shot Image Classification",
)
iface.launch()
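
# A minimal smoke-test sketch, assuming dog.jpg (the example image above) exists
# locally; it exercises both models directly, without launching the Gradio UI:
#
#   img = np.array(Image.open("dog.jpg"))
#   print(shot(img, "dog,cat,bird"))             # label -> score dict
#   print(translate_to_russian("dog,cat,bird"))  # Russian rendering of the labels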