# Hugging Face Space: image captioning demo with OpenCLIP CoCa
import open_clip
from PIL import Image
import gradio as gr

# Load the CoCa (Contrastive Captioner) model and its image preprocessing
# transform once at startup so every request reuses the same weights.
# NOTE: the open_clip factory function is `create_model_and_transforms`
# (plural); it returns (model, train_transform, eval_transform) and we keep
# the eval transform for inference.
model, _, transform = open_clip.create_model_and_transforms(
    model_name="coca_ViT-L-14",
    pretrained="laion2B-s13B-b90k"
)
def generate_caption(image):
    """Generate a text caption for a PIL image using the CoCa model.

    Args:
        image: ``PIL.Image`` supplied by the Gradio Image component
            (``type="pil"``).

    Returns:
        str: the decoded caption with tokenizer special markers removed.
    """
    im = image.convert("RGB")
    # Preprocess and add a batch dimension -> shape (1, C, H, W).
    im = transform(im).unsqueeze(0)
    generated = model.generate(im)
    # Detach from the autograd graph before decoding.
    caption = open_clip.decode(generated[0].detach())
    # open_clip.decode keeps the tokenizer's special tokens; strip them so
    # the user-visible caption is clean text only.
    caption = caption.split("<end_of_text>")[0]
    caption = caption.replace("<start_of_text>", "")
    return caption.strip()
# Build the Gradio UI: an image input and a caption output side by side,
# wired to generate_caption via a button click.
with gr.Blocks() as demo:
    gr.Markdown("## Captioning with OpenCLIP CoCa")
    with gr.Row():
        inp = gr.Image(label="Image to Caption", type="pil")
        out = gr.Textbox(label="Caption")
    btn = gr.Button("Generate caption")
    btn.click(fn=generate_caption, inputs=inp, outputs=out)

demo.launch()