File size: 784 Bytes
0974218
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import gradio as gr
import open_clip
import torch
from PIL import Image

# Load the CoCa captioning model and its image preprocessing transform.
# NOTE: open_clip exposes `create_model_and_transforms` (plural), which
# returns (model, preprocess_train, preprocess_val); the singular name used
# originally does not exist, so the 3-way unpack would fail at import time.
model, _, transform = open_clip.create_model_and_transforms(
  model_name="coca_ViT-L-14",
  pretrained="laion2B-s13B-b90k"
)
model.eval()  # inference only: disable dropout / batch-norm updates

# load an image
def generate_caption(image):
    """Generate a text caption for an image with the CoCa model.

    Args:
        image: a PIL.Image.Image, as supplied by the gradio Image input.

    Returns:
        The decoded caption string with the model's special tokens
        (<start_of_text> / <end_of_text>) stripped.
    """
    rgb = image.convert("RGB")
    # Apply the model's preprocessing and add a batch dimension -> (1, C, H, W).
    batch = transform(rgb).unsqueeze(0)

    # Inference only: no_grad avoids building the autograd graph, which
    # would otherwise waste memory on every request.
    with torch.no_grad():
        generated = model.generate(batch)

    caption = open_clip.decode(generated[0])
    # decode() returns raw token text; drop the special markers so the
    # user sees only the caption itself.
    return caption.split("<end_of_text>")[0].replace("<start_of_text>", "")

# Assemble the demo UI: an image input beside a caption textbox, with a
# button wired to run caption generation.
with gr.Blocks() as demo:
    gr.Markdown("## Captioning with OpenCLIP CoCa")
    with gr.Row():
        image_input = gr.Image(label="Image to Caption", type="pil")
        caption_output = gr.Textbox(label="Caption")
    caption_button = gr.Button("Generate caption")
    caption_button.click(
        fn=generate_caption,
        inputs=image_input,
        outputs=caption_output,
    )

demo.launch()