# Mmm / app.py
# Ashrafb's picture — Create app.py
# 31a04a4 | raw | history | blame | 953 Bytes
# (Hugging Face Space page header, preserved as a comment so the file parses.)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Hugging Face Hub model id. NOTE(review): despite the "text_to_image" name it is
# loaded via AutoModelForSeq2SeqLM below, so generate() produces token ids —
# confirm this checkpoint actually emits image tensors before relying on it.
MODEL_NAME = "cloudqi/cqi_text_to_image_pt_v0"
# Loaded once at import time (downloads from the Hub on first run) so every
# Gradio request reuses the same tokenizer and weights.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
def generate_images(description):
    """Generate one image array from a text description.

    Args:
        description: Free-text prompt typed by the user.

    Returns:
        A ``uint8`` numpy array for Gradio's Image output. If the model emits a
        channel-first ``(C, H, W)`` tensor it is converted to ``(H, W, C)``.
    """
    input_ids = tokenizer.encode(description, return_tensors="pt")
    # Inference only — no gradients needed; avoids building an autograd graph.
    with torch.no_grad():
        # Model generates a batch of one sample.
        output = model.generate(input_ids)
    # detach + cpu so .numpy() works even if the tensor lives on an accelerator
    # or carries grad metadata.
    arr = output[0].detach().cpu().numpy()
    # BUG in original: generate() on a seq2seq LM returns token ids, i.e. a 1-D
    # array per sample, so an unconditional transpose(1, 2, 0) raised
    # "axes don't match array". Only transpose when we truly got (C, H, W).
    # NOTE(review): if this checkpoint returns ids rather than pixels, the
    # output still won't be a meaningful image — confirm the model's contract.
    if arr.ndim == 3:
        arr = arr.transpose(1, 2, 0)
    return arr.astype("uint8")
# Gradio UI wiring.
# FIX: `gr.inputs.Textbox(prompt=...)` was doubly wrong — the kwarg is `label`,
# and the `gr.inputs`/`gr.outputs` namespaces were removed in Gradio 4; use the
# top-level component classes instead.
inputs = gr.Textbox(label="Enter Text Description")
outputs = gr.Image(label="Generated Image")
iface = gr.Interface(
    fn=generate_images,
    inputs=inputs,
    outputs=outputs,
    title="Description to Image",
)
# NOTE(review): the original set `iface.disable_caching = True`, claiming it
# forced a model reload per visit. That attribute is not part of the Gradio API
# and had no effect (the model is loaded once at import time regardless), so it
# was dropped.

if __name__ == "__main__":
    iface.launch()