File size: 952 Bytes
a6520c9
 
 
 
 
 
3908e27
a6520c9
 
 
3908e27
a6520c9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from PIL import Image
import base64
import io

# Hugging Face Hub checkpoint used for text-to-image generation.
MODEL_ID = "cloudqi/cqi_text_to_image_pt_v0"

# Load the model and its matching tokenizer once at import time so every
# request reuses the same in-memory weights.
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

def generate_image(prompt):
    """Generate an image from a text prompt.

    The underlying causal LM emits text that is expected to contain a
    base64-encoded PNG data URI; this decodes it into a PIL image.

    Args:
        prompt: Free-text description of the desired image.

    Returns:
        PIL.Image.Image: The decoded image.

    Raises:
        gr.Error: If the model output contains no recognizable image data,
            so the Gradio UI surfaces a proper error instead of receiving a
            plain string on an ``"image"`` output component (which it
            cannot render).
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # inference_mode: no autograd bookkeeping during generation, saving memory.
    with torch.inference_mode():
        output_ids = model.generate(**inputs, max_length=256)
    output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # The model is expected to embed the image as a PNG data URI; decode it.
    if "data:image/png;base64," in output_str:
        b64_img = output_str.split("data:image/png;base64,")[1]
        image_data = base64.b64decode(b64_img)
        image = Image.open(io.BytesIO(image_data))
        return image
    # Fix: the original returned the string "No image found in output." to an
    # image output, which Gradio cannot display; raise a UI-visible error.
    raise gr.Error("No image found in output.")

# Single-function UI: text prompt in, generated image out.
demo = gr.Interface(
    fn=generate_image,
    inputs="text",
    outputs="image",
    title="CQI Text-to-Image",
)

# Guard the blocking server start so importing this module (e.g. for tests
# or reuse of generate_image) does not launch the web app.
if __name__ == "__main__":
    demo.launch()