"""Gradio demo: generate an image from a text prompt with the CQI model.

The model emits a base64-encoded PNG embedded as a data URI inside its text
output; we extract the payload and decode it into a PIL image for display.
"""

import base64
import io

import gradio as gr
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "cloudqi/cqi_text_to_image_pt_v0"

# Data-URI prefix the model is expected to emit before the PNG payload
# (NOTE(review): assumed from the original parsing logic — confirm against
# actual model output).
B64_PNG_PREFIX = "data:image/png;base64,"

# Load once at module level so every request reuses the same weights.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()  # inference only: disable dropout / training-mode layers
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)


def generate_image(prompt: str) -> Image.Image:
    """Run the model on *prompt* and return the decoded PNG as a PIL image.

    Args:
        prompt: Free-form text description of the desired image.

    Returns:
        The image decoded from the model's base64 data-URI output.

    Raises:
        gr.Error: If the model output contains no base64 PNG payload.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # no_grad: we never backpropagate, so skip building the autograd graph
    # (saves memory and time during generation).
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_length=256)
    output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    if B64_PNG_PREFIX in output_str:
        # maxsplit=1: take everything after the FIRST prefix occurrence.
        b64_img = output_str.split(B64_PNG_PREFIX, 1)[1]
        image_data = base64.b64decode(b64_img)
        return Image.open(io.BytesIO(image_data))

    # BUG FIX: the original returned a plain string here, which the "image"
    # output component cannot render. Raise gr.Error so the failure is
    # shown to the user in the UI instead.
    raise gr.Error("No image found in output.")


demo = gr.Interface(
    fn=generate_image,
    inputs="text",
    outputs="image",
    title="CQI Text-to-Image",
)

# Guard the launch so importing this module (e.g. for testing) does not
# start a server; running the file as a script behaves as before.
if __name__ == "__main__":
    demo.launch()