import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from functools import lru_cache

# A small, distilled Stable Diffusion checkpoint so the app can run on a
# free-tier CPU instance; any compact text-to-image SD checkpoint on the Hub
# would work here.
MODEL_ID = "segmind/tiny-sd"


@lru_cache(maxsize=1)  # Load the pipeline once and reuse it across requests
def load_model():
    pipe = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,  # float16 is poorly supported on CPU
        safety_checker=None,
        use_safetensors=True,
    )
    pipe = pipe.to("cpu")
    pipe.enable_attention_slicing()  # Trades some speed for lower peak memory
    return pipe


def generate_character(prompt, seed=42):
    try:
        pipe = load_model()
        generator = torch.Generator(device="cpu").manual_seed(seed)
        image = pipe(
            prompt=f"pixel art {prompt}, clean lines, vibrant colors",
            num_inference_steps=20,
            guidance_scale=7.0,
            width=256,
            height=256,
            generator=generator,
        ).images[0]
        return image
    except Exception as e:
        # The output component expects a PIL image, so report failures through
        # Gradio's error mechanism rather than returning a string.
        raise gr.Error(f"Generation failed: {e}. Try a simpler prompt.")


with gr.Blocks(theme=gr.themes.Default()) as demo:
    gr.Markdown("# 🎮 Lightweight Character Generator")

    with gr.Row():
        prompt = gr.Textbox(
            label="Describe your character",
            placeholder="e.g. 'robot pirate with laser eye'",
            max_lines=2,
        )
        generate_btn = gr.Button("Generate", variant="primary")

    output = gr.Image(label="Your Character", type="pil")

    generate_btn.click(
        generate_character,
        inputs=prompt,
        outputs=output,
    )

demo.launch(debug=False)
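
# Running the script locally assumes gradio, torch, diffusers, transformers,
# and safetensors are installed (diffusers also typically recommends
# accelerate). Start it with Python and open the local URL Gradio prints,
# http://127.0.0.1:7860 by default.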
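#
# If a GPU happens to be available (not assumed by the app above), the loader
# could pick the device dynamically instead of pinning everything to the CPU.
# A minimal sketch of that variant:
#
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     dtype = torch.float16 if device == "cuda" else torch.float32
#     pipe = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=dtype)
#     pipe = pipe.to(device)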