"""Streamlit app: fast CPU-only text-to-image generation with SD-Turbo.

Loads the ``stabilityai/sd-turbo`` pipeline once (cached across reruns via
``st.cache_resource``) and renders a prompt box, a guidance-scale slider,
and a generate/download flow.
"""

import io
import os

# Force CPU usage. This must run BEFORE importing torch so that CUDA
# device visibility is fixed before any CUDA initialization can occur.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import streamlit as st
import torch
from diffusers import AutoPipelineForText2Image


@st.cache_resource
def load_model():
    """Load and cache the SD-Turbo text-to-image pipeline on CPU.

    Returns:
        The ``AutoPipelineForText2Image`` pipeline, using float32 weights
        (half precision is not generally supported on CPU).
    """
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sd-turbo",
        torch_dtype=torch.float32,  # CPU-compatible
    )
    pipe.to("cpu")
    return pipe


st.title("⚡ Fast AI Image Generator (under 1 minute)")

prompt = st.text_input(
    "Enter your prompt:",
    "A glowing alien city with floating islands and neon rivers, concept art, 8K",
)
guidance = st.slider(
    "Guidance scale (higher = more faithful to prompt)", 1.0, 10.0, 3.0
)

if st.button("Generate Image"):
    with st.spinner("Generating image (approx. 20–40 seconds on CPU)..."):
        pipe = load_model()
        result = pipe(prompt, guidance_scale=guidance, num_inference_steps=20)
        image = result.images[0]

        # use_container_width replaces the deprecated use_column_width.
        st.image(image, caption="Generated Image", use_container_width=True)

        # Serialize to PNG in memory so the browser can download it.
        buf = io.BytesIO()
        image.save(buf, format="PNG")
        st.download_button("Download Image", buf.getvalue(), "generated.png", "image/png")