# Offline text-to-image generator: Stable Diffusion v1.4 served through a Gradio UI.
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Prefer the GPU when one is visible to torch; otherwise run on the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Half precision only makes sense on CUDA; CPUs need full float32.
dtype = torch.float16 if device == "cuda" else torch.float32

# Pull Stable Diffusion v1.4 from the Hugging Face hub (cached locally after
# the first download) and move every sub-model onto the chosen device.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=dtype,
    use_safetensors=True,
).to(device)

# Inference function
def generate(prompt, guidance, steps, width, height, seed=None):
    """Generate one image from a text prompt with the module-level pipeline.

    Args:
        prompt: Text description of the desired image.
        guidance: Classifier-free guidance scale (higher = follow prompt more).
        steps: Number of denoising steps; cast to int below.
        width: Output width in pixels; cast to int below.
        height: Output height in pixels; cast to int below.
        seed: Optional integer seed for reproducible output; None = random.

    Returns:
        A PIL.Image with the first (and only) generated sample.
    """
    # Reproducibility: seed a generator on the pipeline's device when asked.
    generator = None
    if seed is not None:
        generator = torch.Generator(device=device).manual_seed(int(seed))
    # Gradio sliders may deliver floats; diffusers requires integers for
    # num_inference_steps and the spatial dimensions.
    result = pipe(
        prompt=prompt,
        guidance_scale=guidance,
        num_inference_steps=int(steps),
        height=int(height),
        width=int(width),
        generator=generator,
    )
    return result.images[0]

# Gradio UI
title = "🎨 Offline Text-to-Image Generator (Stable Diffusion v1.4)"
description = "Generate images from text prompts using a fully self-hosted Stable Diffusion model."

# Two-column layout: controls on the left, the rendered image on the right.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="pink")) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)

    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(
                label="Enter your prompt",
                placeholder="A steampunk dragon flying over a futuristic city",
            )
            guidance_slider = gr.Slider(1, 20, value=7.5, step=0.5, label="Guidance Scale")
            steps_slider = gr.Slider(10, 100, value=30, step=5, label="Inference Steps")
            width_slider = gr.Slider(256, 768, value=512, step=64, label="Image Width")
            height_slider = gr.Slider(256, 768, value=512, step=64, label="Image Height")
            generate_btn = gr.Button("Generate Image")

        with gr.Column():
            result_image = gr.Image(label="Generated Image")

    # Wire the button to the inference function; argument order must match
    # generate(prompt, guidance, steps, width, height).
    generate_btn.click(
        fn=generate,
        inputs=[prompt_box, guidance_slider, steps_slider, width_slider, height_slider],
        outputs=result_image,
    )

demo.launch()