kevalfst committed
Commit 39c1d2f · verified · 1 Parent(s): 8f45be3

Update app.py

Files changed (1): app.py +4 -6
app.py CHANGED
@@ -2,14 +2,12 @@ import torch
 from diffusers import StableDiffusionPipeline
 import gradio as gr
 
-# Use GPU if available
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Load Stable Diffusion v1.5 from Hugging Face
+# Load Stable Diffusion v1.4 from Hugging Face
 pipe = StableDiffusionPipeline.from_pretrained(
-    "stable-diffusion-v1-5/stable-diffusion-v1-5",
+    "CompVis/stable-diffusion-v1-4",
     torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-    revision="fp16" if device == "cuda" else None,
     use_safetensors=True
 )
 pipe = pipe.to(device)
@@ -20,10 +18,10 @@ def generate(prompt, guidance, steps, width, height):
     return image
 
 # Gradio UI
-title = "🎨 Offline Text-to-Image Generator (Stable Diffusion v1.5)"
+title = "🎨 Offline Text-to-Image Generator (Stable Diffusion v1.4)"
 description = "Generate images from text prompts using a fully self-hosted Stable Diffusion model."
 
-with gr.Blocks() as demo:
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="pink")) as demo:
     gr.Markdown(f"# {title}")
     gr.Markdown(description)
 
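For reference, a minimal sketch of what app.py might look like after this commit. Only the lines visible in the diff are confirmed; the body of generate() and the widget layout inside the Blocks context are assumptions filled in for illustration.

```python
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load Stable Diffusion v1.4 from Hugging Face
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    use_safetensors=True
)
pipe = pipe.to(device)

def generate(prompt, guidance, steps, width, height):
    # Assumed body: run the pipeline with the UI parameters and return the first image.
    image = pipe(
        prompt,
        guidance_scale=guidance,
        num_inference_steps=int(steps),
        width=int(width),
        height=int(height),
    ).images[0]
    return image

# Gradio UI
title = "🎨 Offline Text-to-Image Generator (Stable Diffusion v1.4)"
description = "Generate images from text prompts using a fully self-hosted Stable Diffusion model."

with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="pink")) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)
    # Assumed inputs/outputs; the actual widgets are not shown in the diff.
    prompt = gr.Textbox(label="Prompt")
    guidance = gr.Slider(1.0, 15.0, value=7.5, label="Guidance scale")
    steps = gr.Slider(10, 50, value=25, step=1, label="Steps")
    width = gr.Slider(256, 768, value=512, step=64, label="Width")
    height = gr.Slider(256, 768, value=512, step=64, label="Height")
    output = gr.Image(label="Result")
    gr.Button("Generate").click(generate, [prompt, guidance, steps, width, height], output)

if __name__ == "__main__":
    demo.launch()
```

Dropping the `revision="fp16"` argument is consistent with current diffusers usage, where `torch_dtype=torch.float16` alone selects half-precision weights on GPU.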