Spaces:
Running
Running
Update app.py
#1
by
Muthuraja18
- opened
app.py
CHANGED
@@ -1,38 +1,40 @@
|
|
1 |
-
# app.py
|
2 |
-
|
3 |
-
import streamlit as st
|
4 |
-
from diffusers import StableDiffusionPipeline
|
5 |
-
import torch
|
6 |
-
from PIL import Image
|
7 |
-
import io
|
8 |
-
|
9 |
-
# Load model
|
10 |
-
@st.cache_resource
|
11 |
-
def load_model():
|
12 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
13 |
-
"runwayml/stable-diffusion-v1-5", # You can change
|
14 |
-
torch_dtype=torch.
|
15 |
-
)
|
16 |
-
return pipe.to("
|
17 |
-
|
18 |
-
# Streamlit
|
19 |
-
st.title("π§ AI Image Generator (
|
20 |
-
st.markdown("Generate original, multidimensional images using Stable Diffusion!")
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
|
|
|
|
|
1 |
+
# app.py
|
2 |
+
|
3 |
+
import streamlit as st
|
4 |
+
from diffusers import StableDiffusionPipeline
|
5 |
+
import torch
|
6 |
+
from PIL import Image
|
7 |
+
import io
|
8 |
+
|
# Build the Stable Diffusion pipeline once; Streamlit caches the resource
# so reruns of the script reuse the same loaded model.
@st.cache_resource
def load_model():
    """Return a CPU-hosted Stable Diffusion pipeline (cached across reruns)."""
    pipeline = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # You can change to another model if needed
        torch_dtype=torch.float32,  # Use float32 on CPU
    )
    return pipeline.to("cpu")
17 |
+
|
# Streamlit UI
# NOTE(review): the title/markdown strings below contain what looks like
# mojibake ("π§", "β") from an emoji and a dash — kept byte-identical here;
# confirm intended characters with the author before normalizing.
st.title("π§ AI Image Generator (CPU-Compatible)")
st.markdown("Generate original, multidimensional images using Stable Diffusion β no GPU required!")

# Prompt and settings
prompt = st.text_area("Enter your creative prompt:",
                      "A multi-dimensional alien city with glowing fractals, floating geometry, cosmic lighting, 8K resolution")

guidance = st.slider("Creativity (Guidance Scale)", 1.0, 20.0, 8.5)

# Generate button
if st.button("Generate Image"):
    with st.spinner("Generating image. This may take a few minutes on CPU..."):
        # Model is loaded lazily on first click; @st.cache_resource makes
        # subsequent generations skip the (slow) load.
        pipe = load_model()
        image = pipe(prompt, guidance_scale=guidance).images[0]

    # Fix: `use_column_width` is deprecated in Streamlit — `use_container_width`
    # is the documented drop-in replacement with the same layout behavior.
    st.image(image, caption="Generated Image", use_container_width=True)

    # Save for download: serialize the PIL image to an in-memory PNG buffer
    # so st.download_button can hand the raw bytes to the browser.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    byte_im = buf.getvalue()
    st.download_button("Download Image", byte_im, "generated.png", "image/png")