Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ import requests
 from io import BytesIO
 import json
 import torch
-from diffusers import DiffusionPipeline
+from diffusers import DiffusionPipeline
 import torch
 from transformers import pipeline
 
@@ -229,13 +229,12 @@ def load_models():
     models = {}
 
     try:
-        # Use a
+        # Use a fast, optimized model
         models['text_to_image'] = DiffusionPipeline.from_pretrained(
-            "
-            torch_dtype=torch.float32,
-            safety_checker=None
+            "OFA-Sys/small-stable-diffusion-v0",  # Optimized for speed
+            torch_dtype=torch.float32,
+            safety_checker=None
         )
-        # Use GPU if available, otherwise CPU
         device = "cuda" if torch.cuda.is_available() else "cpu"
         models['text_to_image'].to(device)
     except Exception as e:
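For reference, a minimal standalone sketch of what the updated model-loading block boils down to. The model name, dtype, safety_checker, and device selection are taken from the hunk above; the return value and the body of the exception handler are assumptions, since the original handler is outside this hunk.

import torch
from diffusers import DiffusionPipeline

def load_models():
    models = {}
    try:
        # Small Stable Diffusion variant chosen in this commit for faster generation
        models['text_to_image'] = DiffusionPipeline.from_pretrained(
            "OFA-Sys/small-stable-diffusion-v0",
            torch_dtype=torch.float32,
            safety_checker=None,
        )
        # Use GPU if available, otherwise fall back to CPU
        device = "cuda" if torch.cuda.is_available() else "cpu"
        models['text_to_image'].to(device)
    except Exception as e:
        # Assumed fallback; the app's real handler is not shown in the diff
        print(f"Model loading failed: {e}")
        models['text_to_image'] = None
    return models  # assumed: main() stores this as ai_models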
@@ -294,10 +293,10 @@ def create_story_scene(story, concept, models):
     # Generate image with optimized settings
     image = models['text_to_image'](
         prompt,
-        num_inference_steps=
+        num_inference_steps=15,  # Faster generation
         guidance_scale=7.5,
-        height=
-        width=
+        height=256,  # Smaller image for faster generation
+        width=256
     ).images[0]
 
     # Convert to bytes
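A hedged sketch of the generation call after this change, including the bytes conversion that the trailing comment points to. The wrapper name generate_scene_image is made up for illustration; in the app, prompt construction and the models dict come from create_story_scene and load_models.

from io import BytesIO

def generate_scene_image(models, prompt):
    # Fewer inference steps and a 256x256 canvas trade quality for speed
    image = models['text_to_image'](
        prompt,
        num_inference_steps=15,
        guidance_scale=7.5,
        height=256,
        width=256,
    ).images[0]

    # Convert the PIL image to bytes, e.g. for st.image or caching
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    return buffer.getvalue()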
@@ -549,7 +548,7 @@ def main():
     with st.spinner("π Loading AI models..."):
         ai_models = load_models()
 
-    with st.spinner("π¨ Generating AI story scene (this may take 10
+    with st.spinner("π¨ Generating AI story scene (this may take 5-10 seconds)..."):
         st.session_state.story_scene = create_story_scene(
             story, main_concept, ai_models
         )
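The spinner change is only a message tweak. For completeness, a tiny self-contained Streamlit sketch of the same pattern; time.sleep stands in for the real load_models and create_story_scene calls, and the emoji from the original strings did not survive extraction, so it is omitted here.

import time
import streamlit as st

with st.spinner("Loading AI models..."):
    time.sleep(1)  # stand-in for load_models()

with st.spinner("Generating AI story scene (this may take 5-10 seconds)..."):
    time.sleep(1)  # stand-in for create_story_scene(story, main_concept, ai_models)
    st.session_state["story_scene"] = None  # stand-in result

st.success("Scene ready!")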
@@ -580,19 +579,19 @@ def main():
         st.caption("Loop Example")
         st.code('"A dragon breathes fire 5 times at the castle"', language="text")
         st.image(create_animation_preview("", "loop"),
-
+                 use_container_width=True,
                  caption="Loop Animation Preview")
     with col2:
         st.caption("Conditional Example")
         st.code('"If it rains, the cat stays inside, else it goes out"', language="text")
         st.image(create_animation_preview("", "conditional"),
-
+                 use_container_width=True,
                  caption="Conditional Animation Preview")
     with col3:
         st.caption("Function Example")
         st.code('"A wizard casts a spell to make flowers grow"', language="text")
         st.image(create_animation_preview("", "function"),
-
+                 use_container_width=True,
                  caption="Function Animation Preview")
 
     # Animation tab
@@ -677,7 +676,7 @@ def main():
         # Show animation preview for the concept
         st.image(create_animation_preview("", concept),
                  caption=f"{details['name']} Animation Example",
-
+                 use_container_width=True)
 
         if st.button("See the Magic Code!", use_container_width=True):
             st.session_state.active_tab = "code"
@@ -717,7 +716,7 @@ def main():
         concept = st.session_state.concepts[0] if st.session_state.concepts else "loop"
         st.image(create_animation_preview("", concept),
                  caption="This is similar to what your animation would look like",
-
+                 use_container_width=True)
     else:
         st.warning("No code generated yet!")
 
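The last three hunks make the same change: each st.image call gains use_container_width=True (the argument it replaces did not survive extraction; Streamlit's current API uses use_container_width, and the older use_column_width is deprecated). Below is a small self-contained sketch of the resulting call shape, with a placeholder image standing in for the app's create_animation_preview helper.

import streamlit as st
from PIL import Image

# Placeholder frame; the real app passes the output of create_animation_preview()
preview = Image.new("RGB", (256, 256), color="lightblue")

st.image(preview,
         use_container_width=True,   # scale the preview to the column width
         caption="Loop Animation Preview")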