Update app.py

app.py CHANGED

@@ -229,13 +229,13 @@ def load_models():
     models = {}

     try:
-        # Use
+        # Use a lightweight model that works well on CPU
         models['text_to_image'] = DiffusionPipeline.from_pretrained(
             "runwayml/stable-diffusion-v1-5",
-            torch_dtype=torch.float32, # Use float32
-            safety_checker=None # Disable safety checker for
+            torch_dtype=torch.float32, # Use float32 for CPU compatibility
+            safety_checker=None # Disable safety checker for faster performance
         )
-        #
+        # Use GPU if available, otherwise CPU
         device = "cuda" if torch.cuda.is_available() else "cpu"
         models['text_to_image'].to(device)
     except Exception as e:
@@ -291,13 +291,13 @@ def create_story_scene(story, concept, models):
     style = "cartoon style, bright colors, children's book illustration"
     prompt = f"{story}. {CONCEPTS[concept]['name']} concept. {style}"

-    # Generate image with
+    # Generate image with optimized settings
     image = models['text_to_image'](
         prompt,
-        num_inference_steps=
+        num_inference_steps=20, # Reduced steps for faster generation
         guidance_scale=7.5,
-        height=
-        width=
+        height=384, # Smaller image for faster generation
+        width=384
     ).images[0]

     # Convert to bytes
@@ -473,9 +473,6 @@ def main():
     st.title("🧙‍♂️ StoryCoder - Learn Python Through Stories!")
     st.subheader("Turn your story into an animation and discover coding secrets!")

-    # Load AI models
-    ai_models = load_models()
-
     # Initialize session state
     if 'story' not in st.session_state:
         st.session_state.story = ""
@@ -548,7 +545,11 @@ def main():
         # Get the main concept
         main_concept = st.session_state.concepts[0] if st.session_state.concepts else "variable"

-
+        # Load models only when needed
+        with st.spinner("🚀 Loading AI models..."):
+            ai_models = load_models()
+
+        with st.spinner("🎨 Generating AI story scene (this may take 10-20 seconds)..."):
            st.session_state.story_scene = create_story_scene(
                story, main_concept, ai_models
            )
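
Taken together, the hunks defer the Stable Diffusion pipeline load from the top of main() to the moment a scene is actually generated, wrap both steps in st.spinner, and trim the generation settings (20 inference steps, 384x384 output) so the Space stays responsive on CPU. Below is a minimal, self-contained sketch of that deferred-loading pattern under stated assumptions: the @st.cache_resource decorator and the generate_scene() helper are illustrative additions and are not part of this commit.

```python
# A minimal sketch of the deferred-loading pattern, not the Space's full app.py.
# Assumptions beyond the diff: the @st.cache_resource decorator and the
# generate_scene() helper are illustrative additions.
import streamlit as st
import torch
from diffusers import DiffusionPipeline


@st.cache_resource  # assumption: cache so the pipeline is built once per server process
def load_models():
    models = {}
    try:
        # CPU-friendly settings mirrored from the diff: float32 weights, no safety checker
        models['text_to_image'] = DiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            torch_dtype=torch.float32,
            safety_checker=None,
        )
        device = "cuda" if torch.cuda.is_available() else "cpu"
        models['text_to_image'].to(device)
    except Exception as e:
        st.error(f"Model loading failed: {e}")
    return models


def generate_scene(prompt, models):
    # Reduced step count and a 384x384 canvas keep CPU generation tolerable
    return models['text_to_image'](
        prompt,
        num_inference_steps=20,
        guidance_scale=7.5,
        height=384,
        width=384,
    ).images[0]


if st.button("Generate scene"):
    with st.spinner("Loading AI models..."):
        models = load_models()
    if 'text_to_image' in models:
        with st.spinner("Generating AI story scene (this may take a while on CPU)..."):
            st.image(generate_scene("a friendly robot, cartoon style, bright colors", models))
```

Since Streamlit reruns the script on every interaction, the old placement meant load_models() ran on each rerun; after this commit it runs only when a scene is generated. A resource cache such as the st.cache_resource shown above would additionally keep the pipeline in memory across reruns, but that is an optimization beyond what the diff itself does.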