Update app.py
app.py CHANGED
@@ -1,6 +1,6 @@
+# app.py - Final Version with AI-Powered Animations
 # Fix for huggingface_hub compatibility
 import huggingface_hub
-
 if not hasattr(huggingface_hub, 'cached_download'):
     from huggingface_hub import hf_hub_download
     huggingface_hub.cached_download = hf_hub_download
@@ -25,9 +25,6 @@ import torch
 from diffusers import DiffusionPipeline, StableDiffusionPipeline
 import torch
 from transformers import pipeline
-from huggingface_hub import HfFolder, hf_hub_download, model_info
-
-
 
 # Configure Streamlit page
 st.set_page_config(
@@ -232,11 +229,15 @@ def load_models():
     models = {}
 
     try:
-        #
+        # Use CPU-friendly model and float32 precision
         models['text_to_image'] = DiffusionPipeline.from_pretrained(
-            "
-            torch_dtype=torch.float16
+            "runwayml/stable-diffusion-v1-5",
+            torch_dtype=torch.float32,  # Use float32 instead of float16
+            safety_checker=None  # Disable safety checker for simplicity
         )
+        # Move to GPU if available, otherwise keep on CPU
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        models['text_to_image'].to(device)
     except Exception as e:
         st.error(f"Could not load text-to-image model: {str(e)}")
         models['text_to_image'] = None
@@ -290,8 +291,14 @@ def create_story_scene(story, concept, models):
     style = "cartoon style, bright colors, children's book illustration"
     prompt = f"{story}. {CONCEPTS[concept]['name']} concept. {style}"
 
-    # Generate image
-    image = models['text_to_image'](
+    # Generate image with fixed settings
+    image = models['text_to_image'](
+        prompt,
+        num_inference_steps=25,  # Fewer steps for faster generation
+        guidance_scale=7.5,
+        height=512,
+        width=512
+    ).images[0]
 
     # Convert to bytes
     buf = io.BytesIO()
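For local testing, here is a minimal standalone sketch of the updated flow outside Streamlit. The model id, dtype, device handling, and generation settings come from the diff above; the script layout, function names, and the PNG save step are illustrative assumptions, and only diffusers and torch are required. (The cached_download shim in the first hunk matters only when the installed diffusers release still imports cached_download from huggingface_hub; because app.py patches the attribute before importing diffusers, that older import keeps resolving.)

# sketch_local_test.py - illustrative sketch, not part of app.py
import io

import torch
from diffusers import DiffusionPipeline


def load_text_to_image():
    # Mirrors the updated load_models() branch: float32 weights, GPU only if present.
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float32,  # float32 keeps CPU inference working
        safety_checker=None         # same simplification as in the diff
    )
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return pipe.to(device)


def render_scene_png(pipe, prompt):
    # Mirrors the updated create_story_scene() call, then converts to PNG bytes.
    image = pipe(
        prompt,
        num_inference_steps=25,  # fewer steps for faster generation
        guidance_scale=7.5,
        height=512,
        width=512
    ).images[0]
    buf = io.BytesIO()
    image.save(buf, format="PNG")  # save step is assumed; app.py may differ
    return buf.getvalue()


if __name__ == "__main__":
    pipe = load_text_to_image()
    png_bytes = render_scene_png(pipe, "a friendly robot counting apples, cartoon style")
    with open("scene.png", "wb") as f:
        f.write(png_bytes)

Note that float32 roughly doubles the pipeline's memory footprint compared with float16, but half precision is poorly supported for CPU inference in PyTorch, which is why the change also drops the hard-coded float16.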