Update app.py

app.py CHANGED
@@ -148,6 +148,60 @@ def update_image_input(mode):
     else:
         return gr.update(visible=False)
 
+def warmup_model(api_key_input, progress=gr.Progress()):
+    """Warm up the model with a simple request"""
+    token = api_key_input or api_token
+    if not token:
+        return gr.update(visible=True, value="❌ API token required. Please set it in API Settings.")
+
+    replicate.api_token = token
+
+    try:
+        progress(0, desc="Starting model warm-up...")
+
+        # Very simple warm-up request with minimal parameters
+        warm_up_params = {
+            "prompt": "red",  # Extremely simple prompt
+            "duration": 1,  # Minimum duration
+            "resolution": "480p",
+            "aspect_ratio": "16:9",
+            "seed": 42
+        }
+
+        progress(0.5, desc="Sending warm-up request (this may fail, that's normal)...")
+
+        try:
+            output = replicate.run("bytedance/seedance-1-lite", input=warm_up_params)
+            if output:
+                return gr.update(visible=True, value="""✅ **Warm-up successful!**
+
+The model is now awake. You can generate videos normally.
+Note: The model stays active for ~5 minutes, so generate quickly!""")
+        except Exception as e:
+            if "PA" in str(e) or "interrupted" in str(e).lower():
+                return gr.update(visible=True, value="""⏰ **Warm-up request sent!**
+
+The model is waking up. This is normal for a cold start.
+
+**Next steps:**
+1. Wait 2-3 minutes for the model to fully initialize
+2. Try a simple prompt: "a cat walking"
+3. Once that works, use your actual prompt
+
+The wake-up process takes time, but subsequent generations will be fast!""")
+            else:
+                return gr.update(visible=True, value=f"""⚠️ **Warm-up attempted**
+
+Error: {str(e)[:200]}...
+
+**What to do:**
+1. Wait 2-3 minutes
+2. Try generating with a simple prompt
+3. The model might already be warming up!""")
+
+    except Exception as e:
+        return gr.update(visible=True, value=f"❌ Warm-up error: {str(e)[:200]}... Try generating anyway after waiting a minute.")
+
 def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
     """Main video generation function"""
 
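The warm-up path above reduces to one minimal `replicate.run` call. As a rough standalone sketch (assuming the `replicate` Python client is installed and a `REPLICATE_API_TOKEN` environment variable is set; the model slug and parameters are the ones used in this diff), something like the following can poke the model from a terminal:

```python
# Illustrative warm-up call outside Gradio; not part of app.py.
import os
import replicate

def warm_up(token=None):
    # Assumes REPLICATE_API_TOKEN is set if no token is passed explicitly.
    client = replicate.Client(api_token=token or os.environ.get("REPLICATE_API_TOKEN"))
    try:
        # A one-second, low-resolution request is enough to wake the model.
        return client.run(
            "bytedance/seedance-1-lite",
            input={"prompt": "red", "duration": 1, "resolution": "480p",
                   "aspect_ratio": "16:9", "seed": 42},
        )
    except Exception as e:
        # A "Prediction interrupted (code: PA)" style error here usually just
        # means the cold start has begun; retrying after a couple of minutes works.
        print(f"Warm-up request failed (often expected on cold start): {e}")
        return None

if __name__ == "__main__":
    warm_up()
```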
@@ -171,7 +225,7 @@ def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
     # Input parameters setup
     input_params = {
         "prompt": prompt,
-        "duration": 5,
+        "duration": 5,  # Fixed at 5 seconds
         "resolution": "480p",
         "aspect_ratio": aspect_ratio,
         "seed": seed
@@ -204,15 +258,13 @@ def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
 
     progress(0.3, desc="Calling Replicate API...")
 
-    # Run Replicate with retry logic
-    max_attempts = 3
+    # Run Replicate with retry logic and cold start handling
+    max_attempts = 3  # Reduced attempts, focus on user guidance
     output = None
 
     for attempt in range(max_attempts):
         try:
-            # Run
-            start_time = time.time()
-
+            # Run the actual request
             progress(0.3 + attempt * 0.1, desc=f"Generating video... (Attempt {attempt + 1}/{max_attempts})")
 
             # Run the model
@@ -227,38 +279,78 @@ def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
 
         except Exception as e:
             error_str = str(e)
+            print(f"Attempt {attempt + 1} error: {error_str}")  # Debug logging
+
+            # Check for cold start indicators
+            if "Prediction interrupted" in error_str or "code: PA" in error_str or "PA" in error_str:
+                if attempt == 0:
+                    # First cold start - provide immediate guidance
+                    return None, """❌ **Model is cold starting (code: PA)**
+
+**Quick Fix - Follow these steps:**
+
+1️⃣ Click the **"🔥 Warm Up Model"** button above
+2️⃣ Wait for the warm-up to complete (it may also fail, that's OK)
+3️⃣ **Wait 2-3 minutes** for the model to fully initialize
+4️⃣ Try this simple prompt first: **"a cat walking"**
+5️⃣ Once that works, try your original prompt
+
+**Why this happens:** The model goes to sleep after inactivity to save resources. The first request wakes it up, which takes time.
+
+**Alternative:** Try again in 5 minutes when the model should be fully awake."""
+                elif attempt < max_attempts - 1:
+                    # Subsequent attempts - wait and retry
+                    wait_time = 30 + (attempt * 15)
+                    progress(0.3, desc=f"Model still warming up. Waiting {wait_time}s... (Attempt {attempt + 2}/{max_attempts})")
                     time.sleep(wait_time)
                     continue
                 else:
+                    return None, """❌ Model is taking longer than usual to start.
+
+**Please try one of these options:**
+1. Use the "🔥 Warm Up Model" button first
+2. Wait 5 minutes and try again
+3. Try a very simple prompt: "a red circle"
+4. Try during off-peak hours (evening/night US time)
+
+The model needs to 'warm up' after being idle."""
+
             elif "cold boot" in error_str.lower() or "starting" in error_str.lower():
                 if attempt < max_attempts - 1:
+                    progress(0.3, desc=f"Model is booting up, waiting 60s...")
+                    time.sleep(60)
                     continue
+
             elif "timeout" in error_str.lower():
                 if attempt < max_attempts - 1:
+                    progress(0.3, desc=f"Request timed out, retrying... (Attempt {attempt + 2}/{max_attempts})")
+                    time.sleep(15)
                     continue
+
             else:
+                # For any other error, still retry a few times
                 if attempt < max_attempts - 1:
+                    wait_time = 10 + (attempt * 5)
+                    progress(0.3, desc=f"Error occurred, retrying in {wait_time}s... (Attempt {attempt + 2}/{max_attempts})")
+                    time.sleep(wait_time)
                     continue
                 else:
+                    return None, f"❌ Error after {max_attempts} attempts: {error_str}"
 
     # Check if we got output
     if not output:
+        return None, """❌ Failed to generate video after multiple attempts.
+
+**This usually happens when:**
+- The model is cold starting (hasn't been used recently)
+- The server is under heavy load
+
+**Please try:**
+1. Wait 3-5 minutes for the model to warm up
+2. Start with a simple prompt: "a cat walking" or "a ball bouncing"
+3. Once that works, try your original prompt
+
+The first generation often takes longer, but subsequent ones will be faster."""
 
     progress(0.7, desc="Downloading video...")
 
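For reference, the retry branch above uses a simple linear backoff: cold-start errors wait 30 s plus 15 s per extra attempt, while generic errors wait 10 s plus 5 s per extra attempt. A small illustrative helper (hypothetical, not part of app.py) makes the schedule explicit:

```python
# Sketch of the backoff schedule used in the retry loop above, pulled out as a
# hypothetical helper so the waits are easy to see and test in isolation.
def backoff_seconds(attempt: int, cold_start: bool) -> int:
    """Linear backoff: cold starts wait longer (30s, 45s, ...) than generic
    errors (10s, 15s, ...); `attempt` is zero-based."""
    return (30 + attempt * 15) if cold_start else (10 + attempt * 5)

# Example: waits for three attempts.
print([backoff_seconds(a, cold_start=True) for a in range(3)])   # [30, 45, 60]
print([backoff_seconds(a, cold_start=False) for a in range(3)])  # [10, 15, 20]
```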
@@ -333,10 +425,27 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as
 Generate videos from text or images using **Replicate API**.
 
 [](https://ginigen.com/)
-
-⚠️ **Note**: First generation may take longer (2-3 minutes) as the model warms up.
     """)
 
+    # Cold start warning box
+    with gr.Accordion("⚠️ Getting 'Prediction interrupted (PA)' error? Click here!", open=False):
+        gr.Markdown("""
+### 🔥 Model Cold Start Guide
+
+If you're getting **"Prediction interrupted (code: PA)"** errors, the model is sleeping and needs to wake up.
+
+**Quick Solution:**
+1. Click the **"🔥 Warm Up Model"** button
+2. Wait 2-3 minutes after warm-up
+3. Try a simple prompt: **"a cat walking"**
+4. Once that works, use your actual prompt
+
+**Why this happens:** Free tier models sleep after ~5 minutes of inactivity to save resources.
+The first request wakes them up, which can take 2-5 minutes.
+
+**Pro tip:** Once warmed up, the model stays active for ~5 minutes. Generate multiple videos quickly!
+        """)
+
     with gr.Row():
         with gr.Column(scale=1):
             # API Settings
@@ -409,11 +518,16 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as
                 value=""
             )
 
+            # Generate and warm-up buttons
+            with gr.Row():
+                generate_btn = gr.Button("🎬 Generate Video", variant="primary", size="lg", scale=2)
+                warmup_btn = gr.Button("🔥 Warm Up Model First", variant="secondary", size="lg", scale=1)
+
+            gr.Markdown("*Use Warm Up if you're getting PA errors or it's your first generation today*")
 
         # Results display
         with gr.Column():
+            warmup_status = gr.Textbox(label="Status", visible=False)
             output_video = gr.Video(
                 label="📹 Generated Video",
                 autoplay=True
@@ -466,24 +580,38 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as
 
 ### Troubleshooting
 
+#### If you get "Prediction interrupted (code: PA)":
+
+This means the model is cold starting. Follow these steps:
+
+1. **First Try**: Click the "🔥 Warm Up Model" button and wait for completion
+2. **Wait**: After warm-up, wait 2-3 minutes for the model to fully initialize
+3. **Simple Test**: Try generating with a very simple prompt: "a cat walking"
+4. **Your Prompt**: Once the simple test works, try your actual prompt
+
+**Other Issues:**
+- **Repeated failures**: The model may be under heavy load. Try during off-peak hours
+- **Timeout errors**: Increase wait time between attempts
+- **Watermark not showing**: Install opencv-python for watermark support
+
+**Pro Tips:**
+- First generation of the day always takes longer
+- Keep prompts under 100 words for best results
+- Simple prompts work better for warming up the model
+- Once warmed up, generate multiple videos quickly before the model sleeps again!
     """)
 
     # Examples
     gr.Examples(
         examples=[
+            ["Text to Video", "a red ball bouncing", None, "16:9", 42],  # Very simple warm-up
+            ["Text to Video", "a cat walking", None, "16:9", 42],  # Simple warm-up prompt
             ["Text to Video", "A serene lake at sunrise with mist rolling over the water. Camera slowly pans across the landscape as birds fly overhead.", None, "16:9", 42],
             ["Text to Video", "Urban street scene at night with neon lights reflecting on wet pavement. People walking with umbrellas, camera tracking forward.", None, "9:16", 123],
            ["Text to Video", "Close-up of a flower blooming in time-lapse, soft natural lighting, shallow depth of field.", None, "1:1", 789],
         ],
         inputs=[mode, prompt, image_input, aspect_ratio, seed],
+        label="Example Prompts (⚡ Start with 'red ball' or 'cat walking' if model is cold)"
     )
 
     # Event handlers
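The troubleshooting note mentions installing opencv-python for watermark support. The watermarking code itself is not part of this diff, so the following is only an illustrative sketch of how a text watermark might be stamped onto a video frame with OpenCV:

```python
# Hypothetical watermark helper; the app's actual watermark code is not shown here.
import cv2

def stamp_watermark(frame, text="Seedance Free"):
    """Draw semi-transparent text in the bottom-left corner of a BGR frame."""
    overlay = frame.copy()
    h, _w = frame.shape[:2]
    cv2.putText(overlay, text, (10, h - 10), cv2.FONT_HERSHEY_SIMPLEX,
                0.7, (255, 255, 255), 2, cv2.LINE_AA)
    # Blend overlay and original for a roughly 60%-opacity watermark.
    return cv2.addWeighted(overlay, 0.6, frame, 0.4, 0)
```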
@@ -505,6 +633,12 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as
         outputs=[ratio_info]
     )
 
+    warmup_btn.click(
+        fn=warmup_model,
+        inputs=[api_key_input],
+        outputs=[warmup_status]
+    )
+
     generate_btn.click(
         fn=generate_video,
         inputs=[mode, prompt, image_input, aspect_ratio, seed, api_key_input],