Update app.py
app.py
CHANGED
@@ -148,22 +148,6 @@ def update_image_input(mode):
     else:
         return gr.update(visible=False)

-def wait_for_model_with_retry(model_name, max_retries=5, initial_wait=10):
-    """Wait for model to be ready with retry logic"""
-    for attempt in range(max_retries):
-        try:
-            # Try to get model info
-            model = replicate.models.get(model_name)
-            return True
-        except Exception as e:
-            if attempt < max_retries - 1:
-                wait_time = initial_wait * (attempt + 1)
-                print(f"Model not ready, waiting {wait_time} seconds... (Attempt {attempt + 1}/{max_retries})")
-                time.sleep(wait_time)
-            else:
-                return False
-    return False
-
 def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
     """Main video generation function"""

@@ -210,15 +194,13 @@ def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):

         input_params["image"] = f"data:image/png;base64,{image_base64}"

-    progress(0.2, desc="
+    progress(0.2, desc="Preparing API request...")

     # Set up Replicate with the API token
     replicate.api_token = token

-    #
-
-    if not model_ready:
-        return None, "⏳ Model is still booting up. Please try again in a few minutes."
+    # Skip model availability check to avoid delays
+    # The actual run will handle cold start retries

     progress(0.3, desc="Calling Replicate API...")

@@ -231,51 +213,52 @@ def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
             # Run Replicate - use the model directly without version specifier
             start_time = time.time()

-
-            output_generator = replicate.run(
+            progress(0.3 + attempt * 0.1, desc=f"Generating video... (Attempt {attempt + 1}/{max_attempts})")
+
+            # Run the model
+            output = replicate.run(
                 "bytedance/seedance-1-lite",
                 input=input_params
             )

-            # If it's a generator, iterate through it
-            if hasattr(output_generator, '__iter__') and not isinstance(output_generator, (str, bytes)):
-                for event in output_generator:
-                    elapsed = time.time() - start_time
-                    progress_val = min(0.3 + (elapsed / 300) * 0.4, 0.7)
-                    progress(progress_val, desc=f"Generating video... ({int(elapsed)}s)")
-                    output = event # Keep the last event as output
-            else:
-                # If it's not a generator, use it directly
-                output = output_generator
-
             # If we got output, break the retry loop
             if output:
                 break
-
-        except
+
+        except Exception as e:
             error_str = str(e)
-
+            # Handle specific error patterns
+            if "Prediction interrupted" in error_str or "code: PA" in error_str:
+                if attempt < max_attempts - 1:
+                    wait_time = 45 + (attempt * 20) # Longer wait for interrupted predictions
+                    progress(0.3, desc=f"Model is cold starting. This is normal for first use. Waiting {wait_time}s...")
+                    time.sleep(wait_time)
+                    continue
+                else:
+                    return None, "❌ Model is still warming up. Please wait 2-3 minutes and try again with a simple prompt like 'a cat walking'."
+            elif "cold boot" in error_str.lower() or "starting" in error_str.lower():
                 if attempt < max_attempts - 1:
-                    progress(0.3, desc=f"Model is starting up,
+                    progress(0.3, desc=f"Model is starting up, waiting 30s before retry...")
                     time.sleep(30)
                     continue
-            elif "timeout" in error_str.lower()
-
-
-
-
+            elif "timeout" in error_str.lower():
+                if attempt < max_attempts - 1:
+                    progress(0.3, desc=f"Timeout occurred, retrying... (Attempt {attempt + 2}/{max_attempts})")
+                    time.sleep(10)
+                    continue
+            elif "ReplicateError" in error_str:
                 return None, f"❌ Replicate API error: {error_str}"
-            except Exception as e:
-                if attempt < max_attempts - 1:
-                    progress(0.3, desc=f"Error occurred, retrying... (Attempt {attempt + 2}/{max_attempts})")
-                    time.sleep(5)
-                    continue
             else:
-
+                if attempt < max_attempts - 1:
+                    progress(0.3, desc=f"Error occurred, retrying... (Attempt {attempt + 2}/{max_attempts})")
+                    time.sleep(5)
+                    continue
+                else:
+                    return None, f"❌ Unexpected error: {error_str}"

     # Check if we got output
     if not output:
-        return None, "❌ Failed to generate video
+        return None, "❌ Failed to generate video. The model might be cold starting. Please wait 2-3 minutes and try again with a simple prompt."

     progress(0.7, desc="Downloading video...")

@@ -350,6 +333,8 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as
     Generate videos from text or images using **Replicate API**.

     [](https://ginigen.com/)
+
+    ⚠️ **Note**: First generation may take longer (2-3 minutes) as the model warms up.
     """)

     with gr.Row():
@@ -481,6 +466,8 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as

     ### Troubleshooting

+    - **"Prediction interrupted (code: PA)"**: The model is warming up. Wait 2-3 minutes and try again, or try a simpler prompt first.
+    - **Repeated failures**: Try with a shorter, simpler prompt to warm up the model.
     - **Timeout errors**: The model might be cold starting. Wait 1-2 minutes and try again.
     - **Model booting**: First requests after inactivity may take longer as the model boots up.
     - **Extended wait times**: Complex prompts or server load may cause longer generation times.
@@ -490,12 +477,13 @@ with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as
     # Examples
     gr.Examples(
         examples=[
+            ["Text to Video", "A cat walking", None, "16:9", 42], # Simple warm-up prompt
             ["Text to Video", "A serene lake at sunrise with mist rolling over the water. Camera slowly pans across the landscape as birds fly overhead.", None, "16:9", 42],
             ["Text to Video", "Urban street scene at night with neon lights reflecting on wet pavement. People walking with umbrellas, camera tracking forward.", None, "9:16", 123],
             ["Text to Video", "Close-up of a flower blooming in time-lapse, soft natural lighting, shallow depth of field.", None, "1:1", 789],
         ],
         inputs=[mode, prompt, image_input, aspect_ratio, seed],
-        label="Example Prompts"
+        label="Example Prompts (Start with the simple 'cat walking' for cold start)"
     )

     # Event handlers
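For readers following the change, here is a minimal, self-contained sketch of the retry pattern this commit moves into `generate_video()`: instead of pre-checking the model with the removed `wait_for_model_with_retry()` helper, the `replicate.run()` call itself is retried when the error text points to a cold start. The wrapper name, attempt count, and wait times below are illustrative only; it assumes the `replicate` Python client is installed and `REPLICATE_API_TOKEN` is set in the environment.

```python
import time

import replicate  # assumes REPLICATE_API_TOKEN is exported in the environment


def run_with_cold_start_retry(input_params, max_attempts=3):
    """Illustrative wrapper: retry replicate.run() while the model is cold starting."""
    for attempt in range(max_attempts):
        try:
            # One synchronous prediction; the model reference matches the diff
            return replicate.run("bytedance/seedance-1-lite", input=input_params)
        except Exception as e:
            error_str = str(e)
            last_attempt = attempt == max_attempts - 1
            if "Prediction interrupted" in error_str or "code: PA" in error_str:
                if last_attempt:
                    raise
                # Interrupted predictions usually mean a cold boot; back off progressively
                time.sleep(45 + attempt * 20)
            elif "timeout" in error_str.lower():
                if last_attempt:
                    raise
                time.sleep(10)
            else:
                raise
    return None
```

Keying the backoff off the failed run itself avoids the extra `replicate.models.get()` round trips the old helper made before every generation.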
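The image-to-video path in the diff passes the uploaded image as a base64 data URL (`input_params["image"] = f"data:image/png;base64,{image_base64}"`). Below is a minimal sketch of how such a value can be built; it assumes the Gradio image input is configured to return a PIL image, and the helper name and the non-`image` keys in the commented example are illustrative, not taken from the app.

```python
import base64
import io

from PIL import Image


def image_to_data_url(image: Image.Image) -> str:
    """Illustrative helper: encode a PIL image as the data URL passed to Replicate."""
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")  # serialize to PNG bytes in memory
    image_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{image_base64}"


# Example (hypothetical field names other than "image"):
# input_params = {"prompt": prompt, "aspect_ratio": aspect_ratio, "seed": seed}
# input_params["image"] = image_to_data_url(image)
```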