Update app.py
app.py (CHANGED)
@@ -98,7 +98,6 @@ except Exception as e:
     logger.exception("Failed to initialize Google Clients/Models."); st.error(f"🚨 Failed Init: {e}", icon="🚨"); st.stop()
 
 # --- Define Pydantic Schemas (Using V2 Syntax) ---
-# (Schemas remain the same as previous version)
 class StorySegment(BaseModel):
     scene_id: int = Field(..., ge=0)
     image_prompt: str = Field(..., min_length=10, max_length=250)
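Only the first two StorySegment fields are visible in this hunk. As a quick illustration of the Pydantic V2 Field constraints they declare, here is a minimal sketch (a trimmed, hypothetical stand-in class, not part of the commit):

from pydantic import BaseModel, Field, ValidationError

class StorySegmentDemo(BaseModel):  # hypothetical trimmed stand-in for StorySegment
    scene_id: int = Field(..., ge=0)                               # must be >= 0
    image_prompt: str = Field(..., min_length=10, max_length=250)  # 10-250 characters

StorySegmentDemo(scene_id=0, image_prompt="A cheerful robot waters a tiny garden.")  # valid
try:
    StorySegmentDemo(scene_id=-1, image_prompt="too short")  # violates both constraints
except ValidationError as exc:
    print(exc)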
@@ -126,15 +125,29 @@ class ChronoWeaveResponse(BaseModel):
         return self
 
 # --- Helper Functions ---
-
+
+# CORRECTED wave_file_writer function with proper indentation
 @contextlib.contextmanager
 def wave_file_writer(filename: str, channels: int = 1, rate: int = AUDIO_SAMPLING_RATE, sample_width: int = 2):
     """Context manager to safely write WAV files."""
-    wf = None
-
+    wf = None
+    try:
+        # Indent these lines correctly under the try:
+        wf = wave.open(filename, "wb")
+        wf.setnchannels(channels)
+        wf.setsampwidth(sample_width) # 2 bytes for 16-bit audio
+        wf.setframerate(rate)
+        yield wf # yield remains inside the try block
+    except Exception as e:
+        logger.error(f"Error opening/configuring wave file {filename}: {e}")
+        raise # Re-raise the exception
     finally:
-        if wf:
-
+        if wf:
+            try:
+                wf.close()
+            except Exception as e_close:
+                logger.error(f"Error closing wave file {filename}: {e_close}")
+
 
 async def generate_audio_live_async(api_text: str, output_filename: str, voice: Optional[str] = None) -> Optional[str]:
     """Generates audio using Gemini Live API (async version) via the GenerativeModel."""
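The corrected context manager above is what the audio pipeline writes raw PCM into. A minimal usage sketch, assuming 16-bit mono samples and the module-level AUDIO_SAMPLING_RATE default (not part of the commit):

# Hypothetical caller: write one second of 16-bit mono silence through the context manager.
silence = b"\x00\x00" * AUDIO_SAMPLING_RATE  # 2 bytes per sample at the default rate

with wave_file_writer("T1_S0_audio.wav") as wf:
    wf.writeframes(silence)  # wave.Wave_write.writeframes accepts raw PCM bytes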
@@ -184,49 +197,10 @@ def generate_image_imagen(prompt: str, aspect_ratio: str = "1:1", task_id: str =
     logger.info(f"🖼️ [{task_id}] Requesting image: '{prompt[:70]}...' (Aspect: {aspect_ratio})")
     logger.error(f" ❌ [{task_id}] Image generation skipped: Function needs update to use Vertex AI SDK for Imagen.")
     st.error(f"Image generation for {task_id} skipped: Requires Vertex AI SDK implementation.", icon="🖼️")
-
-
-    # Example conceptual structure (replace with actual Vertex AI SDK code):
-    # try:
-    #     from vertexai.preview.generative_models import ImageGenerationModel # Example import
-    #
-    #     # Assuming vertex_image_model is initialized globally or passed in
-    #     # vertex_image_model = ImageGenerationModel.from_pretrained("imagegeneration@006") # Example init
-    #
-    #     response = vertex_image_model.generate_images(
-    #         prompt=f"Simple kids animation style... NO humans... Aspect ratio {aspect_ratio}. Scene: {prompt}",
-    #         number_of_images=1,
-    #         # Add other relevant parameters like negative_prompt, seed, etc.
-    #     )
-    #
-    #     if response.images:
-    #         image_bytes = response.images[0]._image_bytes # Access image bytes (check actual attribute name)
-    #         image = Image.open(BytesIO(image_bytes))
-    #         logger.info(f" ✅ [{task_id}] Image generated (Vertex AI).")
-    #         # Check safety attributes if available in Vertex AI response
-    #         return image
-    #     else:
-    #         # Check Vertex AI response for errors / blocking reasons
-    #         logger.warning(f" ⚠️ [{task_id}] No image data received from Vertex AI.")
-    #         st.warning(f"No image data {task_id} (Vertex AI).", icon="🖼️")
-    #         return None
-    #
-    # except ImportError:
-    #     logger.error(f" ❌ [{task_id}] Vertex AI SDK ('google-cloud-aiplatform') not installed.")
-    #     st.error(f"Vertex AI SDK not installed for image generation.", icon="🚨")
-    #     return None
-    # except Exception as e:
-    #     logger.exception(f" ❌ [{task_id}] Vertex AI image generation failed: {e}")
-    #     st.error(f"Image gen failed {task_id} (Vertex AI): {e}", icon="🖼️")
-    #     return None
-    # --- End Placeholder ---
-
-    # Keep the old failing logic commented out or remove, returning None for now
-    return None # Return None until Vertex AI SDK is implemented
-
+    # Return None because the current method is known to fail based on previous logs
+    return None
 
 # --- Streamlit UI Elements ---
-# (Identical to previous version)
 st.sidebar.header("⚙️ Configuration")
 if GOOGLE_API_KEY: st.sidebar.success("Google API Key Loaded", icon="✅")
 else: st.sidebar.error("Google API Key Missing!", icon="🚨")
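For reference, a condensed, uncommented version of the placeholder deleted above. It assumes the Vertex AI SDK's preview interface (the placeholder's own import path and the private _image_bytes attribute may differ across google-cloud-aiplatform releases) and uses a hypothetical project/region; it is a sketch, not the commit's implementation:

# Sketch only: condensed from the commented-out placeholder removed in this hunk.
from io import BytesIO
from typing import Optional

from PIL import Image
import vertexai
from vertexai.preview.vision_models import ImageGenerationModel  # the placeholder guessed generative_models

def generate_image_imagen_vertex(prompt: str, aspect_ratio: str = "1:1") -> Optional[Image.Image]:
    vertexai.init(project="my-gcp-project", location="us-central1")  # hypothetical project/region
    model = ImageGenerationModel.from_pretrained("imagegeneration@006")
    response = model.generate_images(prompt=prompt, number_of_images=1, aspect_ratio=aspect_ratio)
    if not response.images:
        return None
    return Image.open(BytesIO(response.images[0]._image_bytes))  # attribute name taken from the placeholder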
@@ -249,16 +223,13 @@ if generate_button:
     except OSError as e: st.error(f"🚨 Failed create temp dir {temp_dir}: {e}", icon="📁"); st.stop()
     final_video_paths, generation_errors = {}, {}
 
-    # --- 1. Generate Narrative Structure ---
     chrono_response: Optional[ChronoWeaveResponse] = None
    with st.spinner("Generating narrative structure... 🤖"): chrono_response = generate_story_sequence_chrono(theme, num_scenes, num_timelines, divergence_prompt)
 
     if chrono_response:
-        # --- 2. Process Each Timeline ---
         overall_start_time = time.time(); all_timelines_successful = True
         with st.status("Generating assets and composing videos...", expanded=True) as status:
             for timeline_index, timeline in enumerate(chrono_response.timelines):
-                # ... (Timeline setup - same as before) ...
                 timeline_id, divergence, segments = timeline.timeline_id, timeline.divergence_reason, timeline.segments
                 timeline_label = f"Timeline {timeline_id}"; st.subheader(f"Processing {timeline_label}: {divergence}")
                 logger.info(f"--- Processing {timeline_label} (Idx: {timeline_index}) ---"); generation_errors[timeline_id] = []
@@ -266,7 +237,6 @@ if generate_button:
                 timeline_start_time = time.time(); scene_success_count = 0
 
                 for scene_index, segment in enumerate(segments):
-                    # ... (Scene setup - same as before) ...
                     scene_id = segment.scene_id; task_id = f"T{timeline_id}_S{scene_id}"
                     status.update(label=f"Processing {timeline_label}, Scene {scene_id + 1}/{len(segments)}...")
                     st.markdown(f"--- **Scene {scene_id + 1} ({task_id})** ---")
@@ -275,28 +245,22 @@ if generate_button:
                     st.write(f" *Img Prompt:* {segment.image_prompt}" + (f" *(Mod: {segment.timeline_visual_modifier})*" if segment.timeline_visual_modifier else "")); st.write(f" *Audio Text:* {segment.audio_text}")
 
                     # --- 2a. Image Generation ---
-                    # !!! This call will currently return None until Vertex AI SDK is implemented !!!
                     generated_image: Optional[Image.Image] = None
                     with st.spinner(f"[{task_id}] Generating image... 🎨"):
                         combined_prompt = segment.image_prompt
                         if segment.character_description: combined_prompt += f" Featuring: {segment.character_description}"
                         if segment.timeline_visual_modifier: combined_prompt += f" Style hint: {segment.timeline_visual_modifier}."
-                        generated_image = generate_image_imagen(combined_prompt, aspect_ratio, task_id) # Needs Vertex AI SDK update
+                        generated_image = generate_image_imagen(combined_prompt, aspect_ratio, task_id) # <<< Needs Vertex AI SDK update
 
-                    if generated_image:
+                    if generated_image:
                         image_path = os.path.join(temp_dir, f"{task_id}_image.png")
                         try: generated_image.save(image_path); temp_image_files[scene_id] = image_path; st.image(generated_image, width=180, caption=f"Scene {scene_id+1}")
                         except Exception as e: logger.error(f" ❌ [{task_id}] Img save error: {e}"); st.error(f"Save image {task_id} failed.", icon="💾"); scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Img save fail.")
-                    else:
-                        # Error logged within generate_image_imagen if it fails
-                        scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Img gen fail.")
-                        continue # Skip rest of scene processing if image fails
+                    else: scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Img gen fail."); continue
 
                     # --- 2b. Audio Generation ---
-                    # (Audio generation logic remains the same, but won't be reached if image fails)
                     generated_audio_path: Optional[str] = None
                     if not scene_has_error:
-                        # ... (Audio generation logic - same as before) ...
                         with st.spinner(f"[{task_id}] Generating audio... 🔊"):
                             audio_path_temp = os.path.join(temp_dir, f"{task_id}_audio.wav")
                             try: generated_audio_path = asyncio.run(generate_audio_live_async(segment.audio_text, audio_path_temp, audio_voice))
@@ -305,13 +269,12 @@ if generate_button:
                     if generated_audio_path:
                         temp_audio_files[scene_id] = generated_audio_path; try: open(generated_audio_path,'rb') as ap: st.audio(ap.read(), format='audio/wav')
                         except Exception as e: logger.warning(f" ⚠️ [{task_id}] Audio preview error: {e}")
-                    else: scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Audio gen fail."); continue
+                    else: scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Audio gen fail."); continue
 
                     # --- 2c. Create Video Clip ---
-                    # (Clip creation logic remains the same, but won't be reached if image/audio fails)
                     if not scene_has_error and scene_id in temp_image_files and scene_id in temp_audio_files:
-
-
+                        st.write(f" 🎬 Creating clip S{scene_id+1}...")
+                        img_path, aud_path = temp_image_files[scene_id], temp_audio_files[scene_id]
                         audio_clip_instance, image_clip_instance, composite_clip = None, None, None
                         try:
                             if not os.path.exists(img_path): raise FileNotFoundError(f"Img missing: {img_path}")
@@ -321,18 +284,14 @@ if generate_button:
                             composite_clip = image_clip_instance.set_audio(audio_clip_instance); video_clips.append(composite_clip)
                             logger.info(f" ✅ [{task_id}] Clip created (Dur: {audio_clip_instance.duration:.2f}s)."); st.write(f" ✅ Clip created (Dur: {audio_clip_instance.duration:.2f}s)."); scene_success_count += 1
                         except Exception as e: logger.exception(f" ❌ [{task_id}] Failed clip creation: {e}"); st.error(f"Failed clip {task_id}: {e}", icon="🎬"); scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Clip fail.")
-                        finally:
+                        finally:
                             if audio_clip_instance: audio_clip_instance.close();
                             if image_clip_instance: image_clip_instance.close()
-                            # Don't remove files here on error, let assembly logic handle based on overall success
 
                 # --- 2d. Assemble Timeline Video ---
-                # (Video assembly logic remains the same)
                 timeline_duration = time.time() - timeline_start_time
-                if video_clips and scene_success_count == len(segments):
-
-                    status.update(label=f"Composing video {timeline_label}...")
-                    st.write(f"🏗️ Assembling video {timeline_label}..."); logger.info(f"🏗️ Assembling video {timeline_label}...")
+                if video_clips and scene_success_count == len(segments):
+                    status.update(label=f"Composing video {timeline_label}..."); st.write(f"🏗️ Assembling video {timeline_label}..."); logger.info(f"🏗️ Assembling video {timeline_label}...")
                     output_filename = os.path.join(temp_dir, f"timeline_{timeline_id}_final.mp4"); final_timeline_video = None
                     try: final_timeline_video = concatenate_videoclips(video_clips, method="compose"); final_timeline_video.write_videofile(output_filename, fps=VIDEO_FPS, codec=VIDEO_CODEC, audio_codec=AUDIO_CODEC, logger=None); final_video_paths[timeline_id] = output_filename; logger.info(f" ✅ [{timeline_label}] Video saved: {os.path.basename(output_filename)}"); st.success(f"✅ Video {timeline_label} completed in {timeline_duration:.2f}s.")
                     except Exception as e: logger.exception(f" ❌ [{timeline_label}] Video assembly failed: {e}"); st.error(f"Assemble video {timeline_label} failed: {e}", icon="📼"); all_timelines_successful = False; generation_errors[timeline_id].append(f"T{timeline_id}: Assembly fail.")
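The try block above only shows the final set_audio and concatenation calls; how the two clip instances are built falls outside the hunk. A minimal sketch of the usual construction with the moviepy 1.x API that those calls imply (an assumption, not the commit's code):

# Sketch: build one scene clip whose duration follows the narration audio.
from moviepy.editor import AudioFileClip, ImageClip, concatenate_videoclips

def build_scene_clip(img_path: str, aud_path: str):
    audio_clip_instance = AudioFileClip(aud_path)
    image_clip_instance = ImageClip(img_path).set_duration(audio_clip_instance.duration)
    return image_clip_instance.set_audio(audio_clip_instance)  # mirrors the set_audio call shown above

# Timeline assembly then works as in the try block above, e.g.:
# concatenate_videoclips([clip_a, clip_b], method="compose").write_videofile("timeline_1_final.mp4", fps=24)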
@@ -345,7 +304,6 @@ if generate_button:
                 if generation_errors[timeline_id]: logger.error(f"Errors {timeline_label}: {generation_errors[timeline_id]}")
 
             # --- End of Timelines Loop ---
-            # (Final status update logic remains the same)
             overall_duration = time.time() - overall_start_time
             if all_timelines_successful and final_video_paths: status_msg = f"Complete! ({len(final_video_paths)} videos in {overall_duration:.2f}s)"; status.update(label=status_msg, state="complete", expanded=False); logger.info(status_msg)
             elif final_video_paths: status_msg = f"Partially Complete ({len(final_video_paths)} videos, errors). {overall_duration:.2f}s"; status.update(label=status_msg, state="warning", expanded=True); logger.warning(status_msg)
@@ -354,7 +312,6 @@ if generate_button:
         # --- 3. Display Results ---
         st.header("🎬 Generated Timelines")
         if final_video_paths:
-            # ... (Display logic - same as before) ...
             sorted_timeline_ids = sorted(final_video_paths.keys()); num_cols = min(len(sorted_timeline_ids), 3); cols = st.columns(num_cols)
             for idx, timeline_id in enumerate(sorted_timeline_ids):
                 col = cols[idx % num_cols]; video_path = final_video_paths[timeline_id]
@@ -366,33 +323,30 @@ if generate_button:
                     with open(video_path, 'rb') as vf: video_bytes = vf.read()
                     st.video(video_bytes); logger.info(f"Displaying T{timeline_id}")
                     st.download_button(f"Download T{timeline_id}", video_bytes, f"timeline_{timeline_id}.mp4", "video/mp4", key=f"dl_{timeline_id}")
-                    if generation_errors.get(timeline_id):
-                        # Filter out non-assembly errors for display below video
+                    if generation_errors.get(timeline_id):
                         scene_errors = [err for err in generation_errors[timeline_id] if not err.startswith(f"T{timeline_id}:")]
                         if scene_errors:
                             with st.expander(f"⚠️ View {len(scene_errors)} Scene Issues"):
-
+                                # Use standard loop here to avoid ValueError
+                                for err in scene_errors:
+                                    st.warning(f"- {err}")
                 except FileNotFoundError: logger.error(f"Video missing: {video_path}"); st.error(f"Error: Video missing T{timeline_id}.", icon="🚨")
                 except Exception as e: logger.exception(f"Display error {video_path}: {e}"); st.error(f"Display error T{timeline_id}: {e}", icon="🚨")
         else: # No videos generated
             st.warning("No final videos were successfully generated.")
-            # Display summary of ALL errors using a standard loop to avoid ValueError
             st.subheader("Summary of Generation Issues")
             has_errors = any(generation_errors.values())
             if has_errors:
                 with st.expander("View All Errors", expanded=True):
                     for tid, errors in generation_errors.items():
                         if errors:
-                            st.error(f"**Timeline {tid}:**")
+                            st.error(f"**Timeline {tid}:**")
                             # Use standard for loop here - FIX for ValueError
                             for msg in errors:
                                 st.error(f" - {msg}")
-            else:
-                st.info("No generation errors were recorded.")
-
+            else: st.info("No generation errors recorded.")
 
         # --- 4. Cleanup ---
-        # (Cleanup logic remains the same)
         st.info(f"Attempting cleanup: {temp_dir}")
         try: shutil.rmtree(temp_dir); logger.info(f"✅ Temp dir removed: {temp_dir}"); st.success("✅ Temp files cleaned.")
         except Exception as e: logger.error(f"⚠️ Failed remove temp dir {temp_dir}: {e}"); st.warning(f"Could not remove temp files: {temp_dir}.", icon="⚠️")