mgbam commited on
Commit
62f88b4
·
verified ·
1 Parent(s): 4ed1d3d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +493 -0
app.py ADDED
@@ -0,0 +1,493 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Copyright 2025 Google LLC. Based on work by Yousif Ahmed.
# Concept: ChronoWeave - Branching Narrative Generation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

import streamlit as st
import google.generativeai as genai
import os
import json
import numpy as np
from io import BytesIO
import time
import wave
import contextlib
import asyncio
import uuid  # For unique filenames
import shutil  # For cleaning up temp dirs

# Image handling
from PIL import Image

# Video and audio processing
from moviepy.editor import ImageClip, AudioFileClip, CompositeVideoClip, concatenate_videoclips

# Type hints (typing_extensions used so TypedDict works on older Pythons)
import typing_extensions as typing

# Async support for Streamlit/Google API: nest_asyncio lets asyncio.run()
# be called from within Streamlit's already-running event loop.
import nest_asyncio
nest_asyncio.apply()  # Apply patch for asyncio in environments like Streamlit/Jupyter

# --- Configuration ---
st.set_page_config(page_title="ChronoWeave", layout="wide")
st.title("🌀 ChronoWeave: Branching Narrative Generator")
st.markdown("""
Generate multiple, branching story timelines from a single theme using AI.
Based on the work of Yousif Ahmed. Copyright 2025 Google LLC.
""")

# --- Constants ---
MODEL = "models/gemini-1.5-flash"  # Or other suitable text model supporting JSON
# Using v1alpha for the Live API for audio output.
AUDIO_MODEL_VERSION = 'v1alpha'  # Must be alpha for audio modality
IMAGE_MODEL_ID = "imagen-3"  # Or your preferred Imagen model "imagen-3.0-generate-002"

# --- API Key Handling ---
try:
    # Preferred way to handle secrets in Streamlit sharing/HF Spaces.
    GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
    os.environ['GOOGLE_API_KEY'] = GOOGLE_API_KEY
except KeyError:
    st.error("🚨 Google API Key not found! Please add it as a Secret named 'GOOGLE_API_KEY' in your Hugging Face Space settings.", icon="🚨")
    st.stop()  # Halt execution if no key

# --- Initialize Google Client ---
try:
    # Initialize the client with the API key.
    genai.configure(api_key=GOOGLE_API_KEY)

    # Client for Text/Imagen (standard API).
    client_standard = genai.GenerativeModel(MODEL)
    # Client for Live Audio (v1alpha).
    # NOTE(review): `google.generativeai` does not export a `Client` class in
    # most released versions (that constructor shape belongs to the newer
    # `google.genai` SDK) — verify this line against the installed SDK version.
    client_live = genai.Client(
        client_options={'api_endpoint': f'{AUDIO_MODEL_VERSION}.generativelanguage.googleapis.com'}
    )
    # Note: As of recent updates, genai.configure might handle this better,
    # but separating clients or explicitly setting endpoints can be more robust.
    # Adjust based on the library version and observed behavior.

except Exception as e:
    st.error(f"🚨 Failed to initialize Google AI Client: {e}", icon="🚨")
    st.stop()
# --- Define Structured Output Schemas ---
class StorySegment(typing.TypedDict):
    """Schema for one scene within a timeline (structured-output contract)."""
    scene_id: int  # 0-based index of the scene within its timeline
    image_prompt: str  # concise visual description fed to the image model
    audio_text: str  # single sentence of narration for the scene
    character_description: str  # recurring-character notes for this scene's prompt
    timeline_visual_modifier: typing.Optional[str]  # optional style hint; None when absent
class Timeline(typing.TypedDict):
    """Schema for one branching timeline of the generated story."""
    timeline_id: int  # unique integer id for this timeline
    divergence_reason: str  # why this timeline diverges from the others
    segments: list[StorySegment]  # ordered scenes that make up the timeline
class ChronoWeaveResponse(typing.TypedDict):
    """Top-level schema expected from the story-generation model's JSON reply."""
    core_theme: str  # the input theme, echoed back by the model
    timelines: list[Timeline]  # all generated timelines
    total_scenes_per_timeline: int  # scene count per timeline (should match the request)
96
+ # --- Helper Functions ---
97
+
98
+ @contextlib.contextmanager
99
+ def wave_file(filename, channels=1, rate=24000, sample_width=2):
100
+ """Context manager to write WAV files."""
101
+ with wave.open(filename, "wb") as wf:
102
+ wf.setnchannels(channels)
103
+ wf.setsampwidth(sample_width)
104
+ wf.setframerate(rate)
105
+ yield wf
106
+
async def generate_audio_live_async(api_text, output_filename):
    """Generate narration audio for *api_text* via the Gemini Live API.

    Streams audio chunks from a live session, concatenates them, and writes
    the result to *output_filename* as a WAV file.

    Args:
        api_text: Text prompt to narrate.
        output_filename: Destination path for the WAV file.

    Returns:
        *output_filename* on success, or ``None`` when no audio was received
        or an error occurred (errors are surfaced in the Streamlit UI).
    """
    collected_audio = bytearray()
    st.write(f"🎙️ Generating audio for: '{api_text[:50]}...'")  # Log start

    try:
        # Use the 'client_live' specifically configured for v1alpha.
        # Fix: reuse the module-level MODEL constant instead of a placeholder
        # f-string re-hardcoding the same model name, so the text and audio
        # paths cannot silently drift apart.
        live_model = client_live.get_model(MODEL)

        config = {
            "response_modalities": ["AUDIO"]
        }
        # NOTE(review): the connect()/send_request()/stream_content() calls
        # below follow a Live-API shape that may not match the installed
        # google-generativeai release — verify against the current SDK.
        async with live_model.connect(config=config) as session:
            await session.send_request([api_text])  # Simpler send for single prompt
            async for response in session.stream_content():
                if response.audio_chunk:
                    collected_audio.extend(response.audio_chunk.data)

        if not collected_audio:
            st.warning(f"⚠️ No audio data received for: '{api_text[:50]}...'")
            return None  # Indicate failure

        audio_bytes = bytes(collected_audio)
        # Write the collected raw PCM bytes into a WAV container.
        with wave_file(output_filename) as wf:
            wf.writeframes(audio_bytes)
        st.write(f" ✅ Audio saved: {os.path.basename(output_filename)}")
        return output_filename
    except Exception as e:
        st.error(f" ❌ Audio generation failed for '{api_text[:50]}...': {e}", icon="🚨")
        return None
def generate_story_sequence_chrono(theme: str, num_scenes: int, num_timelines: int, divergence_prompt: str = "") -> ChronoWeaveResponse | None:
    """Generates branching story sequences using Gemini structured output.

    Args:
        theme: The core story theme supplied by the user.
        num_scenes: Number of scenes requested per timeline.
        num_timelines: Number of distinct timelines to generate.
        divergence_prompt: Optional user hint steering how timelines diverge.

    Returns:
        The parsed JSON response as a dict matching ``ChronoWeaveResponse``,
        or ``None`` on any failure (errors are reported in the Streamlit UI).
    """
    st.write(f"📚 Generating {num_timelines} timeline(s) for theme: '{theme}'...")
    # Fall back to a generic divergence instruction when the user gave no hint.
    divergence_instruction = f"Introduce divergence between timelines. {divergence_prompt}" if divergence_prompt else "Introduce natural points of divergence between timelines after the first scene or two."

    # The full instruction prompt, including an inline JSON schema the model
    # must follow. Doubled braces ({{ }}) are literal braces in this f-string.
    prompt = f'''
    As an expert narrative designer, create a branching story based on the theme: "{theme}".
    Generate exactly {num_timelines} distinct timelines, each containing exactly {num_scenes} scenes.
    Each scene should be approximately 5-10 seconds long when narrated.

    {divergence_instruction} Clearly state the reason for divergence for each timeline after the first.

    For each scene in each timeline, provide:
    - scene_id: An integer starting from 0 for the scene number within its timeline.
    - image_prompt: A concise (15-25 words) description for an image generation model. Focus on visual details, characters (animals/objects only, NO PEOPLE), background, and action. Maintain a consistent 'kids animation style' (e.g., simple, rounded shapes, bright colors) across all scenes and timelines unless specified by a timeline_visual_modifier.
    - audio_text: A single, engaging sentence of narration or dialogue for the scene (max 25 words).
    - character_description: Brief description of recurring characters (names, key features) mentioned in *this specific scene's image prompt*. Keep consistent within a timeline. (Max 30 words).
    - timeline_visual_modifier: (Optional, string or null) A *brief* hint if this timeline should have a slightly different visual feel from this scene onwards (e.g., "slightly darker lighting", "more cluttered background", "character looks worried"). Keep it subtle. Use null if no specific modifier.

    Constraint: Ensure the output strictly adheres to the following JSON schema. Do not include preamble or explanations outside the JSON structure. Respond ONLY with the JSON object.

    JSON Schema:
    {{
      "type": "object",
      "properties": {{
        "core_theme": {{"type": "string"}},
        "timelines": {{
          "type": "array",
          "items": {{
            "type": "object",
            "properties": {{
              "timeline_id": {{"type": "integer"}},
              "divergence_reason": {{"type": "string"}},
              "segments": {{
                "type": "array",
                "items": {{
                  "type": "object",
                  "properties": {{
                    "scene_id": {{"type": "integer"}},
                    "image_prompt": {{"type": "string"}},
                    "audio_text": {{"type": "string"}},
                    "character_description": {{"type": "string"}},
                    "timeline_visual_modifier": {{"type": ["string", "null"]}}
                  }},
                  "required": ["scene_id", "image_prompt", "audio_text", "character_description", "timeline_visual_modifier"]
                }}
              }}
            }},
            "required": ["timeline_id", "divergence_reason", "segments"]
          }}
        }},
        "total_scenes_per_timeline": {{"type": "integer"}}
      }},
      "required": ["core_theme", "timelines", "total_scenes_per_timeline"]
    }}
    '''

    try:
        response = client_standard.generate_content(
            contents=prompt,
            generation_config=genai.types.GenerationConfig(
                response_mime_type="application/json",
                # Optional: Add temperature, etc. if needed
            )
            # The schema can also be passed via generation_config in some versions/models:
            # config={'response_mime_type': 'application/json',
            #         'response_schema': ChronoWeaveResponse}
        )

        # Parse the model's reply; with JSON mime type requested, response.text
        # should contain only the JSON string.
        story_data = json.loads(response.text)
        st.success("✅ Story structure generated successfully!")
        # Basic validation (can be more thorough) — only the presence and type
        # of 'timelines' is checked here; segment structure is trusted.
        if 'timelines' in story_data and isinstance(story_data['timelines'], list):
            return story_data  # Return the parsed dictionary
        else:
            st.error("🚨 Generated story data is missing the 'timelines' list.", icon="🚨")
            return None

    except json.JSONDecodeError as e:
        st.error(f"🚨 Failed to decode JSON response from Gemini: {e}", icon="🚨")
        # 'response' may be unbound if generate_content itself raised earlier.
        st.text_area("Problematic Response Text:", response.text if 'response' in locals() else "No response object.", height=150)
        return None
    except Exception as e:
        st.error(f"🚨 Error generating story sequence: {e}", icon="🚨")
        # Log the prompt potentially? Be careful with sensitive data if applicable.
        return None
def generate_image_imagen(prompt: str, aspect_ratio: str = "1:1") -> Image.Image | None:
    """Generates an image using Imagen.

    Args:
        prompt: Visual description of the desired scene.
        aspect_ratio: Requested aspect ratio ("1:1", "16:9", or "9:16").
            NOTE(review): currently unused — only the commented-out
            ImageParams call would apply it; confirm intended behavior.

    Returns:
        A PIL image on success, otherwise ``None`` (failures are reported
        in the Streamlit UI).
    """
    st.write(f"🖼️ Generating image for: '{prompt[:60]}...'")
    try:
        # NOTE(review): this sends a text instruction to the standard
        # GenerativeModel and expects inline image bytes back; whether that
        # works depends on the configured model supporting image output —
        # verify against the Imagen/unified-API documentation.
        response = client_standard.generate_content(
            f"Generate an image with the following prompt, ensuring a child-friendly animation style and NO human figures: {prompt}",
            generation_config=genai.types.GenerationConfig(
                candidate_count=1,  # Generate one image
                # Imagen specific parameters are often passed differently or rely on model defaults.
                # Check documentation for precise Imagen control via the unified API.
            ),
            # If the model/API version requires specific image parameters:
            # tools=[genai.ImageParams(model=IMAGE_MODEL_ID, number_of_images=1, aspect_ratio=aspect_ratio, person_generation="DONT_ALLOW")]
        )

        # Accessing image data might vary slightly depending on API response
        # structure; this assumes response.parts carries inline bytes on success.
        if response.parts and response.parts[0].inline_data:
            image_bytes = response.parts[0].inline_data.data
            image = Image.open(BytesIO(image_bytes))
            st.write(" ✅ Image generated.")
            return image
        else:
            # Check for safety blocks or other reasons for failure.
            # NOTE(review): prompt_feedback/block_reason access may itself
            # raise on some SDK versions; the outer except covers that case.
            if response.prompt_feedback.block_reason:
                st.warning(f" ⚠️ Image generation blocked for prompt '{prompt[:60]}...'. Reason: {response.prompt_feedback.block_reason}", icon="⚠️")
            else:
                st.warning(f" ⚠️ No image data received for prompt '{prompt[:60]}...'.", icon="⚠️")
            return None

    except Exception as e:
        st.error(f" ❌ Image generation failed for '{prompt[:60]}...': {e}", icon="🚨")
        return None
# --- Streamlit UI Elements ---
# Sidebar widgets; the variables bound here are read by the main logic below.
st.sidebar.header("Configuration")

# API Key display/check (already handled above, but sidebar is a good place).
if GOOGLE_API_KEY:
    st.sidebar.success("Google API Key Loaded!", icon="✅")
else:
    st.sidebar.error("Google API Key Missing!", icon="🚨")

theme = st.sidebar.text_input("Story Theme:", "A curious squirrel finds a shiny object")
num_scenes = st.sidebar.slider("Scenes per Timeline:", min_value=2, max_value=7, value=3)
num_timelines = st.sidebar.slider("Number of Timelines:", min_value=1, max_value=4, value=2)
divergence_prompt = st.sidebar.text_input("Divergence Hint (Optional):", placeholder="e.g., What if it started raining?")
aspect_ratio = st.sidebar.selectbox("Image Aspect Ratio:", ["1:1", "16:9", "9:16"], index=0)

# Disabled until a key is present (st.stop() above makes this mostly redundant,
# but it keeps the UI honest if key handling ever changes).
generate_button = st.sidebar.button("✨ Generate ChronoWeave ✨", type="primary", disabled=(not GOOGLE_API_KEY))

st.sidebar.markdown("---")
st.sidebar.info("Note: Generation can take several minutes depending on settings.")
# --- Main Logic ---
# Orchestrates the full pipeline per button press: story JSON -> per-scene
# image + audio assets -> per-timeline video -> display -> cleanup.
if generate_button:
    if not theme:
        st.error("Please enter a story theme.", icon="👈")
    else:
        # Create a unique temporary directory for this run (in the current dir).
        run_id = str(uuid.uuid4())
        temp_dir = os.path.join(".", f"chrono_temp_{run_id}")
        os.makedirs(temp_dir, exist_ok=True)
        st.write(f"Working directory: {temp_dir}")

        final_video_paths = {}  # {timeline_id: video_path}

        with st.spinner("Generating narrative structure..."):
            chrono_data = generate_story_sequence_chrono(theme, num_scenes, num_timelines, divergence_prompt)

        if chrono_data and 'timelines' in chrono_data:
            st.success(f"Found {len(chrono_data['timelines'])} timelines. Processing each...")

            all_timelines_successful = True  # Tracks whether every timeline completed

            # Use st.status for detailed progress.
            with st.status("Generating assets and composing videos...", expanded=True) as status:

                for timeline in chrono_data['timelines']:
                    timeline_id = timeline['timeline_id']
                    divergence = timeline['divergence_reason']
                    segments = timeline['segments']
                    st.subheader(f"Timeline {timeline_id}: {divergence}")

                    temp_image_files = []
                    temp_audio_files = []
                    video_clips = []
                    timeline_successful = True  # Flag for this specific timeline

                    for i, segment in enumerate(segments):
                        status.update(label=f"Processing Timeline {timeline_id}, Scene {i+1}/{num_scenes}...")
                        image_prompt = segment['image_prompt']
                        audio_text = segment['audio_text']
                        char_desc = segment['character_description']
                        vis_mod = segment['timeline_visual_modifier']

                        st.write(f"--- Scene {i+1} (T{timeline_id}) ---")
                        st.write(f"* **Image Prompt:** {image_prompt}" + (f" (Modifier: {vis_mod})" if vis_mod else ""))
                        st.write(f"* **Audio Text:** {audio_text}")

                        # --- Image Generation ---
                        # Fold the character description (and any visual modifier)
                        # into the prompt so characters stay consistent per timeline.
                        combined_prompt = f"{image_prompt} {char_desc}"
                        if vis_mod:
                            combined_prompt += f" Style hint: {vis_mod}"

                        generated_image = generate_image_imagen(combined_prompt, aspect_ratio)

                        if generated_image:
                            image_path = os.path.join(temp_dir, f"t{timeline_id}_s{i}_image.png")
                            generated_image.save(image_path)
                            temp_image_files.append(image_path)
                            st.image(generated_image, width=200)  # Show thumbnail
                        else:
                            st.warning(f"Skipping scene {i+1} in timeline {timeline_id} due to image generation failure.")
                            timeline_successful = False
                            continue  # Skip to next segment if image fails

                        # --- Audio Generation ---
                        # Prefix an instruction to prevent conversational filler
                        # ("Okay, here's...") from leaking into the narration.
                        audio_negative_prompt = "Narrate the following sentence directly, with expression, without any introduction or closing remarks like 'Okay' or 'Here is the narration'. Just read the sentence:"
                        full_audio_prompt = f"{audio_negative_prompt}\n{audio_text}"
                        audio_path = os.path.join(temp_dir, f"t{timeline_id}_s{i}_audio.wav")

                        # Run the async audio generation (nest_asyncio makes
                        # asyncio.run safe inside Streamlit's loop).
                        try:
                            generated_audio_path = asyncio.run(generate_audio_live_async(full_audio_prompt, audio_path))
                        except Exception as e:
                            st.error(f"Asyncio error during audio gen: {e}")
                            generated_audio_path = None

                        if generated_audio_path:
                            temp_audio_files.append(generated_audio_path)
                        else:
                            st.warning(f"Skipping video clip for scene {i+1} in timeline {timeline_id} due to audio generation failure.")
                            # Remove the now-orphaned image for this failed scene.
                            if os.path.exists(image_path):
                                os.remove(image_path)
                                temp_image_files.remove(image_path)
                            timeline_successful = False
                            continue  # Skip making video clip if audio fails

                        # --- Create Video Clip ---
                        try:
                            st.write(" 🎬 Creating video clip...")
                            audio_clip = AudioFileClip(generated_audio_path)
                            np_image = np.array(Image.open(image_path))
                            # Still image held for exactly the narration's duration.
                            image_clip = ImageClip(np_image).set_duration(audio_clip.duration)
                            composite_clip = image_clip.set_audio(audio_clip)
                            video_clips.append(composite_clip)
                            st.write(" ✅ Clip created.")
                        except Exception as e:
                            st.error(f" ❌ Failed to create video clip for scene {i+1} (T{timeline_id}): {e}", icon="🚨")
                            timeline_successful = False

                    # --- Assemble Timeline Video ---
                    # Only assemble when clips exist and no scene-level error occurred.
                    if video_clips and timeline_successful:
                        status.update(label=f"Composing final video for Timeline {timeline_id}...")
                        st.write(f"🎞️ Assembling final video for Timeline {timeline_id}...")
                        try:
                            final_timeline_video = concatenate_videoclips(video_clips, method="compose")
                            output_filename = os.path.join(temp_dir, f"timeline_{timeline_id}_final_video.mp4")
                            # 'libx264' + 'aac' for broad playback compatibility.
                            final_timeline_video.write_videofile(output_filename, fps=24, codec='libx264', audio_codec='aac')
                            final_video_paths[timeline_id] = output_filename
                            st.success(f" ✅ Video for Timeline {timeline_id} saved: {os.path.basename(output_filename)}")

                            # Close clips to release file handles/ffmpeg processes.
                            for clip in video_clips:
                                if hasattr(clip, 'close'): clip.close()
                                if hasattr(clip, 'audio') and hasattr(clip.audio, 'close'): clip.audio.close()
                            if hasattr(final_timeline_video, 'close'): final_timeline_video.close()

                        except Exception as e:
                            st.error(f" ❌ Failed to write final video for Timeline {timeline_id}: {e}", icon="🚨")
                            all_timelines_successful = False
                    elif not video_clips:
                        st.warning(f"No video clips were successfully generated for Timeline {timeline_id}. Skipping final video assembly.")
                        all_timelines_successful = False
                    else:
                        st.warning(f"Timeline {timeline_id} encountered errors. Skipping final video assembly.")
                        all_timelines_successful = False

                # Final status update.
                # Fix: st.status only accepts state values "running", "complete",
                # or "error" — the previous state="warning" raised at runtime and
                # masked a partial success. Partial results now complete the
                # status and surface a separate warning instead.
                if all_timelines_successful and final_video_paths:
                    status.update(label="ChronoWeave Generation Complete!", state="complete", expanded=False)
                elif final_video_paths:
                    status.update(label="ChronoWeave Generation Partially Complete (some errors occurred).", state="complete", expanded=False)
                    st.warning("Some timelines failed; showing the videos that were generated.", icon="⚠️")
                else:
                    status.update(label="ChronoWeave Generation Failed.", state="error", expanded=False)

            # --- Display Results ---
            st.header("Generated Timelines")
            if final_video_paths:
                for timeline_id in sorted(final_video_paths.keys()):
                    video_path = final_video_paths[timeline_id]
                    # Look up the timeline's divergence reason for the caption.
                    reason = "Unknown"
                    for t in chrono_data.get('timelines', []):
                        if t.get('timeline_id') == timeline_id:
                            reason = t.get('divergence_reason', 'N/A')
                            break
                    st.subheader(f"Timeline {timeline_id}: {reason}")
                    try:
                        # Fix: context manager guarantees the handle closes even
                        # if st.video raises.
                        with open(video_path, 'rb') as video_file:
                            st.video(video_file.read())
                    except FileNotFoundError:
                        st.error(f"Could not find video file: {video_path}", icon="🚨")
                    except Exception as e:
                        st.error(f"Could not display video {video_path}: {e}", icon="🚨")
            else:
                st.warning("No final videos were successfully generated.")

            # --- Cleanup ---
            st.write("Cleaning up temporary files...")
            try:
                shutil.rmtree(temp_dir)
                st.write(" ✅ Temporary files removed.")
            except Exception as e:
                st.warning(f" ⚠️ Could not remove temporary directory {temp_dir}: {e}", icon="⚠️")

        elif not chrono_data:
            # Fix: previously the temp dir leaked on this path.
            shutil.rmtree(temp_dir, ignore_errors=True)
            st.error("Story generation failed. Cannot proceed.", icon="🛑")
        else:
            # chrono_data returned but malformed (e.g., no 'timelines' key).
            shutil.rmtree(temp_dir, ignore_errors=True)
            st.error("Story data seems malformed. Cannot proceed.", icon="🛑")

else:
    st.info("Configure settings in the sidebar and click 'Generate ChronoWeave'")