mgbam committed on
Commit 17235f9 · verified · 1 Parent(s): 66aa79d

Update app.py

Files changed (1)
  1. app.py +78 -89
app.py CHANGED
@@ -27,20 +27,20 @@ from pydantic import BaseModel, Field, ValidationError, field_validator, model_v
 # Video and audio processing
 from moviepy.editor import ImageClip, AudioFileClip, concatenate_videoclips
 
-# Google generative API and async patch
+# Google Generative AI library and async patch
 import google.generativeai as genai
 import nest_asyncio
-nest_asyncio.apply()  # Make asyncio work in Streamlit/Jupyter
+nest_asyncio.apply()  # Ensure asyncio works correctly in Streamlit/Jupyter
 
 # --- Logging Setup ---
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
 logger = logging.getLogger(__name__)
 
 # --- Constants & Configurations ---
-TEXT_MODEL_ID = "models/gemini-1.5-flash"  # Alternatively "gemini-1.5-pro"
-AUDIO_MODEL_ID = "models/gemini-1.5-flash"  # Audio generation uses the text model for now
+TEXT_MODEL_ID = "models/gemini-1.5-flash"  # Alternatively "gemini-1.5-pro"
+AUDIO_MODEL_ID = "models/gemini-1.5-flash"  # Synchronous generation for audio now
 AUDIO_SAMPLING_RATE = 24000
-IMAGE_MODEL_ID = "imagen-3"  # NOTE: Requires Vertex AI SDK update for production
+IMAGE_MODEL_ID = "imagen-3"  # NOTE: Requires Vertex AI SDK integration in the future
 DEFAULT_ASPECT_RATIO = "1:1"
 VIDEO_FPS = 24
 VIDEO_CODEC = "libx264"
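The nest_asyncio patch above exists because Streamlit (like Jupyter) may already own a running event loop, in which case a bare asyncio.run() raises RuntimeError. A minimal, self-contained sketch of the pattern (the fetch coroutine is illustrative only):

```python
import asyncio

import nest_asyncio

nest_asyncio.apply()  # Patch the current loop so nested asyncio.run() calls are allowed.

async def fetch() -> str:
    await asyncio.sleep(0.1)  # Stand-in for real async work.
    return "done"

# Without the patch, this raises RuntimeError inside an already-running loop
# (e.g., a Jupyter cell); with it, the call nests safely.
print(asyncio.run(fetch()))
```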
@@ -60,7 +60,7 @@ class StorySegment(BaseModel):
     @classmethod
     def image_prompt_no_humans(cls, v: str) -> str:
         if any(word in v.lower() for word in ["person", "people", "human", "man", "woman", "boy", "girl", "child"]):
-            logger.warning(f"Image prompt '{v[:50]}...' may contain human-related descriptors.")
+            logger.warning(f"Image prompt '{v[:50]}...' may include human-related descriptions.")
         return v
 
 
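The warning above is emitted from inside a Pydantic field validator on StorySegment; the decorator itself sits just outside this hunk. A self-contained sketch of how such a validator is wired up in Pydantic v2 (SegmentSketch and BANNED_WORDS are illustrative names, not the app's exact schema):

```python
import logging

from pydantic import BaseModel, field_validator

logger = logging.getLogger(__name__)

BANNED_WORDS = ["person", "people", "human", "man", "woman", "boy", "girl", "child"]

class SegmentSketch(BaseModel):
    image_prompt: str

    @field_validator("image_prompt")
    @classmethod
    def image_prompt_no_humans(cls, v: str) -> str:
        # Warn, but do not reject: the prompt is passed through unchanged.
        if any(word in v.lower() for word in BANNED_WORDS):
            logger.warning(f"Image prompt '{v[:50]}...' may include human-related descriptions.")
        return v

SegmentSketch(image_prompt="a boy riding a red bicycle")  # Logs a warning, still validates.
```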
@@ -78,16 +78,18 @@ class ChronoWeaveResponse(BaseModel):
     @model_validator(mode="after")
     def check_timeline_segment_count(self) -> "ChronoWeaveResponse":
         expected = self.total_scenes_per_timeline
-        for i, t in enumerate(self.timelines):
-            if len(t.segments) != expected:
-                raise ValueError(f"Timeline {i} (ID: {t.timeline_id}): Expected {expected} segments, got {len(t.segments)}.")
+        for i, timeline in enumerate(self.timelines):
+            if len(timeline.segments) != expected:
+                raise ValueError(f"Timeline {i} (ID: {timeline.timeline_id}): Expected {expected} segments, got {len(timeline.segments)}.")
         return self
 
 
 # --- Helper Functions ---
 @contextlib.contextmanager
 def wave_file_writer(filename: str, channels: int = 1, rate: int = AUDIO_SAMPLING_RATE, sample_width: int = 2):
-    """Safely writes a WAV file using a context manager."""
+    """
+    Safely writes a WAV file using a context manager.
+    """
     wf = None
     try:
         wf = wave.open(filename, "wb")
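The hunk cuts off inside wave_file_writer; for context, a complete minimal context manager of this shape looks like the sketch below (a reconstruction consistent with the constants above, not necessarily the file's exact body):

```python
import contextlib
import wave

AUDIO_SAMPLING_RATE = 24000  # Matches the constant defined earlier.

@contextlib.contextmanager
def wave_file_writer(filename: str, channels: int = 1, rate: int = AUDIO_SAMPLING_RATE, sample_width: int = 2):
    """Safely writes a WAV file, closing the handle even on error."""
    wf = None
    try:
        wf = wave.open(filename, "wb")
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)  # 2 bytes == 16-bit PCM (LINEAR16).
        wf.setframerate(rate)
        yield wf
    finally:
        if wf is not None:
            wf.close()

# Usage: write 0.5 s of silence as 16-bit mono PCM.
with wave_file_writer("silence.wav") as wf:
    wf.writeframes(b"\x00\x00" * (AUDIO_SAMPLING_RATE // 2))
```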
@@ -109,8 +111,8 @@ def wave_file_writer(filename: str, channels: int = 1, rate: int = AUDIO_SAMPLIN
 # --- ChronoWeave Generator Class ---
 class ChronoWeaveGenerator:
     """
-    Encapsulates the logic for generating branching narratives, processing assets (audio, image)
-    and assembling final videos.
+    Encapsulates the logic for generating branching narratives,
+    processing audio, images, and assembling video outputs.
     """
 
     def __init__(self, api_key: str):
@@ -134,7 +136,7 @@ class ChronoWeaveGenerator:
         """
         Generates a story structure as JSON using the text model and validates it via Pydantic.
         """
-        st.info(f"Generating {num_timelines} timeline(s) each with {num_scenes} scene(s) for theme: '{theme}'")
+        st.info(f"Generating {num_timelines} timeline(s) with {num_scenes} scene(s) for theme: '{theme}'")
         logger.info(f"Story generation request: Theme='{theme}', Timelines={num_timelines}, Scenes={num_scenes}")
 
         divergence_instruction = (
@@ -150,7 +152,7 @@ Instructions:
 4. {divergence_instruction}
 5. Style: **'Simple, friendly kids animation, bright colors, rounded shapes'** unless modified by `timeline_visual_modifier`.
 6. `audio_text`: One concise sentence (max 30 words).
-7. `image_prompt`: Descriptive prompt (15–35 words) that emphasizes scene elements. **Avoid repeating general style.**
+7. `image_prompt`: Descriptive prompt (15–35 words) emphasizing scene elements. **Avoid repeating general style.**
 8. `character_description`: Very brief (name and features; < 20 words).
 
 Output only a valid JSON object conforming exactly to this schema:
@@ -185,61 +187,55 @@ JSON Schema: ```json
 
     async def generate_audio(self, text: str, output_filename: str, voice: Optional[str] = None) -> Optional[str]:
         """
-        Asynchronously generates audio using the Gemini Live API.
+        Asynchronously generates audio by wrapping the synchronous generate_content call.
+        The call is executed using asyncio.to_thread to avoid blocking.
         """
         task_id = os.path.basename(output_filename).split(".")[0]
-        collected_audio = bytearray()
         logger.info(f"🎙️ [{task_id}] Generating audio for text: '{text[:60]}...'")
+
         try:
-            config = {
-                "response_modalities": ["AUDIO"],
-                "audio_config": {"audio_encoding": "LINEAR16", "sample_rate_hertz": AUDIO_SAMPLING_RATE},
-            }
-            directive = f"Narrate directly: \"{text}\""
-            async with self.client_audio.connect(config=config) as session:
-                await session.send_request([directive])
-                async for response in session.stream_content():
-                    if response.audio_chunk and response.audio_chunk.data:
-                        collected_audio.extend(response.audio_chunk.data)
-                    if hasattr(response, "error") and response.error:
-                        logger.error(f"❌ [{task_id}] Audio error: {response.error}")
-                        st.error(f"Audio stream error {task_id}: {response.error}", icon="🔊")
-                        return None
-
-            if not collected_audio:
-                logger.warning(f"⚠️ [{task_id}] No audio data received.")
-                st.warning(f"No audio data for {task_id}.", icon="🔊")
+            # Define a synchronous function for audio generation.
+            def sync_generate_audio():
+                prompt = f"Narrate directly: \"{text}\""
+                response = self.client_audio.generate_content(
+                    contents=prompt,
+                    generation_config=genai.types.GenerationConfig(
+                        response_mime_type="application/octet-stream",
+                        temperature=0.7,
+                        audio_config={"audio_encoding": "LINEAR16", "sample_rate_hertz": AUDIO_SAMPLING_RATE}
+                    )
+                )
+                return response
+
+            # Execute the synchronous call in a separate thread.
+            response = await asyncio.to_thread(sync_generate_audio)
+
+            # Process the response. Adjust as necessary based on the API's actual response structure.
+            if not response or not hasattr(response, "audio_chunk") or not response.audio_chunk.data:
+                logger.error(f"❌ [{task_id}] No audio data returned.")
+                st.error(f"Audio generation failed for {task_id}: No audio data.", icon="🔊")
                 return None
 
+            audio_data = response.audio_chunk.data
             with wave_file_writer(output_filename) as wf:
-                wf.writeframes(bytes(collected_audio))
-            logger.info(f"✅ [{task_id}] Audio saved: {os.path.basename(output_filename)} ({len(collected_audio)} bytes)")
+                wf.writeframes(audio_data)
+            logger.info(f"✅ [{task_id}] Audio saved: {os.path.basename(output_filename)} ({len(audio_data)} bytes)")
             return output_filename
 
-        except genai.types.generation_types.BlockedPromptException as bpe:
-            logger.error(f"❌ [{task_id}] Audio blocked: {bpe}")
-            st.error(f"Audio blocked for {task_id}.", icon="🔇")
         except Exception as e:
-            logger.exception(f"❌ [{task_id}] Audio generation failed: {e}")
+            logger.exception(f"❌ [{task_id}] Audio generation error: {e}")
             st.error(f"Audio generation failed for {task_id}: {e}", icon="🔊")
-        return None
+            return None
 
     async def generate_image_async(self, prompt: str, aspect_ratio: str, task_id: str) -> Optional[Image.Image]:
         """
-        Wraps the synchronous image generation function in a thread pool to allow asynchronous invocation.
-        Currently, this function is a stub pending Vertex AI SDK integration.
+        Placeholder for image generation.
+        Currently logs an error and returns None. Update this function once the Vertex AI SDK is integrated.
         """
-        loop = asyncio.get_event_loop()
         logger.info(f"🖼️ [{task_id}] Requesting image for prompt: '{prompt[:70]}...' (Aspect Ratio: {aspect_ratio})")
-        # Placeholder: the real implementation would call a Vertex AI SDK function.
-        def gen_image():
-            logger.error(f"❌ [{task_id}] Image generation not implemented. Update required for Vertex AI.")
-            return None
-
-        image_result = await loop.run_in_executor(None, gen_image)
-        if image_result is None:
-            st.error(f"Image generation for {task_id} skipped: Requires Vertex AI SDK implementation.", icon="🖼️")
-        return image_result
+        logger.error(f"❌ [{task_id}] Image generation not implemented. Update required for Vertex AI.")
+        st.error(f"Image generation for {task_id} skipped: Requires Vertex AI SDK implementation.", icon="🖼️")
+        return None
 
     async def process_scene(
         self,
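The rewritten generate_audio relies on asyncio.to_thread, which runs a blocking callable in a worker thread and awaits its result, keeping the event loop responsive. The pattern in isolation (slow_call is a stand-in for the blocking SDK call):

```python
import asyncio
import time

def slow_call(text: str) -> bytes:
    # Stand-in for a blocking SDK call such as generate_content().
    time.sleep(0.2)
    return text.encode("utf-8")

async def generate(text: str) -> bytes:
    # Runs the blocking function in a worker thread; the event loop stays
    # free to make progress on other tasks (e.g., the concurrent image request).
    return await asyncio.to_thread(slow_call, text)

print(asyncio.run(generate("Narrate directly.")))
```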
@@ -250,7 +246,8 @@ JSON Schema: ```json
         audio_voice: Optional[str] = None,
     ) -> Tuple[Optional[str], Optional[str], Optional[Any], List[str]]:
         """
-        Processes a single scene: generates image and audio concurrently, creates a video clip if both succeed.
+        Processes a single scene: concurrently generates image and audio,
+        and then creates a video clip if both outputs are available.
         Returns a tuple of (image_path, audio_path, video_clip, [error messages]).
         """
         errors: List[str] = []
@@ -260,16 +257,18 @@ JSON Schema: ```json
         video_clip = None
 
         # Launch image and audio generation concurrently.
-        image_future = asyncio.create_task(self.generate_image_async(
-            prompt=f"{segment.image_prompt} Featuring: {segment.character_description} {'Style hint: ' + segment.timeline_visual_modifier if segment.timeline_visual_modifier else ''}",
-            aspect_ratio=aspect_ratio,
-            task_id=task_id,
-        ))
+        image_future = asyncio.create_task(
+            self.generate_image_async(
+                prompt=f"{segment.image_prompt} Featuring: {segment.character_description} " +
+                       (f"Style hint: {segment.timeline_visual_modifier}" if segment.timeline_visual_modifier else ""),
+                aspect_ratio=aspect_ratio,
+                task_id=task_id,
+            )
+        )
         audio_future = asyncio.create_task(self.generate_audio(segment.audio_text, audio_path, audio_voice))
 
         image_result, audio_result = await asyncio.gather(image_future, audio_future)
 
-        # Handle image result (if available, save and preview)
         if image_result:
             try:
                 image_result.save(image_path)
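process_scene fans the two requests out with asyncio.create_task and joins them with asyncio.gather, whose results come back in the order the awaitables were passed, so tuple unpacking is safe. A stripped-down sketch of that fan-out/join (both coroutines are stand-ins):

```python
import asyncio

async def make_image(prompt: str) -> str:
    await asyncio.sleep(0.2)  # Stand-in for the image request.
    return f"image for {prompt!r}"

async def make_audio(text: str) -> str:
    await asyncio.sleep(0.1)  # Stand-in for the audio request.
    return f"audio for {text!r}"

async def process_scene_sketch() -> None:
    image_future = asyncio.create_task(make_image("a red bicycle"))
    audio_future = asyncio.create_task(make_audio("The bicycle rolls."))
    # gather() returns results in the order the awaitables were passed,
    # so the unpacking below is deterministic.
    image_result, audio_result = await asyncio.gather(image_future, audio_future)
    print(image_result, audio_result)

asyncio.run(process_scene_sketch())
```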
@@ -280,7 +279,6 @@ JSON Schema: ```json
         else:
             errors.append(f"Scene {segment.scene_id + 1}: Image generation failed.")
 
-        # Handle audio result and preview
         if audio_result:
             try:
                 with open(audio_result, "rb") as ap:
@@ -290,7 +288,6 @@ JSON Schema: ```json
         else:
             errors.append(f"Scene {segment.scene_id + 1}: Audio generation failed.")
 
-        # Create video clip if both image and audio exist.
         if not errors and os.path.exists(image_path) and os.path.exists(audio_path):
             try:
                 audio_clip = AudioFileClip(audio_path)
@@ -302,7 +299,6 @@ JSON Schema: ```json
                 logger.exception(f"❌ [{task_id}] Failed to create video clip: {e}")
                 errors.append(f"Scene {segment.scene_id + 1}: Video clip creation failed.")
             finally:
-                # Cleanup moviepy instances.
                 try:
                     if 'audio_clip' in locals():
                         audio_clip.close()
@@ -311,10 +307,12 @@ JSON Schema: ```json
                 except Exception:
                     pass
 
-        return (image_path if os.path.exists(image_path) else None,
-                audio_path if os.path.exists(audio_path) else None,
-                video_clip,
-                errors)
+        return (
+            image_path if os.path.exists(image_path) else None,
+            audio_path if os.path.exists(audio_path) else None,
+            video_clip,
+            errors,
+        )
 
     async def process_timeline(
         self,
@@ -324,9 +322,9 @@ JSON Schema: ```json
         audio_voice: Optional[str] = None,
     ) -> Tuple[Optional[str], List[str]]:
         """
-        Processes an entire timeline by concurrently processing all scenes,
-        and then assembling a final video if all scenes succeed.
-        Returns the final video path and a list of error messages.
+        Processes an entire timeline by concurrently processing all its scenes,
+        then assembling a final video if every scene produced a valid clip.
+        Returns a tuple of (final video path, list of error messages).
         """
         timeline_id = timeline.timeline_id
         scene_tasks = [
@@ -342,20 +340,14 @@ JSON Schema: ```json
             if clip is not None:
                 video_clips.append(clip)
 
-        # Assemble the timeline video only if every scene produced a valid clip.
         if video_clips and len(video_clips) == len(timeline.segments):
             output_filename = os.path.join(temp_dir, f"timeline_{timeline_id}_final.mp4")
             try:
                 final_video = concatenate_videoclips(video_clips, method="compose")
                 final_video.write_videofile(
-                    output_filename,
-                    fps=VIDEO_FPS,
-                    codec=VIDEO_CODEC,
-                    audio_codec=AUDIO_CODEC,
-                    logger=None
+                    output_filename, fps=VIDEO_FPS, codec=VIDEO_CODEC, audio_codec=AUDIO_CODEC, logger=None
                 )
                 logger.info(f"✅ Timeline {timeline_id} video saved: {output_filename}")
-                # Cleanup the clips.
                 for clip in video_clips:
                     clip.close()
                 final_video.close()
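The assembly step holds each still image for the length of its narration, then concatenates the per-scene clips. A minimal sketch of that moviepy pipeline, assuming moviepy 1.x, that scene_1.png and scene_1.wav exist on disk, and that AUDIO_CODEC is "aac" (its value is not shown in this diff):

```python
from moviepy.editor import AudioFileClip, ImageClip, concatenate_videoclips

VIDEO_FPS = 24
VIDEO_CODEC = "libx264"
AUDIO_CODEC = "aac"  # Assumed value of the constant defined earlier in app.py.

# One clip per scene: a still image held for the duration of its narration.
audio_clip = AudioFileClip("scene_1.wav")
video_clip = ImageClip("scene_1.png").set_duration(audio_clip.duration).set_audio(audio_clip)

final = concatenate_videoclips([video_clip], method="compose")
final.write_videofile("timeline_final.mp4", fps=VIDEO_FPS, codec=VIDEO_CODEC,
                      audio_codec=AUDIO_CODEC, logger=None)

# Release the file handles held by moviepy.
audio_clip.close()
video_clip.close()
final.close()
```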
@@ -370,7 +362,7 @@ JSON Schema: ```json
 
 # --- Streamlit UI and Main Process ---
 def main():
-    # --- API Key Retrieval ---
+    # API Key Retrieval
     GOOGLE_API_KEY: Optional[str] = None
     try:
         GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
@@ -383,7 +375,6 @@ def main():
         st.error("🚨 **Google API Key Not Found!** Please configure it.", icon="🚨")
         st.stop()
 
-    # --- UI Configuration ---
     st.set_page_config(page_title="ChronoWeave", layout="wide", initial_sidebar_state="expanded")
     st.title("🌀 ChronoWeave: Advanced Branching Narrative Generator")
     st.markdown("""
@@ -415,7 +406,6 @@ def main():
         st.error("Please enter a story theme.", icon="👈")
         return
 
-    # Create a unique temporary directory for this run
    run_id = str(uuid.uuid4()).split('-')[0]
    temp_dir = os.path.join(TEMP_DIR_BASE, f"run_{run_id}")
    try:
@@ -425,7 +415,7 @@ def main():
         st.error(f"🚨 Failed to create temporary directory {temp_dir}: {e}", icon="📂")
         st.stop()
 
-    # Instantiate the ChronoWeave generator
+    # Instantiate ChronoWeaveGenerator and generate story structure.
     generator = ChronoWeaveGenerator(GOOGLE_API_KEY)
     chrono_response = None
     with st.spinner("Generating narrative structure... 🤔"):
@@ -440,30 +430,29 @@ def main():
     generation_errors: Dict[int, List[str]] = {}
 
     async def process_all_timelines():
-        timeline_tasks = {}
-        for timeline in chrono_response.timelines:
-            timeline_tasks[timeline.timeline_id] = asyncio.create_task(
+        timeline_tasks = {
+            timeline.timeline_id: asyncio.create_task(
                 generator.process_timeline(timeline, temp_dir, aspect_ratio, audio_voice)
             )
-        return await asyncio.gather(*timeline_tasks.values(), return_exceptions=False)
+            for timeline in chrono_response.timelines
+        }
+        results = await asyncio.gather(*timeline_tasks.values(), return_exceptions=False)
+        return results
 
     with st.spinner("Processing scenes and assembling videos..."):
         timeline_results = asyncio.run(process_all_timelines())
 
-    # Collect results per timeline.
     for timeline, (video_path, errors) in zip(chrono_response.timelines, timeline_results):
         generation_errors[timeline.timeline_id] = errors
         if video_path:
            final_video_paths[timeline.timeline_id] = video_path
 
     overall_duration = time.time() - overall_start_time
-    # Display status messages
     if final_video_paths:
         st.success(f"Complete! ({len(final_video_paths)} video(s) created in {overall_duration:.2f}s)")
     else:
         st.error(f"Failed. No final videos generated in {overall_duration:.2f}s")
 
-    # --- Display Final Videos ---
     st.header("🎬 Generated Timelines")
     if final_video_paths:
         sorted_ids = sorted(final_video_paths.keys())
@@ -506,7 +495,6 @@ def main():
             for msg in errs:
                 st.error(f" - {msg}")
 
-    # --- Cleanup ---
     st.info(f"Cleaning up temporary files: {temp_dir}")
     try:
         shutil.rmtree(temp_dir)
@@ -518,5 +506,6 @@ def main():
     else:
         st.info("Configure settings and click '✨ Generate ChronoWeave ✨' to start.")
 
+
 if __name__ == "__main__":
     main()
 