mgbam committed
Commit 7ce8888 · verified · 1 Parent(s): 33efea0

Update app.py

Files changed (1)
  1. app.py +135 -76
app.py CHANGED
@@ -21,54 +21,42 @@ initialize_image_llms()
21
 
22
  # --- Get API Readiness Status ---
23
  GEMINI_TEXT_IS_READY = is_gemini_text_ready()
24
- HF_TEXT_IS_READY = is_hf_text_ready() # For text fallback
25
  DALLE_IMAGE_IS_READY = is_dalle_ready()
26
- HF_IMAGE_IS_READY = is_hf_image_api_ready() # For image fallback
27
 
28
  # --- Application Configuration (Models, Defaults) ---
29
  TEXT_MODELS = {}
30
  UI_DEFAULT_TEXT_MODEL_KEY = None
31
-
32
- # Define your Gemini Preview Model IDs here (REPLACE PLACEHOLDERS WITH ACTUAL IDs IF DIFFERENT)
33
- GEMINI_2_5_PRO_PREVIEW_ID = "gemini-2.5-pro-preview-05-06" # From your screenshot
34
- GEMINI_2_5_FLASH_PREVIEW_ID = "gemini-2.5-flash-preview-04-17" # From your screenshot
35
-
36
  if GEMINI_TEXT_IS_READY:
37
- # Add the new Preview models with high priority
38
- TEXT_MODELS[f"🚀 Gemini 2.5 Pro Preview (Narrate)"] = {"id": GEMINI_2_5_PRO_PREVIEW_ID, "type": "gemini"}
39
- TEXT_MODELS[f"⚡ Gemini 2.5 Flash Preview (Narrate)"] = {"id": GEMINI_2_5_FLASH_PREVIEW_ID, "type": "gemini"}
40
  TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
41
  TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
42
-
43
- # Set the default to your newest preferred model if available
44
- if f"🚀 Gemini 2.5 Pro Preview (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = f"🚀 Gemini 2.5 Pro Preview (Narrate)"
45
- elif f"⚡ Gemini 2.5 Flash Preview (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = f"⚡ Gemini 2.5 Flash Preview (Narrate)"
46
- elif "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
47
- else: UI_DEFAULT_TEXT_MODEL_KEY = "Legacy Gemini 1.0 Pro (Narrate)" # Further fallback
48
- print(f"INFO: app.py - Gemini text models populated. Default set to: {UI_DEFAULT_TEXT_MODEL_KEY}")
49
-
50
- elif HF_TEXT_IS_READY: # Fallback to HF if no Gemini is ready
51
- TEXT_MODELS["Mistral 7B (Narrate via HF - Fallback)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
52
- TEXT_MODELS["Gemma 2B (Narrate via HF - Fallback)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}
53
- UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF - Fallback)"
54
- print(f"INFO: app.py - HF text models populated as fallback. Default set to: {UI_DEFAULT_TEXT_MODEL_KEY}")
55
-
56
- if not TEXT_MODELS: # If neither is ready
57
  TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
58
  UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"
59
 
60
-
61
  IMAGE_PROVIDERS = {}
62
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
63
  if DALLE_IMAGE_IS_READY:
64
  IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
65
  IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 2 (Legacy)"] = "dalle_2"
66
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
67
- elif HF_IMAGE_IS_READY:
68
- IMAGE_PROVIDERS["🎑 HF - Stable Diffusion XL Base (Fallback)"] = "hf_sdxl_base"
69
- IMAGE_PROVIDERS["🎠 HF - OpenJourney (Fallback)"] = "hf_openjourney"
70
- IMAGE_PROVIDERS["🌌 HF - Stable Diffusion v1.5 (Fallback)"] = "hf_sd_1_5"
71
- UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎑 HF - Stable Diffusion XL Base (Fallback)"
72
 
73
  if not IMAGE_PROVIDERS:
74
  IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
@@ -78,13 +66,53 @@ elif not UI_DEFAULT_IMAGE_PROVIDER_KEY and IMAGE_PROVIDERS :
78
 
79
 
80
  # --- Gradio UI Theme and CSS ---
81
- # (omega_theme and omega_css definitions remain THE SAME as the last full app.py version)
82
- omega_theme = gr.themes.Base(font=[gr.themes.GoogleFont("Lexend Deca")], primary_hue=gr.themes.colors.purple, secondary_hue=gr.themes.colors.pink, neutral_hue=gr.themes.colors.slate).set(body_background_fill="#0F0F1A", block_background_fill="#1A1A2E", block_border_width="1px", block_border_color="#2A2A4A", block_label_background_fill="#2A2A4A", input_background_fill="#2A2A4A", input_border_color="#4A4A6A", button_primary_background_fill="linear-gradient(135deg, #7F00FF 0%, #E100FF 100%)", button_primary_text_color="white", button_secondary_background_fill="#4A4A6A", button_secondary_text_color="#E0E0FF", slider_color="#A020F0")
83
- omega_css = """ /* ... Paste your full omega_css string here ... */ """ # Make sure this is complete
84
 
85
  # --- Helper: Placeholder Image Creation ---
86
  def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
87
- # ... (Full implementation as before)
88
  img = Image.new('RGB', size, color=color); draw = ImageDraw.Draw(img)
89
  try: font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
90
  except: font_path = None
@@ -107,13 +135,19 @@ def add_scene_to_story_orchestrator(
107
  log_accumulator = [f"**🚀 Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]
108
 
109
  ret_story_state = current_story_obj
110
- ret_gallery = current_story_obj.get_all_scenes_for_gallery_display()
111
  ret_latest_image = None
112
  ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
113
  ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
114
  # ret_log_md will be built up
115
 
116
- # Initial yield for UI updates (buttons handled by .then() chain)
117
  yield {
118
  output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
119
  output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
@@ -170,8 +204,9 @@ def add_scene_to_story_orchestrator(
170
  else: image_generation_error_message = "**Image Error:** DALL-E selected but API not ready."
171
  elif selected_image_provider_type.startswith("hf_"):
172
  if HF_IMAGE_IS_READY:
173
- hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; img_width, img_height = 768, 768
174
  if selected_image_provider_type == "hf_openjourney": hf_model_id_to_call = "prompthero/openjourney"; img_width,img_height = 512,512
 
175
  elif selected_image_provider_type == "hf_sd_1_5": hf_model_id_to_call = "runwayml/stable-diffusion-v1-5"; img_width,img_height = 512,512
176
  image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id_to_call, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=img_width, height=img_height)
177
  else: image_generation_error_message = "**Image Error:** HF Image Model selected but API not ready."
@@ -208,6 +243,17 @@ def add_scene_to_story_orchestrator(
208
 
209
  # --- 4. Prepare Final Values for Return Tuple ---
210
  ret_gallery = current_story_obj.get_all_scenes_for_gallery_display()
211
  _ , latest_narr_for_display_final_str_temp = current_story_obj.get_latest_scene_details_for_display()
212
  ret_latest_narrative_md_obj = gr.Markdown(value=latest_narr_for_display_final_str_temp)
213
 
@@ -229,6 +275,7 @@ def add_scene_to_story_orchestrator(
229
  log_accumulator.append(f" Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
230
  ret_log_md = gr.Markdown(value="\n".join(log_accumulator))
231
 
 
232
  return (
233
  ret_story_state, ret_gallery, ret_latest_image,
234
  ret_latest_narrative_md_obj, ret_status_bar_html_obj, ret_log_md
@@ -236,38 +283,36 @@ def add_scene_to_story_orchestrator(
236
 
237
  def clear_story_state_ui_wrapper():
238
  new_story = Story(); ph_img = create_placeholder_image("Blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
239
- return (new_story, [(ph_img,"New StoryVerse...")], None, gr.Markdown("## ✨ New Story ✨"), gr.HTML("<p class='processing_text status_text'>📜 Story Cleared.</p>"), "Log Cleared.", "")
240
 
241
  def surprise_me_func():
242
- themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy", "Noir Detective"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2); return prompt, style, artist
243
 
244
  def disable_buttons_for_processing():
 
245
  return gr.Button(interactive=False), gr.Button(interactive=False)
246
 
247
  def enable_buttons_after_processing():
 
248
  return gr.Button(interactive=True), gr.Button(interactive=True)
249
 
250
  # --- Gradio UI Definition ---
251
  with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨") as story_weaver_demo:
 
252
  story_state_output = gr.State(Story())
253
-
254
- gr.Markdown("<div align='center'><h1>✨ StoryVerse Omega ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>")
255
- gr.HTML("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in Space Secrets!</div>")
256
-
257
- with gr.Accordion("🔧 AI Services Status & Info", open=False):
258
- status_text_list = []; text_llm_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY); image_gen_ok = (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY)
259
- if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
260
- else:
261
- if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Ready.</p>")
262
- else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation NOT Ready.</p>")
263
- if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Ready.</p>")
264
- else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation NOT Ready.</p>")
265
- gr.HTML("".join(status_text_list))
266
-
267
- with gr.Row(equal_height=False, variant="panel"):
268
  with gr.Column(scale=7, min_width=450):
269
  gr.Markdown("### πŸ’‘ **Craft Your Scene**", elem_classes="input-section-header")
270
- with gr.Group(): scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision:", placeholder="e.g., Amidst swirling cosmic dust...")
271
  with gr.Row(elem_classes=["compact-row"]):
272
  with gr.Column(scale=2): image_style_input = gr.Dropdown(choices=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), value="Default (Cinematic Realism)", label="Visual Style", allow_custom_value=True)
273
  with gr.Column(scale=2): artist_style_input = gr.Textbox(label="Artistic Inspiration (Optional):", placeholder="e.g., Moebius...")
@@ -275,15 +320,17 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
275
  with gr.Accordion("⚙️ Advanced AI Configuration", open=False):
276
  with gr.Group():
277
  text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
278
- image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine (DALL-E/HF)")
279
  with gr.Row():
280
  narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
281
  image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
282
  with gr.Row(elem_classes=["compact-row"], equal_height=True):
283
- engage_button = gr.Button("🌌 Weave Scene!", variant="primary", scale=3, icon="✨")
284
- surprise_button = gr.Button("🎲 Surprise!", variant="secondary", scale=1, icon="🎁")
285
- clear_story_button = gr.Button("🗑️ New", variant="stop", scale=1, icon="♻️")
286
- output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave!</p>")
287
  with gr.Column(scale=10, min_width=700):
288
  gr.Markdown("### πŸ–ΌοΈ **Your StoryVerse**", elem_classes="output-section-header")
289
  with gr.Tabs():
@@ -296,18 +343,18 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
296
  with gr.Accordion("Interaction Log", open=False):
297
  output_interaction_log_markdown = gr.Markdown("Log...")
298
 
299
- engage_button.click(fn=disable_buttons_for_processing, outputs=[engage_button, surprise_button], queue=False)\
300
- .then(fn=add_scene_to_story_orchestrator,
301
- inputs=[story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input, text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown],
302
- outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown])\
303
- .then(fn=enable_buttons_after_processing, outputs=[engage_button, surprise_button], queue=False)
304
-
305
- clear_story_button.click(fn=clear_story_state_ui_wrapper,
306
- outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown, scene_prompt_input])
307
-
308
- surprise_button.click(fn=surprise_me_func,
309
- outputs=[scene_prompt_input, image_style_input, artist_style_input])
310
-
311
  gr.Examples(
312
  examples=[
313
  ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
@@ -320,13 +367,25 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
320
  )
321
  gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omegaβ„’ - Weaving Worlds with Words and Pixels ✨</p></div>")
322
 
323
  # --- Entry Point ---
324
  if __name__ == "__main__":
325
- print("="*80); print("✨ StoryVerse Omega (DALL-E/Gemini/HF Focus) Launching... ✨")
326
  print(f" Gemini Text Ready: {GEMINI_TEXT_IS_READY}"); print(f" HF Text Ready: {HF_TEXT_IS_READY}")
327
  print(f" DALL-E Image Ready: {DALLE_IMAGE_IS_READY}"); print(f" HF Image API Ready: {HF_IMAGE_IS_READY}")
328
- if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY):
329
- print(" 🔴 WARNING: Not all primary/fallback AI services configured.")
330
  print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
331
  print("="*80)
332
  story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)
 
21
 
22
  # --- Get API Readiness Status ---
23
  GEMINI_TEXT_IS_READY = is_gemini_text_ready()
24
+ HF_TEXT_IS_READY = is_hf_text_ready()
25
  DALLE_IMAGE_IS_READY = is_dalle_ready()
26
+ HF_IMAGE_IS_READY = is_hf_image_api_ready()
27
 
28
  # --- Application Configuration (Models, Defaults) ---
29
  TEXT_MODELS = {}
30
  UI_DEFAULT_TEXT_MODEL_KEY = None
31
  if GEMINI_TEXT_IS_READY:
32
  TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
33
  TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
34
+ if HF_TEXT_IS_READY: # This will be used if Gemini is not ready
35
+ TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
36
+ TEXT_MODELS["Gemma 2B (Narrate via HF)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}
37
+
38
+ if TEXT_MODELS: # Determine default text model
39
+ if GEMINI_TEXT_IS_READY and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS:
40
+ UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
41
+ elif HF_TEXT_IS_READY and "Mistral 7B (Narrate via HF)" in TEXT_MODELS:
42
+ UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF)"
43
+ elif TEXT_MODELS: # Fallback if preferred defaults are somehow not in the populated list
44
+ UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
45
+ else: # No text models configured at all
46
  TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
47
  UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"
48
 
 
49
  IMAGE_PROVIDERS = {}
50
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
51
  if DALLE_IMAGE_IS_READY:
52
  IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
53
  IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 2 (Legacy)"] = "dalle_2"
54
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
55
+ elif HF_IMAGE_IS_READY: # Fallback to HF if DALL-E not ready
56
+ IMAGE_PROVIDERS["🎑 HF - SDXL Base"] = "hf_sdxl_base"
57
+ IMAGE_PROVIDERS["🎠 HF - OpenJourney"] = "hf_openjourney"
58
+ IMAGE_PROVIDERS["🌌 HF - SD v1.5"] = "hf_sd_1_5"
59
+ UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎑 HF - SDXL Base"
60
 
61
  if not IMAGE_PROVIDERS:
62
  IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
 
66
 
67
 
68
  # --- Gradio UI Theme and CSS ---
69
+ omega_theme = gr.themes.Base(
70
+ font=[gr.themes.GoogleFont("Lexend Deca"), "ui-sans-serif", "system-ui", "sans-serif"],
71
+ primary_hue=gr.themes.colors.purple, secondary_hue=gr.themes.colors.pink, neutral_hue=gr.themes.colors.slate
72
+ ).set(
73
+ body_background_fill="#0F0F1A", block_background_fill="#1A1A2E", block_border_width="1px",
74
+ block_border_color="#2A2A4A", block_label_background_fill="#2A2A4A", input_background_fill="#2A2A4A",
75
+ input_border_color="#4A4A6A", button_primary_background_fill="linear-gradient(135deg, #7F00FF 0%, #E100FF 100%)",
76
+ button_primary_text_color="white", button_secondary_background_fill="#4A4A6A",
77
+ button_secondary_text_color="#E0E0FF", slider_color="#A020F0"
78
+ )
79
+ omega_css = """
80
+ body, .gradio-container { background-color: #0F0F1A !important; color: #D0D0E0 !important; }
81
+ .gradio-container { max-width: 1400px !important; margin: auto !important; border-radius: 20px; box-shadow: 0 10px 30px rgba(0,0,0,0.2); padding: 25px !important; border: 1px solid #2A2A4A;}
82
+ .gr-panel, .gr-box, .gr-accordion { background-color: #1A1A2E !important; border: 1px solid #2A2A4A !important; border-radius: 12px !important; box-shadow: 0 4px 15px rgba(0,0,0,0.1);}
83
+ .gr-markdown h1 { font-size: 2.8em !important; text-align: center; color: transparent; background: linear-gradient(135deg, #A020F0 0%, #E040FB 100%); -webkit-background-clip: text; background-clip: text; margin-bottom: 5px !important; letter-spacing: -1px;}
84
+ .gr-markdown h3 { color: #C080F0 !important; text-align: center; font-weight: 400; margin-bottom: 25px !important;}
85
+ .input-section-header { font-size: 1.6em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 8px; border-bottom: 2px solid #7F00FF; padding-bottom: 5px;}
86
+ .output-section-header { font-size: 1.8em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 12px;}
87
+ .gr-input input, .gr-input textarea, .gr-dropdown select, .gr-textbox textarea { background-color: #2A2A4A !important; color: #E0E0FF !important; border: 1px solid #4A4A6A !important; border-radius: 8px !important; padding: 10px !important;}
88
+ .gr-button { border-radius: 8px !important; font-weight: 500 !important; transition: all 0.2s ease-in-out !important; display: flex; align-items: center; justify-content: center;}
89
+ .gr-button span { white-space: nowrap !important; overflow: hidden; text-overflow: ellipsis; display: inline-block; max-width: 90%; line-height: normal !important; }
90
+ .gr-button svg { width: 1.1em !important; height: 1.1em !important; margin-right: 4px !important; flex-shrink: 0;}
91
+ .gr-button-primary { padding: 10px 15px !important; } /* Adjusted padding for potentially shorter text */
92
+ .gr-button-primary:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
93
+ .panel_image { border-radius: 12px !important; overflow: hidden; box-shadow: 0 6px 15px rgba(0,0,0,0.25) !important; background-color: #23233A;}
94
+ .panel_image img { max-height: 600px !important; }
95
+ .gallery_output { background-color: transparent !important; border: none !important; }
96
+ .gallery_output .thumbnail-item { border-radius: 8px !important; box-shadow: 0 3px 8px rgba(0,0,0,0.2) !important; margin: 6px !important; transition: transform 0.2s ease; height: 180px !important; width: 180px !important;}
97
+ .gallery_output .thumbnail-item:hover { transform: scale(1.05); }
98
+ .status_text { font-weight: 500; padding: 12px 18px; text-align: center; border-radius: 8px; margin-top:12px; border: 1px solid transparent; font-size: 1.05em;}
99
+ .error_text { background-color: #401010 !important; color: #FFB0B0 !important; border-color: #802020 !important; }
100
+ .success_text { background-color: #104010 !important; color: #B0FFB0 !important; border-color: #208020 !important;}
101
+ .processing_text { background-color: #102040 !important; color: #B0D0FF !important; border-color: #204080 !important;}
102
+ .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
103
+ .gr-tabitem { background-color: #1A1A2E !important; border-radius: 0 0 12px 12px !important; padding: 15px !important;}
104
+ .gr-tab-button.selected { background-color: #2A2A4A !important; color: white !important; border-bottom: 3px solid #A020F0 !important; border-radius: 8px 8px 0 0 !important; font-weight: 600 !important;}
105
+ .gr-tab-button { color: #A0A0C0 !important; border-radius: 8px 8px 0 0 !important;}
106
+ .gr-accordion > .gr-block { border-top: 1px solid #2A2A4A !important; }
107
+ .gr-markdown code { background-color: #2A2A4A !important; color: #C0C0E0 !important; padding: 0.2em 0.5em; border-radius: 4px; }
108
+ .gr-markdown pre { background-color: #23233A !important; padding: 1em !important; border-radius: 6px !important; border: 1px solid #2A2A4A !important;}
109
+ .gr-markdown pre > code { padding: 0 !important; background-color: transparent !important; }
110
+ #surprise_button { background: linear-gradient(135deg, #ff7e5f 0%, #feb47b 100%) !important; font-weight:600 !important;}
111
+ #surprise_button:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(255,126,95,0.3) !important; }
112
+ """
113
 
114
  # --- Helper: Placeholder Image Creation ---
115
  def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
 
116
  img = Image.new('RGB', size, color=color); draw = ImageDraw.Draw(img)
117
  try: font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
118
  except: font_path = None
 
135
  log_accumulator = [f"**🚀 Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]
136
 
137
  ret_story_state = current_story_obj
138
+ # Initialize gallery with placeholders or current items to avoid errors if generation fails early
139
+ initial_gallery_items = current_story_obj.get_all_scenes_for_gallery_display()
140
+ if not initial_gallery_items: # Handle case where story is new and has no scenes
141
+ placeholder_img = create_placeholder_image("Waiting for first scene...", size=(180,180), color="#1A1A2E")
142
+ initial_gallery_items = [(placeholder_img, "Your StoryVerse awaits!")]
143
+ ret_gallery = initial_gallery_items
144
+
145
  ret_latest_image = None
146
  ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
147
  ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
148
  # ret_log_md will be built up
149
 
150
+ # Initial yield for UI updates (buttons disabled by .then() chain)
151
  yield {
152
  output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
153
  output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
 
204
  else: image_generation_error_message = "**Image Error:** DALL-E selected but API not ready."
205
  elif selected_image_provider_type.startswith("hf_"):
206
  if HF_IMAGE_IS_READY:
207
+ hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; img_width, img_height = 768, 768 # Defaults
208
  if selected_image_provider_type == "hf_openjourney": hf_model_id_to_call = "prompthero/openjourney"; img_width,img_height = 512,512
209
+ elif selected_image_provider_type == "hf_sdxl_base": hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; # Redundant, but explicit
210
  elif selected_image_provider_type == "hf_sd_1_5": hf_model_id_to_call = "runwayml/stable-diffusion-v1-5"; img_width,img_height = 512,512
211
  image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id_to_call, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=img_width, height=img_height)
212
  else: image_generation_error_message = "**Image Error:** HF Image Model selected but API not ready."
 
243
 
244
  # --- 4. Prepare Final Values for Return Tuple ---
245
  ret_gallery = current_story_obj.get_all_scenes_for_gallery_display()
246
+ # Ensure gallery items are PIL Images or None for errored/missing images
247
+ processed_gallery_tuples = []
248
+ for img_item, cap_text in ret_gallery:
249
+ if isinstance(img_item, Image.Image):
250
+ processed_gallery_tuples.append((img_item, cap_text))
251
+ else: # Assume it's an error or no image, create placeholder for gallery
252
+ gallery_placeholder = create_placeholder_image(f"S{cap_text.split(':')[0][1:]}\nError/NoImg", size=(180,180), color="#2A2A4A")
253
+ processed_gallery_tuples.append((gallery_placeholder, cap_text))
254
+ ret_gallery = processed_gallery_tuples
255
+
256
+
257
  _ , latest_narr_for_display_final_str_temp = current_story_obj.get_latest_scene_details_for_display()
258
  ret_latest_narrative_md_obj = gr.Markdown(value=latest_narr_for_display_final_str_temp)
259
 
 
275
  log_accumulator.append(f" Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
276
  ret_log_md = gr.Markdown(value="\n".join(log_accumulator))
277
 
278
+ # This is the FINAL return. It must be a tuple matching the `outputs` list of engage_button.click()
279
  return (
280
  ret_story_state, ret_gallery, ret_latest_image,
281
  ret_latest_narrative_md_obj, ret_status_bar_html_obj, ret_log_md
 
283
 
284
  def clear_story_state_ui_wrapper():
285
  new_story = Story(); ph_img = create_placeholder_image("Blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
286
+ # Ensure gallery output for clear is also a list of (image, caption)
287
+ cleared_gallery_display = [(ph_img, "Your StoryVerse is new and untold...")]
288
+ initial_narrative = "## ✨ New Story ✨\nDescribe your first scene!"
289
+ status_msg = "<p class='processing_text status_text'>📜 Story Cleared.</p>"
290
+ return (new_story, cleared_gallery_display, None, gr.Markdown(initial_narrative), gr.HTML(status_msg), "Log Cleared.", "")
291
 
292
  def surprise_me_func():
293
+ print("DEBUG: surprise_me_func called") # For checking button functionality
294
+ themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy", "Noir Detective"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2)
295
+ print(f"DEBUG: surprise_me_func returning: {prompt}, {style}, {artist}")
296
+ return prompt, style, artist
297
 
298
  def disable_buttons_for_processing():
299
+ print("DEBUG: Disabling buttons")
300
  return gr.Button(interactive=False), gr.Button(interactive=False)
301
 
302
  def enable_buttons_after_processing():
303
+ print("DEBUG: Enabling buttons")
304
  return gr.Button(interactive=True), gr.Button(interactive=True)
305
 
306
  # --- Gradio UI Definition ---
307
  with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨") as story_weaver_demo:
308
+ # Define Python variables for UI components
309
  story_state_output = gr.State(Story())
310
+
311
+ with gr.Row(equal_height=False, variant="panel"): # Main layout row
312
+ # Input Column
313
  with gr.Column(scale=7, min_width=450):
314
  gr.Markdown("### πŸ’‘ **Craft Your Scene**", elem_classes="input-section-header")
315
+ with gr.Group(): scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision (Description, Dialogue, Action):", placeholder="e.g., Amidst swirling cosmic dust...")
316
  with gr.Row(elem_classes=["compact-row"]):
317
  with gr.Column(scale=2): image_style_input = gr.Dropdown(choices=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), value="Default (Cinematic Realism)", label="Visual Style", allow_custom_value=True)
318
  with gr.Column(scale=2): artist_style_input = gr.Textbox(label="Artistic Inspiration (Optional):", placeholder="e.g., Moebius...")
 
320
  with gr.Accordion("⚙️ Advanced AI Configuration", open=False):
321
  with gr.Group():
322
  text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
323
+ image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine")
324
  with gr.Row():
325
  narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
326
  image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
327
  with gr.Row(elem_classes=["compact-row"], equal_height=True):
328
+ engage_button = gr.Button("🌌 Weave!", variant="primary", scale=3, icon="✨") # Shorter text
329
+ surprise_button = gr.Button("🎲 Surprise!", variant="secondary", scale=1, icon="🎁")# Shorter text
330
+ clear_story_button = gr.Button("🗑️ New", variant="stop", scale=1, icon="♻️") # Shorter text
331
+ output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first masterpiece!</p>")
332
+
333
+ # Output Column
334
  with gr.Column(scale=10, min_width=700):
335
  gr.Markdown("### πŸ–ΌοΈ **Your StoryVerse**", elem_classes="output-section-header")
336
  with gr.Tabs():
 
343
  with gr.Accordion("Interaction Log", open=False):
344
  output_interaction_log_markdown = gr.Markdown("Log...")
345
 
346
+ # API Status (defined after main layout to ensure it's below everything)
347
+ with gr.Accordion("🔧 AI Services Status & Info", open=False, elem_id="api_status_accordion"):
348
+ status_text_list = []; text_llm_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY); image_gen_ok = (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY)
349
+ if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
350
+ else:
351
+ if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Ready.</p>")
352
+ else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation NOT Ready.</p>")
353
+ if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Ready.</p>")
354
+ else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation NOT Ready.</p>")
355
+ gr.HTML("".join(status_text_list))
356
+
357
+ # Examples (defined after main layout)
358
  gr.Examples(
359
  examples=[
360
  ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
 
367
  )
368
  gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omegaβ„’ - Weaving Worlds with Words and Pixels ✨</p></div>")
369
 
370
+ # Event Handlers
371
+ engage_event_actions = engage_button.click(fn=disable_buttons_for_processing, outputs=[engage_button, surprise_button], queue=False)\
372
+ .then(fn=add_scene_to_story_orchestrator,
373
+ inputs=[story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input, text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown],
374
+ outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown])\
375
+ .then(fn=enable_buttons_after_processing, outputs=[engage_button, surprise_button], queue=False)
376
+
377
+ clear_story_button.click(fn=clear_story_state_ui_wrapper,
378
+ outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown, scene_prompt_input])
379
+
380
+ surprise_button.click(fn=surprise_me_func,
381
+ outputs=[scene_prompt_input, image_style_input, artist_style_input])
382
+
383
  # --- Entry Point ---
384
  if __name__ == "__main__":
385
+ print("="*80); print("✨ StoryVerse Omega (Full App with Fixes) Launching... ✨")
386
  print(f" Gemini Text Ready: {GEMINI_TEXT_IS_READY}"); print(f" HF Text Ready: {HF_TEXT_IS_READY}")
387
  print(f" DALL-E Image Ready: {DALLE_IMAGE_IS_READY}"); print(f" HF Image API Ready: {HF_IMAGE_IS_READY}")
388
+ if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY): print(" 🔴 WARNING: Not all services configured.")
 
389
  print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
390
  print("="*80)
391
  story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)