mgbam committed on
Commit
4abfaf0
·
verified ·
1 Parent(s): bb45ed1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +246 -311
app.py CHANGED
@@ -1,71 +1,60 @@
1
- # storyverse_weaver/app.py
2
- import gradio as gr
 
3
  import os
4
  import time
5
- import json
6
- from PIL import Image, ImageDraw, ImageFont
7
  import random
8
- import traceback
9
 
10
- # --- Core Logic Imports ---
 
 
 
 
11
  from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
12
- from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
13
- from core.story_engine import Story, Scene # CRITICAL: Ensure this is your updated Story class
14
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
15
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
16
  from core.utils import basic_text_cleanup
17
 
18
- # --- Initialize Services ---
19
- initialize_text_llms()
20
- initialize_image_llms()
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- # --- Get API Readiness Status ---
23
- GEMINI_TEXT_IS_READY = is_gemini_text_ready()
24
- HF_TEXT_IS_READY = is_hf_text_ready()
25
- DALLE_IMAGE_IS_READY = is_dalle_ready()
26
- HF_IMAGE_IS_READY = is_hf_image_api_ready()
27
 
28
  # --- Application Configuration (Models, Defaults) ---
29
- # (This section remains the same - ensure TEXT_MODELS, UI_DEFAULT_TEXT_MODEL_KEY, etc. are defined)
30
  TEXT_MODELS = {}
31
  UI_DEFAULT_TEXT_MODEL_KEY = None
32
- if GEMINI_TEXT_IS_READY:
33
- TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
34
- if HF_TEXT_IS_READY:
35
- TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
36
- if TEXT_MODELS:
37
- UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
38
- if GEMINI_TEXT_IS_READY and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
39
- elif HF_TEXT_IS_READY and "Mistral 7B (Narrate via HF)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF)"
40
- else:
41
- TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
42
- UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"
43
 
44
- IMAGE_PROVIDERS = {}
45
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
46
- if DALLE_IMAGE_IS_READY:
47
- IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
48
- UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
49
- elif HF_IMAGE_IS_READY:
50
- IMAGE_PROVIDERS["🎡 HF - SDXL Base"] = "hf_sdxl_base"
51
- UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎡 HF - SDXL Base"
52
- if not IMAGE_PROVIDERS:
53
- IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
54
- UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"
55
-
56
-
57
- # --- Gradio UI Theme and CSS ---
58
- # (omega_theme and omega_css definitions remain THE SAME as the last full app.py version)
59
- omega_theme = gr.themes.Base(font=[gr.themes.GoogleFont("Lexend Deca")], primary_hue=gr.themes.colors.purple).set(body_background_fill="#0F0F1A", block_background_fill="#1A1A2E", slider_color="#A020F0")
60
- omega_css = """ /* ... Paste your full omega_css string here ...
61
- body, .gradio-container .gr-button span { white-space: nowrap !important; overflow: hidden; text-overflow: ellipsis; display: inline-block; max-width: 90%; }
62
- .gradio-container .gr-button { display: flex; align-items: center; justify-content: center; }
63
- .gradio-container .gr-button svg { margin-right: 4px !important; }
64
- */ """
65
-
66
-
67
- # --- Helper: Placeholder Image Creation ---
68
- def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
69
  img = Image.new('RGB', size, color=color); draw = ImageDraw.Draw(img)
70
  try: font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
71
  except: font_path = None
@@ -75,276 +64,222 @@ def create_placeholder_image(text="Processing...", size=(512, 512), color="#2323
75
  else: tw, th = draw.textsize(text, font=font)
76
  draw.text(((size[0]-tw)/2, (size[1]-th)/2), text, font=font, fill=text_color); return img
77
 
78
- # --- StoryVerse Weaver Orchestrator ---
79
- def add_scene_to_story_orchestrator(
80
- current_story_obj: Story, scene_prompt_text: str, image_style_dropdown: str, artist_style_text: str,
81
- negative_prompt_text: str, text_model_key: str, image_provider_key: str,
82
- narrative_length: str, image_quality: str,
83
- progress=gr.Progress(track_tqdm=True)
84
- ):
85
- start_time = time.time()
86
- if not current_story_obj: current_story_obj = Story() # Ensure story object exists
87
-
88
- log_accumulator = [f"**🚀 Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]
89
-
90
- # --- Initialize values for the final return tuple ---
91
- # These correspond to the `outputs` list of `engage_button.click()`
92
- # Order: story_state_output, output_gallery, output_latest_scene_image,
93
- # output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown
94
-
95
- # Get initial gallery state based on current story object
96
- # This ensures that if we error out early, the gallery doesn't just disappear if it had items
97
- current_gallery_items = current_story_obj.get_all_scenes_for_gallery_display()
98
- if not current_gallery_items: # Handle initially empty story for gallery
99
- placeholder_gallery_img = create_placeholder_image("Start Weaving!", size=(180,180), color="#1A1A2E")
100
- current_gallery_items = [(placeholder_gallery_img, "Your StoryVerse awaits!")]
101
-
102
- # These will be updated and form the basis of the final 'return'
103
- ret_story_state = current_story_obj
104
- ret_gallery = current_gallery_items
105
- ret_latest_image = None
106
- ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
107
- ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
108
- # ret_log_md is built up
109
-
110
- # Initial UI update via yield (buttons disabled by .then() chain)
111
- yield {
112
- output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
113
- output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
114
- output_latest_scene_narrative: gr.Markdown(value=" Musing narrative..."),
115
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
116
  }
 
 
 
 
 
 
 
 
 
 
117
 
118
- try:
119
- if not scene_prompt_text.strip():
120
- raise ValueError("Scene prompt cannot be empty!")
121
-
122
- # --- 1. Generate Narrative Text ---
123
- progress(0.1, desc="✍️ Crafting narrative...")
124
- narrative_text_generated = f"Narrative Error: Init failed." # Default
125
- # ... (Full narrative generation logic from your previous working app.py)
126
- # ... (This part should call generate_text_gemini or generate_text_hf and update narrative_text_generated)
127
- text_model_info = TEXT_MODELS.get(text_model_key)
128
- if text_model_info and text_model_info["type"] != "none":
129
- system_p = get_narrative_system_prompt("default"); prev_narrative = current_story_obj.get_last_scene_narrative(); user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
130
- log_accumulator.append(f" Narrative: Using {text_model_key} ({text_model_info['id']}).")
131
- text_response = None
132
- if text_model_info["type"] == "gemini": text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
133
- elif text_model_info["type"] == "hf_text": text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
134
- if text_response and text_response.success: narrative_text_generated = basic_text_cleanup(text_response.text); log_accumulator.append(f" Narrative: Success.")
135
- elif text_response: narrative_text_generated = f"**Narrative Error ({text_model_key}):** {text_response.error}"; log_accumulator.append(f" Narrative: FAILED - {text_response.error}")
136
- else: log_accumulator.append(f" Narrative: FAILED - No response from {text_model_key}.")
137
- else: narrative_text_generated = "**Narrative Error:** Text model unavailable."; log_accumulator.append(f" Narrative: FAILED - Model '{text_model_key}' unavailable.")
138
-
139
- ret_latest_narrative_str_content = f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"
140
- ret_latest_narrative_md_obj = gr.Markdown(value=ret_latest_narrative_str_content) # Prepare for final return
141
- yield { output_latest_scene_narrative: ret_latest_narrative_md_obj,
142
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
143
-
144
- # --- 2. Generate Image ---
145
- progress(0.5, desc="🎨 Conjuring visuals...")
146
- image_generated_pil = None
147
- image_generation_error_message = None
148
- # ... (Full image generation logic from your previous working app.py) ...
149
- # ... (This part should call generate_image_dalle or generate_image_hf_model and update image_generated_pil)
150
- selected_image_provider_key_from_ui = image_provider_key
151
- selected_image_provider_type = IMAGE_PROVIDERS.get(selected_image_provider_key_from_ui)
152
- image_content_prompt_for_gen = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
153
- quality_keyword = "ultra detailed, " if image_quality == "High Detail" else ("concept sketch, " if image_quality == "Sketch Concept" else "")
154
- full_image_prompt = format_image_generation_prompt(quality_keyword + image_content_prompt_for_gen[:350], image_style_dropdown, artist_style_text)
155
- log_accumulator.append(f" Image: Attempting with provider key '{selected_image_provider_key_from_ui}' (maps to type '{selected_image_provider_type}').")
156
- if selected_image_provider_type and selected_image_provider_type != "none": # Actual call logic
157
- image_response = None # ... (call DALL-E or HF based on selected_image_provider_type)
158
- if selected_image_provider_type.startswith("dalle_"):
159
- if DALLE_IMAGE_IS_READY: image_response = generate_image_dalle(full_image_prompt, model="dall-e-3" if selected_image_provider_type == "dalle_3" else "dall-e-2")
160
- else: image_generation_error_message = "**Image Error:** DALL-E selected but not ready."
161
- elif selected_image_provider_type.startswith("hf_"):
162
- if HF_IMAGE_IS_READY:
163
- hf_model_id = "stabilityai/stable-diffusion-xl-base-1.0" # Default
164
- if selected_image_provider_type == "hf_openjourney": hf_model_id = "prompthero/openjourney"
165
- elif selected_image_provider_type == "hf_sd_1_5": hf_model_id = "runwayml/stable-diffusion-v1-5"
166
- image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS)
167
- else: image_generation_error_message = "**Image Error:** HF Image selected but not ready."
168
- # ... (process image_response)
169
- if image_response and image_response.success: image_generated_pil = image_response.image; log_accumulator.append(" Image: Success.")
170
- elif image_response: image_generation_error_message = f"**Image Error:** {image_response.error}"; log_accumulator.append(f" Image: FAILED - {image_response.error}")
171
- elif not image_generation_error_message: image_generation_error_message = "**Image Error:** No response/unknown issue."
172
- else: image_generation_error_message = "**Image Error:** No valid image provider."
173
-
174
- ret_latest_image = image_generated_pil if image_generated_pil else create_placeholder_image("Image Gen Failed", color="#401010")
175
- yield { output_latest_scene_image: gr.Image(value=ret_latest_image),
176
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
177
-
178
- # --- 3. Add Scene to Story Object ---
179
- final_scene_error = None
180
- if image_generation_error_message and "**Narrative Error**" in narrative_text_generated : final_scene_error = f"Narrative: {narrative_text_generated.split('**')[-1].strip()} \nImage: {image_generation_error_message.split('**')[-1].strip()}"
181
- elif "**Narrative Error**" in narrative_text_generated: final_scene_error = narrative_text_generated
182
- elif image_generation_error_message: final_scene_error = image_generation_error_message
183
-
184
- current_story_obj.add_scene_from_elements(
185
- user_prompt=scene_prompt_text,
186
- narrative_text=narrative_text_generated if "**Narrative Error**" not in narrative_text_generated else "(Narrative generation failed, see error log)",
187
- image=image_generated_pil,
188
- image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text and artist_style_text.strip() else ''}",
189
- image_provider=selected_image_provider_key_from_ui,
190
- error_message=final_scene_error
191
  )
192
- ret_story_state = current_story_obj
193
- log_accumulator.append(f" Scene {current_story_obj.current_scene_number} processed and added to story object.")
194
 
195
- # --- 4. Prepare Final Values for Return Tuple ---
196
- gallery_tuples_final = current_story_obj.get_all_scenes_for_gallery_display()
197
- processed_gallery_tuples = []
198
- if not gallery_tuples_final: # Ensure gallery is not empty for Gradio if story just started
199
- placeholder_gallery_img = create_placeholder_image("Your Story Begins!", size=(180,180), color="#1A1A2E")
200
- processed_gallery_tuples = [(placeholder_gallery_img, "First scene pending or just added!")]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  else:
202
- for img_item, cap_text in gallery_tuples_final:
203
- if img_item is None:
204
- gallery_placeholder = create_placeholder_image(f"S{cap_text.split(':')[0][1:]}\nError/NoImg", size=(180,180), color="#2A2A4A")
205
- processed_gallery_tuples.append((gallery_placeholder, cap_text))
206
- else:
207
- processed_gallery_tuples.append((img_item, cap_text))
208
- ret_gallery = processed_gallery_tuples
209
 
210
 
211
- _ , latest_narr_for_display_final_str_temp = current_story_obj.get_latest_scene_details_for_display()
212
- ret_latest_narrative_md_obj = gr.Markdown(value=latest_narr_for_display_final_str_temp)
213
-
214
- status_html_str_temp = f"<p class='error_text status_text'>Scene {current_story_obj.current_scene_number} added with errors.</p>" if final_scene_error else f"<p class='success_text status_text'>🌌 Scene {current_story_obj.current_scene_number} woven!</p>"
215
- ret_status_bar_html_obj = gr.HTML(value=status_html_str_temp)
216
-
217
- progress(1.0, desc="Scene Complete!")
218
-
219
- except ValueError as ve:
220
- log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
221
- ret_status_bar_html_obj = gr.HTML(value=f"<p class='error_text status_text'>❌ CONFIGURATION ERROR: {ve}</p>")
222
- ret_latest_narrative_md_obj = gr.Markdown(value=f"## Error\n{ve}")
223
- except Exception as e:
224
- log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
225
- ret_status_bar_html_obj = gr.HTML(value=f"<p class='error_text status_text'>❌ UNEXPECTED ERROR: {type(e).__name__}. Check logs.</p>")
226
- ret_latest_narrative_md_obj = gr.Markdown(value=f"## Unexpected Error\n{type(e).__name__}: {e}\nSee log for details.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
227
 
228
- current_total_time = time.time() - start_time
229
- log_accumulator.append(f" Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
230
- ret_log_md = gr.Markdown(value="\n".join(log_accumulator)) # Prepare final log content
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
- # Final return for the .click() handler's `outputs` list
233
- return (
234
- ret_story_state,
235
- ret_gallery, # This is now processed_gallery_tuples
236
- ret_latest_image, # This is the PIL image or placeholder
237
- ret_latest_narrative_md_obj, # This is a gr.Markdown object
238
- ret_status_bar_html_obj, # This is a gr.HTML object
239
- ret_log_md # This is a gr.Markdown object
240
- )
241
 
242
- def clear_story_state_ui_wrapper():
243
- print("DEBUG: clear_story_state_ui_wrapper called")
244
- new_story = Story()
245
- placeholder_img = create_placeholder_image("Your StoryVerse is a blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
246
- cleared_gallery = [(placeholder_img, "Your StoryVerse is new and untold...")]
247
- initial_narrative = "## A New Story Begins ✨\nDescribe your first scene idea..."
248
- status_msg = "<p class='processing_text status_text'>📜 Story Cleared.</p>"
249
- return (new_story, cleared_gallery, None, gr.Markdown(initial_narrative), gr.HTML(status_msg), "Log Cleared.", "")
250
-
251
- def surprise_me_func():
252
- print("DEBUG: surprise_me_func called")
253
- themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy", "Noir Detective"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2)
254
- print(f"DEBUG: surprise_me_func returning: Prompt='{prompt}', Style='{style}', Artist='{artist}'")
255
- return prompt, style, artist
256
-
257
- def disable_buttons_for_processing():
258
- print("DEBUG: Disabling buttons")
259
- return gr.Button(interactive=False), gr.Button(interactive=False)
260
-
261
- def enable_buttons_after_processing():
262
- print("DEBUG: Enabling buttons")
263
- return gr.Button(interactive=True), gr.Button(interactive=True)
264
-
265
- # --- Gradio UI Definition ---
266
- with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨") as story_weaver_demo:
267
- # Define Python variables for UI components
268
- story_state_output = gr.State(Story())
269
-
270
- gr.Markdown("<div align='center'><h1>✨ StoryVerse Omega ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>")
271
- gr.HTML("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in Space Secrets!</div>")
272
 
273
- with gr.Accordion("🔧 AI Services Status & Info", open=False):
274
- status_text_list = []; text_llm_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY); image_gen_ok = (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY)
275
- if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
276
- else:
277
- if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Ready.</p>")
278
- else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation NOT Ready.</p>")
279
- if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Ready.</p>")
280
- else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation NOT Ready.</p>")
281
- gr.HTML("".join(status_text_list))
282
-
283
- with gr.Row(equal_height=False, variant="panel"):
284
- with gr.Column(scale=7, min_width=450):
285
- gr.Markdown("### 💡 **Craft Your Scene**", elem_classes="input-section-header")
286
- with gr.Group(): scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision (Description, Dialogue, Action):", placeholder="e.g., Amidst swirling cosmic dust...")
287
- with gr.Row(elem_classes=["compact-row"]):
288
- with gr.Column(scale=2): image_style_input = gr.Dropdown(choices=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), value="Default (Cinematic Realism)", label="Visual Style", allow_custom_value=True)
289
- with gr.Column(scale=2): artist_style_input = gr.Textbox(label="Artistic Inspiration (Optional):", placeholder="e.g., Moebius...")
290
- negative_prompt_input = gr.Textbox(lines=2, label="Exclude from Image:", value=COMMON_NEGATIVE_PROMPTS)
291
- with gr.Accordion("⚙️ Advanced AI Configuration", open=False):
292
- with gr.Group():
293
- text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
294
- image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine")
295
- with gr.Row():
296
- narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
297
- image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
298
- with gr.Row(elem_classes=["compact-row"], equal_height=True):
299
- engage_button = gr.Button("🌌 Weave!", variant="primary", scale=3, icon="✨") # Shorter text
300
- surprise_button = gr.Button("🎲 Surprise!", variant="secondary", scale=1, icon="🎁")
301
- clear_story_button = gr.Button("🗑️ New", variant="stop", scale=1, icon="♻️") # Shorter text
302
- output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first masterpiece!</p>")
303
 
304
- with gr.Column(scale=10, min_width=700):
305
- gr.Markdown("### 🖼️ **Your StoryVerse**", elem_classes="output-section-header")
306
- with gr.Tabs():
307
- with gr.TabItem("🌠 Latest Scene"):
308
- output_latest_scene_image = gr.Image(label="Latest Image", type="pil", interactive=False, height=512, show_label=False, show_download_button=True, elem_classes=["panel_image"])
309
- output_latest_scene_narrative = gr.Markdown()
310
- with gr.TabItem("📚 Story Scroll"):
311
- output_gallery = gr.Gallery(label="Story Scroll", show_label=False, columns=4, object_fit="cover", height=700, preview=True, allow_preview=True, elem_classes=["gallery_output"])
312
- with gr.TabItem("⚙️ Log"):
313
- with gr.Accordion("Interaction Log", open=False):
314
- output_interaction_log_markdown = gr.Markdown("Log...")
315
-
316
- # Event Handlers
317
- engage_button.click(fn=disable_buttons_for_processing, outputs=[engage_button, surprise_button], queue=False)\
318
- .then(fn=add_scene_to_story_orchestrator,
319
- inputs=[story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input, text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown],
320
- outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown])\
321
- .then(fn=enable_buttons_after_processing, outputs=[engage_button, surprise_button], queue=False)
322
-
323
- clear_story_button.click(fn=clear_story_state_ui_wrapper,
324
- outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown, scene_prompt_input])
325
-
326
- surprise_button.click(fn=surprise_me_func,
327
- outputs=[scene_prompt_input, image_style_input, artist_style_input])
328
 
329
- gr.Examples(
330
- examples=[
331
- ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
332
- ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
333
- ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
334
- ["A grand, baroque library where the books fly freely and whisper forgotten lore to those who listen closely.", "Magical Realism", "Remedios Varo", "minimalist, simple, technology"]
335
- ],
336
- inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
337
- label="🌌 Example Universes to Weave 🌌",
338
  )
339
- gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omega™ - Weaving Worlds with Words and Pixels ✨</p></div>")
340
-
341
- # --- Entry Point ---
342
- if __name__ == "__main__":
343
- print("="*80); print("✨ StoryVerse Omega (Full App with Fixes) Launching... ✨")
344
- print(f" Gemini Text Ready: {GEMINI_TEXT_IS_READY}"); print(f" HF Text Ready: {HF_TEXT_IS_READY}")
345
- print(f" DALL-E Image Ready: {DALLE_IMAGE_IS_READY}"); print(f" HF Image API Ready: {HF_IMAGE_IS_READY}")
346
- if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY):
347
- print(" 🔴 WARNING: Not all primary/fallback AI services configured.")
348
- print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
349
- print("="*80)
350
- story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)
 
1
+ # storyverse_weaver_streamlit/app_st.py (example name)
2
+ import streamlit as st
3
+ from PIL import Image, ImageDraw, ImageFont
4
  import os
5
  import time
 
 
6
  import random
 
7
 
8
+ # --- Assuming your core logic is in a sibling 'core' directory ---
9
+ # You might need to adjust sys.path if running locally vs. deployed
10
+ # import sys
11
+ # sys.path.append(os.path.join(os.path.dirname(__file__), '..')) # If core is one level up
12
+
13
  from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
14
+ from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
15
+ from core.story_engine import Story, Scene # Your existing Story and Scene classes
16
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
17
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
18
  from core.utils import basic_text_cleanup
19
 
20
+ # --- Initialize Services ONCE ---
21
+ # Use Streamlit's caching for resource-heavy initializations if they don't depend on session state
22
+ @st.cache_resource # Caches the result across sessions/reruns if inputs don't change
23
+ def load_ai_services():
24
+ print("--- Initializing AI Services (Streamlit Cache Resource) ---")
25
+ initialize_text_llms()
26
+ initialize_image_llms()
27
+ # Return status flags to be stored in session_state or used directly
28
+ return {
29
+ "gemini_text_ready": is_gemini_text_ready(),
30
+ "hf_text_ready": is_hf_text_ready(),
31
+ "dalle_image_ready": is_dalle_ready(),
32
+ "hf_image_ready": is_hf_image_api_ready()
33
+ }
34
 
35
+ ai_services_status = load_ai_services()
 
 
 
 
36
 
37
  # --- Application Configuration (Models, Defaults) ---
38
+ # (Similar logic to your Gradio app.py for populating TEXT_MODELS, IMAGE_PROVIDERS etc.)
39
  TEXT_MODELS = {}
40
  UI_DEFAULT_TEXT_MODEL_KEY = None
41
+ # ... (Populate based on ai_services_status["gemini_text_ready"], ai_services_status["hf_text_ready"]) ...
42
+ if ai_services_status["gemini_text_ready"]: TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"} # etc.
43
+ if ai_services_status["hf_text_ready"]: TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"} # etc.
44
+ if TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0] # Simplified default
 
 
 
 
 
 
 
45
 
46
+ IMAGE_PROVIDERS = {}
47
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
48
+ # ... (Populate based on ai_services_status["dalle_image_ready"], ai_services_status["hf_image_ready"]) ...
49
+ if ai_services_status["dalle_image_ready"]: IMAGE_PROVIDERS["🖼️ DALL-E 3"] = "dalle_3" #etc.
50
+ if ai_services_status["hf_image_ready"]: IMAGE_PROVIDERS["🎡 HF - SDXL Base"] = "hf_sdxl_base" #etc.
51
+ if IMAGE_PROVIDERS: UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0] # Simplified default
52
+
53
+
54
+ # --- Helper: Placeholder Image (can be same as before) ---
55
+ @st.cache_data # Cache placeholder images
56
+ def create_placeholder_image_st(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
57
+ # ... (same PIL logic as before) ...
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  img = Image.new('RGB', size, color=color); draw = ImageDraw.Draw(img)
59
  try: font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
60
  except: font_path = None
 
64
  else: tw, th = draw.textsize(text, font=font)
65
  draw.text(((size[0]-tw)/2, (size[1]-th)/2), text, font=font, fill=text_color); return img
66
 
67
+ # --- Initialize Session State ---
68
+ if 'story_object' not in st.session_state:
69
+ st.session_state.story_object = Story()
70
+ if 'current_log' not in st.session_state:
71
+ st.session_state.current_log = ["Welcome to StoryVerse Weaver (Streamlit Edition)!"]
72
+ if 'latest_scene_image' not in st.session_state:
73
+ st.session_state.latest_scene_image = None
74
+ if 'latest_scene_narrative' not in st.session_state:
75
+ st.session_state.latest_scene_narrative = "Describe your first scene to begin!"
76
+ if 'processing_scene' not in st.session_state:
77
+ st.session_state.processing_scene = False
78
+
79
+ # --- Page Configuration (Do this ONCE at the top) ---
80
+ st.set_page_config(
81
+ page_title="✨ StoryVerse Weaver ✨",
82
+ page_icon="🌌",
83
+ layout="wide", # "wide" or "centered"
84
+ initial_sidebar_state="expanded" # "auto", "expanded", "collapsed"
85
+ )
86
+
87
+ # --- Custom CSS for Dark Theme "WOW" ---
88
+ # (You'd inject this using st.markdown(..., unsafe_allow_html=True) or a separate CSS file)
89
+ streamlit_omega_css = """
90
+ <style>
91
+ /* Base dark theme */
92
+ body { color: #D0D0E0; background-color: #0F0F1A; }
93
+ .stApp { background-color: #0F0F1A; }
94
+ h1, h2, h3, h4, h5, h6 { color: #C080F0; }
95
+ .stTextInput > div > div > input, .stTextArea > div > div > textarea, .stSelectbox > div > div > select {
96
+ background-color: #2A2A4A; color: #E0E0FF; border: 1px solid #4A4A6A; border-radius: 8px;
97
+ }
98
+ .stButton > button {
99
+ background: linear-gradient(135deg, #7F00FF 0%, #E100FF 100%) !important;
100
+ color: white !important; border: none !important; border-radius: 8px !important;
101
+ padding: 0.5em 1em !important; font-weight: 600 !important;
102
+ box-shadow: 0 4px 8px rgba(0,0,0,0.15) !important;
 
 
103
  }
104
+ .stButton > button:hover { transform: scale(1.03) translateY(-1px); box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
105
+ /* Add more specific styles for sidebar, expanders, image display etc. */
106
+ .main .block-container { padding-top: 2rem; padding-bottom: 2rem; padding-left: 3rem; padding-right: 3rem; max-width: 1400px; margin: auto;}
107
+ .stImage > img { border-radius: 12px; box-shadow: 0 6px 15px rgba(0,0,0,0.25); max-height: 600px;}
108
+ .stExpander { background-color: #1A1A2E; border: 1px solid #2A2A4A; border-radius: 12px; margin-bottom: 1em;}
109
+ .stExpander header { font-size: 1.1em; font-weight: 500; color: #D0D0FF;}
110
+ .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
111
+ </style>
112
+ """
113
+ st.markdown(streamlit_omega_css, unsafe_allow_html=True)
114
 
115
+
116
+ # --- Main App UI & Logic ---
117
+ st.markdown("<div align='center'><h1>✨ StoryVerse Weaver ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>", unsafe_allow_html=True)
118
+ st.markdown("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in your environment/secrets!</div>", unsafe_allow_html=True)
119
+
120
+
121
+ # --- Sidebar for Inputs & Configuration ---
122
+ with st.sidebar:
123
+ st.header("🎨 Scene Weaver Panel")
124
+
125
+ with st.form("scene_input_form"):
126
+ scene_prompt_text = st.text_area(
127
+ "Scene Vision (Description, Dialogue, Action):",
128
+ height=200,
129
+ placeholder="e.g., Amidst swirling cosmic dust, Captain Eva pilots her damaged starfighter..."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  )
 
 
131
 
132
+ st.subheader("Visual Style")
133
+ col_style1, col_style2 = st.columns(2)
134
+ with col_style1:
135
+ image_style_dropdown = st.selectbox("Style Preset:", options=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), index=0)
136
+ with col_style2:
137
+ artist_style_text = st.text_input("Artistic Inspiration (Optional):", placeholder="e.g., Moebius")
138
+
139
+ negative_prompt_text = st.text_area("Exclude from Image (Negative Prompt):", value=COMMON_NEGATIVE_PROMPTS, height=100)
140
+
141
+ with st.expander("⚙️ Advanced AI Configuration", expanded=False):
142
+ text_model_key = st.selectbox("Narrative AI Engine:", options=list(TEXT_MODELS.keys()), index=0 if UI_DEFAULT_TEXT_MODEL_KEY in TEXT_MODELS else (list(TEXT_MODELS.keys()).index(UI_DEFAULT_TEXT_MODEL_KEY) if UI_DEFAULT_TEXT_MODEL_KEY else 0) )
143
+ image_provider_key = st.selectbox("Visual AI Engine:", options=list(IMAGE_PROVIDERS.keys()), index=0 if UI_DEFAULT_IMAGE_PROVIDER_KEY in IMAGE_PROVIDERS else (list(IMAGE_PROVIDERS.keys()).index(UI_DEFAULT_IMAGE_PROVIDER_KEY) if UI_DEFAULT_IMAGE_PROVIDER_KEY else 0) )
144
+ narrative_length = st.selectbox("Narrative Detail:", options=["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], index=1)
145
+ image_quality = st.selectbox("Image Detail/Style:", options=["Standard", "High Detail", "Sketch Concept"], index=0)
146
+
147
+ submit_scene_button = st.form_submit_button("🌌 Weave This Scene!", use_container_width=True, type="primary", disabled=st.session_state.processing_scene)
148
+
149
+ if st.button("🎲 Surprise Me!", use_container_width=True, disabled=st.session_state.processing_scene):
150
+ sur_prompt, sur_style, sur_artist = surprise_me_func() # Assuming this is defined as before
151
+ # Need to update the actual input widget values; Streamlit doesn't directly map outputs to inputs like Gradio's Examples
152
+ # This requires a more involved way to update widget states, or just display the suggestion.
153
+ # For simplicity, we'll just show what it would generate. A real app might use st.experimental_rerun or callbacks.
154
+ st.info(f"Surprise Idea: Prompt='{sur_prompt}', Style='{sur_style}', Artist='{sur_artist}'\n(Copy these into the fields above!)")
155
+
156
+
157
+ if st.button("🗑️ New Story", use_container_width=True, disabled=st.session_state.processing_scene):
158
+ st.session_state.story_object = Story()
159
+ st.session_state.current_log = ["Story Cleared. Ready for a new verse!"]
160
+ st.session_state.latest_scene_image = None
161
+ st.session_state.latest_scene_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene!"
162
+ st.experimental_rerun() # Rerun the script to refresh the UI
163
+
164
+ with st.expander("🔧 AI Services Status", expanded=False):
165
+ text_llm_ok, image_gen_ok = (ai_services_status["gemini_text_ready"] or ai_services_status["hf_text_ready"]), \
166
+ (ai_services_status["dalle_image_ready"] or ai_services_status["hf_image_ready"])
167
+ if not text_llm_ok and not image_gen_ok: st.error("CRITICAL: NO AI SERVICES CONFIGURED.")
168
  else:
169
+ if text_llm_ok: st.success("Text Generation Service(s) Ready.")
170
+ else: st.warning("Text Generation Service(s) NOT Ready.")
171
+ if image_gen_ok: st.success("Image Generation Service(s) Ready.")
172
+ else: st.warning("Image Generation Service(s) NOT Ready.")
 
 
 
173
 
174
 
175
+ # --- Main Display Area ---
176
+ st.markdown("---")
177
+ st.markdown("### 🖼️ **Your Evolving StoryVerse**", unsafe_allow_html=True) # For potential custom class via CSS
178
+
179
+ if st.session_state.processing_scene:
180
+ st.info("🌌 Weaving your scene... Please wait.")
181
+ # Could use st.spinner("Weaving your scene...")
182
+
183
+ # Display Latest Scene
184
+ if st.session_state.latest_scene_image or st.session_state.latest_scene_narrative != "Describe your first scene to begin!":
185
+ st.subheader("🌠 Latest Scene")
186
+ if st.session_state.latest_scene_image:
187
+ st.image(st.session_state.latest_scene_image, use_column_width=True, caption="Latest Generated Image")
188
+ st.markdown(st.session_state.latest_scene_narrative, unsafe_allow_html=True)
189
+ st.markdown("---")
190
+
191
+
192
+ # Display Story Scroll (Gallery)
193
+ if st.session_state.story_object and st.session_state.story_object.scenes:
194
+ st.subheader("📚 Story Scroll")
195
+ # Streamlit doesn't have a direct "Gallery" like Gradio. We display images in columns.
196
+ num_columns = 3
197
+ cols = st.columns(num_columns)
198
+ scenes_for_gallery = st.session_state.story_object.get_all_scenes_for_gallery_display() # Ensure this returns (PIL.Image or None, caption)
199
+
200
+ for i, (img, caption) in enumerate(scenes_for_gallery):
201
+ with cols[i % num_columns]:
202
+ if img:
203
+ st.image(img, caption=caption if caption else f"Scene {i+1}", use_column_width=True)
204
+ elif caption: # If no image but caption (e.g. error)
205
+ st.caption(caption) # Display caption as text
206
+ else:
207
+ st.caption("Your story scroll is empty. Weave your first scene!")
208
+
209
+
210
+ # Interaction Log
211
+ with st.expander("⚙️ Interaction Log", expanded=False):
212
+ st.markdown("\n\n".join(st.session_state.current_log), unsafe_allow_html=True)
213
+
214
+
215
+ # --- Logic for Form Submission ---
216
+ if submit_scene_button and scene_prompt_text.strip(): # Check if form submitted and prompt is not empty
217
+ st.session_state.processing_scene = True
218
+ st.session_state.current_log.append(f"**🚀 New Scene Request - {time.strftime('%H:%M:%S')}**")
219
+ st.experimental_rerun() # Rerun to show "processing" state and disable button
220
+
221
+ # ---- This is where the main generation logic happens ----
222
+ # It's similar to add_scene_to_story_orchestrator but updates session_state
223
 
224
+ # 1. Generate Narrative
225
+ current_narrative = f"Narrative Error: Init failed for '{scene_prompt_text[:30]}...'"
226
+ text_model_info = TEXT_MODELS.get(text_model_key)
227
+ if text_model_info and text_model_info["type"] != "none":
228
+ system_p = get_narrative_system_prompt("default")
229
+ prev_narrative = st.session_state.story_object.get_last_scene_narrative()
230
+ user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
231
+ st.session_state.current_log.append(f" Narrative: Using {text_model_key} ({text_model_info['id']}).")
232
+ text_response = None
233
+ if text_model_info["type"] == "gemini" and ai_services_status["gemini_text_ready"]: text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
234
+ elif text_model_info["type"] == "hf_text" and ai_services_status["hf_text_ready"]: text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
235
+
236
+ if text_response and text_response.success: current_narrative = basic_text_cleanup(text_response.text); st.session_state.current_log.append(f" Narrative: Success.")
237
+ elif text_response: current_narrative = f"**Narrative Error ({text_model_key}):** {text_response.error}"; st.session_state.current_log.append(f" Narrative: FAILED - {text_response.error}")
238
+ else: st.session_state.current_log.append(f" Narrative: FAILED - No response from {text_model_key}.")
239
+ else: current_narrative = "**Narrative Error:** Text model unavailable."; st.session_state.current_log.append(f" Narrative: FAILED - Model '{text_model_key}' unavailable.")
240
 
241
+ st.session_state.latest_scene_narrative = f"## Scene Idea: {scene_prompt_text}\n\n{current_narrative}"
 
 
 
 
 
 
 
 
242
 
243
+ # 2. Generate Image
244
+ generated_image_pil = None
245
+ image_gen_error = None
246
+ selected_image_provider_actual_type = IMAGE_PROVIDERS.get(image_provider_key)
247
+ image_content_prompt = current_narrative if current_narrative and "Error" not in current_narrative else scene_prompt_text
248
+ quality_kw = "ultra detailed, " if image_quality == "High Detail" else ("concept sketch, " if image_quality == "Sketch Concept" else "")
249
+ full_img_prompt = format_image_generation_prompt(quality_kw + image_content_prompt[:350], image_style_dropdown, artist_style_text)
250
+ st.session_state.current_log.append(f" Image: Attempting with {image_provider_key} (type '{selected_image_provider_actual_type}').")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
 
252
+ if selected_image_provider_actual_type and selected_image_provider_actual_type != "none":
253
+ img_response = None
254
+ if selected_image_provider_actual_type.startswith("dalle_") and ai_services_status["dalle_image_ready"]:
255
+ dalle_model = "dall-e-3" if selected_image_provider_actual_type == "dalle_3" else "dall-e-2"
256
+ img_response = generate_image_dalle(full_img_prompt, model=dalle_model, quality="hd" if image_quality=="High Detail" else "standard")
257
+ elif selected_image_provider_actual_type.startswith("hf_") and ai_services_status["hf_image_ready"]:
258
+ hf_model_id = "stabilityai/stable-diffusion-xl-base-1.0"; iw,ih=768,768
259
+ if selected_image_provider_actual_type == "hf_openjourney": hf_model_id="prompthero/openjourney";iw,ih=512,512
260
+ img_response = generate_image_hf_model(full_img_prompt, model_id=hf_model_id, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=iw, height=ih)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
 
262
+ if img_response and img_response.success: generated_image_pil = img_response.image; st.session_state.current_log.append(f" Image: Success from {img_response.provider}.")
263
+ elif img_response: image_gen_error = f"**Image Error:** {img_response.error}"; st.session_state.current_log.append(f" Image: FAILED - {img_response.error}")
264
+ else: image_gen_error = "**Image Error:** No response/unknown issue."; st.session_state.current_log.append(f" Image: FAILED - No response object.")
265
+ else: image_gen_error = "**Image Error:** No valid provider."; st.session_state.current_log.append(f" Image: FAILED - No provider configured.")
266
+
267
+ st.session_state.latest_scene_image = generated_image_pil if generated_image_pil else create_placeholder_image("Image Gen Failed", color="#401010")
268
+
269
+ # 3. Add to Story Object
270
+ scene_err = None
271
+ if image_gen_error and "**Narrative Error**" in current_narrative: scene_err = f"{current_narrative}\n{image_gen_error}"
272
+ elif "**Narrative Error**" in current_narrative: scene_err = current_narrative
273
+ elif image_gen_error: scene_err = image_gen_error
 
 
 
 
 
 
 
 
 
 
 
 
274
 
275
+ st.session_state.story_object.add_scene_from_elements(
276
+ user_prompt=scene_prompt_text, narrative_text=current_narrative, image=generated_image_pil,
277
+ image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text else ''}",
278
+ image_provider=image_provider_key, error_message=scene_err
 
 
 
 
 
279
  )
280
+ st.session_state.current_log.append(f" Scene {st.session_state.story_object.current_scene_number} processed.")
281
+ st.session_state.processing_scene = False
282
+ st.experimental_rerun() # Rerun to update the main display with new scene and re-enable button
283
+
284
+ elif submit_scene_button and not scene_prompt_text.strip(): # If form submitted but prompt is empty
285
+ st.warning("Please enter a scene vision/prompt!")