# storyverse_weaver/app.py
import gradio as gr
import os
import time
import json 
from PIL import Image, ImageDraw, ImageFont 
import random
import traceback 

# --- Core Logic Imports ---
from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse 
from core.story_engine import Story, Scene
from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
from core.utils import basic_text_cleanup

# --- Initialize Services ---
initialize_text_llms()
initialize_image_llms() 

# --- Get API Readiness Status ---
GEMINI_TEXT_IS_READY = is_gemini_text_ready()
HF_TEXT_IS_READY = is_hf_text_ready()
DALLE_IMAGE_IS_READY = is_dalle_ready() 
HF_IMAGE_IS_READY = is_hf_image_api_ready()

# --- Application Configuration (Models, Defaults) ---
TEXT_MODELS = {}
UI_DEFAULT_TEXT_MODEL_KEY = None
if GEMINI_TEXT_IS_READY:
    TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
    TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
if HF_TEXT_IS_READY: # Listed whenever the HF token is configured; used as the default only if Gemini is not ready
    TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
    TEXT_MODELS["Gemma 2B (Narrate via HF)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}

if TEXT_MODELS: # Determine default text model
    if GEMINI_TEXT_IS_READY and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: 
        UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
    elif HF_TEXT_IS_READY and "Mistral 7B (Narrate via HF)" in TEXT_MODELS: 
        UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF)"
    elif TEXT_MODELS: # Fallback if preferred defaults are somehow not in the populated list
        UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
else: # No text models configured at all
    TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
    UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"

IMAGE_PROVIDERS = {} 
UI_DEFAULT_IMAGE_PROVIDER_KEY = None
if DALLE_IMAGE_IS_READY: 
    IMAGE_PROVIDERS["πŸ–ΌοΈ OpenAI DALL-E 3"] = "dalle_3" 
    IMAGE_PROVIDERS["πŸ–ΌοΈ OpenAI DALL-E 2 (Legacy)"] = "dalle_2" 
    UI_DEFAULT_IMAGE_PROVIDER_KEY = "πŸ–ΌοΈ OpenAI DALL-E 3"
elif HF_IMAGE_IS_READY: # Fallback to HF if DALL-E not ready
    IMAGE_PROVIDERS["🎑 HF - SDXL Base"] = "hf_sdxl_base" 
    IMAGE_PROVIDERS["🎠 HF - OpenJourney"] = "hf_openjourney"
    IMAGE_PROVIDERS["🌌 HF - SD v1.5"] = "hf_sd_1_5" 
    UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎑 HF - SDXL Base"
    
if not IMAGE_PROVIDERS: 
    IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
    UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"
elif not UI_DEFAULT_IMAGE_PROVIDER_KEY and IMAGE_PROVIDERS:
    UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]


# --- Gradio UI Theme and CSS ---
omega_theme = gr.themes.Base(
    font=[gr.themes.GoogleFont("Lexend Deca"), "ui-sans-serif", "system-ui", "sans-serif"],
    primary_hue=gr.themes.colors.purple, secondary_hue=gr.themes.colors.pink, neutral_hue=gr.themes.colors.slate
).set(
    body_background_fill="#0F0F1A", block_background_fill="#1A1A2E", block_border_width="1px",
    block_border_color="#2A2A4A", block_label_background_fill="#2A2A4A", input_background_fill="#2A2A4A",
    input_border_color="#4A4A6A", button_primary_background_fill="linear-gradient(135deg, #7F00FF 0%, #E100FF 100%)",
    button_primary_text_color="white", button_secondary_background_fill="#4A4A6A",
    button_secondary_text_color="#E0E0FF", slider_color="#A020F0"
)
omega_css = """
body, .gradio-container { background-color: #0F0F1A !important; color: #D0D0E0 !important; }
.gradio-container { max-width: 1400px !important; margin: auto !important; border-radius: 20px; box-shadow: 0 10px 30px rgba(0,0,0,0.2); padding: 25px !important; border: 1px solid #2A2A4A;}
.gr-panel, .gr-box, .gr-accordion { background-color: #1A1A2E !important; border: 1px solid #2A2A4A !important; border-radius: 12px !important; box-shadow: 0 4px 15px rgba(0,0,0,0.1);}
.gr-markdown h1 { font-size: 2.8em !important; text-align: center; color: transparent; background: linear-gradient(135deg, #A020F0 0%, #E040FB 100%); -webkit-background-clip: text; background-clip: text; margin-bottom: 5px !important; letter-spacing: -1px;}
.gr-markdown h3 { color: #C080F0 !important; text-align: center; font-weight: 400; margin-bottom: 25px !important;}
.input-section-header { font-size: 1.6em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 8px; border-bottom: 2px solid #7F00FF; padding-bottom: 5px;}
.output-section-header { font-size: 1.8em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 12px;}
.gr-input input, .gr-input textarea, .gr-dropdown select, .gr-textbox textarea { background-color: #2A2A4A !important; color: #E0E0FF !important; border: 1px solid #4A4A6A !important; border-radius: 8px !important; padding: 10px !important;}
.gr-button { border-radius: 8px !important; font-weight: 500 !important; transition: all 0.2s ease-in-out !important; display: flex; align-items: center; justify-content: center;}
.gr-button span { white-space: nowrap !important; overflow: hidden; text-overflow: ellipsis; display: inline-block; max-width: 90%; line-height: normal !important; }
.gr-button svg { width: 1.1em !important; height: 1.1em !important; margin-right: 4px !important; flex-shrink: 0;}
.gr-button-primary { padding: 10px 15px !important; } /* Adjusted padding for potentially shorter text */
.gr-button-primary:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
.panel_image { border-radius: 12px !important; overflow: hidden; box-shadow: 0 6px 15px rgba(0,0,0,0.25) !important; background-color: #23233A;}
.panel_image img { max-height: 600px !important; }
.gallery_output { background-color: transparent !important; border: none !important; }
.gallery_output .thumbnail-item { border-radius: 8px !important; box-shadow: 0 3px 8px rgba(0,0,0,0.2) !important; margin: 6px !important; transition: transform 0.2s ease; height: 180px !important; width: 180px !important;}
.gallery_output .thumbnail-item:hover { transform: scale(1.05); }
.status_text { font-weight: 500; padding: 12px 18px; text-align: center; border-radius: 8px; margin-top:12px; border: 1px solid transparent; font-size: 1.05em;}
.error_text { background-color: #401010 !important; color: #FFB0B0 !important; border-color: #802020 !important; }
.success_text { background-color: #104010 !important; color: #B0FFB0 !important; border-color: #208020 !important;}
.processing_text { background-color: #102040 !important; color: #B0D0FF !important; border-color: #204080 !important;}
.important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
.gr-tabitem { background-color: #1A1A2E !important; border-radius: 0 0 12px 12px !important; padding: 15px !important;}
.gr-tab-button.selected { background-color: #2A2A4A !important; color: white !important; border-bottom: 3px solid #A020F0 !important; border-radius: 8px 8px 0 0 !important; font-weight: 600 !important;}
.gr-tab-button { color: #A0A0C0 !important; border-radius: 8px 8px 0 0 !important;}
.gr-accordion > .gr-block { border-top: 1px solid #2A2A4A !important; }
.gr-markdown code { background-color: #2A2A4A !important; color: #C0C0E0 !important; padding: 0.2em 0.5em; border-radius: 4px; }
.gr-markdown pre { background-color: #23233A !important; padding: 1em !important; border-radius: 6px !important; border: 1px solid #2A2A4A !important;}
.gr-markdown pre > code { padding: 0 !important; background-color: transparent !important; }
#surprise_button { background: linear-gradient(135deg, #ff7e5f 0%, #feb47b 100%) !important; font-weight:600 !important;}
#surprise_button:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(255,126,95,0.3) !important; }
"""

# --- Helper: Placeholder Image Creation ---
def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
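    """Return a PIL image with the given text centered on a solid background; used as a stand-in while a scene generates or when generation fails."""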
    img = Image.new('RGB', size, color=color)
    draw = ImageDraw.Draw(img)
    font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
    try:
        font = ImageFont.truetype(font_path, 40) if font_path else ImageFont.load_default()
    except IOError:
        font = ImageFont.load_default()
    if hasattr(draw, 'textbbox'):  # Pillow >= 8.0
        bbox = draw.textbbox((0, 0), text, font=font)
        tw, th = bbox[2] - bbox[0], bbox[3] - bbox[1]
    else:  # older Pillow fallback
        tw, th = draw.textsize(text, font=font)
    draw.text(((size[0] - tw) / 2, (size[1] - th) / 2), text, font=font, fill=text_color)
    return img

# --- StoryVerse Weaver Orchestrator ---
def add_scene_to_story_orchestrator(
    current_story_obj: Story, scene_prompt_text: str, image_style_dropdown: str, artist_style_text: str,
    negative_prompt_text: str, text_model_key: str, image_provider_key: str,
    narrative_length: str, image_quality: str,
    progress=gr.Progress(track_tqdm=True)
):
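    """
    Weave one new scene (narrative + image) into current_story_obj.

    Runs as a generator: intermediate yields are dicts keyed by output components
    so the UI updates progressively; the final yield is a tuple matching the
    `outputs` list wired to engage_button.click().
    """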
    start_time = time.time() 
    if not current_story_obj: current_story_obj = Story() 
    
    log_accumulator = [f"**πŸš€ Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]
    
    ret_story_state = current_story_obj
    # Initialize gallery with placeholders or current items to avoid errors if generation fails early
    initial_gallery_items = current_story_obj.get_all_scenes_for_gallery_display()
    if not initial_gallery_items: # Handle case where story is new and has no scenes
        placeholder_img = create_placeholder_image("Waiting for first scene...", size=(180,180), color="#1A1A2E")
        initial_gallery_items = [(placeholder_img, "Your StoryVerse awaits!")]
    ret_gallery = initial_gallery_items

    ret_latest_image = None
    ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
    ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
    # ret_log_md will be built up

    # Initial yield for UI updates (buttons disabled by .then() chain)
    yield {
        output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
        output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
        output_latest_scene_narrative: gr.Markdown(value=" Musing narrative..."),
        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
    }

    try:
        if not scene_prompt_text.strip():
            raise ValueError("Scene prompt cannot be empty!")

        # --- 1. Generate Narrative Text ---
        progress(0.1, desc="✍️ Crafting narrative...")
        narrative_text_generated = f"Narrative Error: Init failed."
        text_model_info = TEXT_MODELS.get(text_model_key)
        if text_model_info and text_model_info["type"] != "none":
            system_p = get_narrative_system_prompt("default")
            prev_narrative = current_story_obj.get_last_scene_narrative()
            user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
            log_accumulator.append(f"  Narrative: Using {text_model_key} ({text_model_info['id']}). Length: {narrative_length}")
            text_response = None
            if text_model_info["type"] == "gemini": text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
            elif text_model_info["type"] == "hf_text": text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
            if text_response and text_response.success: narrative_text_generated = basic_text_cleanup(text_response.text); log_accumulator.append(f"  Narrative: Success.")
            elif text_response: narrative_text_generated = f"**Narrative Error ({text_model_key}):** {text_response.error}"; log_accumulator.append(f"  Narrative: FAILED - {text_response.error}")
            else: log_accumulator.append(f"  Narrative: FAILED - No response from {text_model_key}.")
        else: narrative_text_generated = "**Narrative Error:** Selected text model not available or misconfigured."; log_accumulator.append(f"  Narrative: FAILED - Model '{text_model_key}' unavailable.")
        
        ret_latest_narrative_str_content = f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"
        ret_latest_narrative_md_obj = gr.Markdown(value=ret_latest_narrative_str_content)
        yield { output_latest_scene_narrative: ret_latest_narrative_md_obj,
                output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }

        # --- 2. Generate Image ---
        progress(0.5, desc="🎨 Conjuring visuals...")
        image_generated_pil = None
        image_generation_error_message = None
        selected_image_provider_key_from_ui = image_provider_key
        selected_image_provider_type = IMAGE_PROVIDERS.get(selected_image_provider_key_from_ui)

        image_content_prompt_for_gen = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
        quality_keyword = "ultra detailed, intricate, masterpiece, " if image_quality == "High Detail" else ("concept sketch, line art, " if image_quality == "Sketch Concept" else "")
        full_image_prompt = format_image_generation_prompt(quality_keyword + image_content_prompt_for_gen[:350], image_style_dropdown, artist_style_text)
        log_accumulator.append(f"  Image: Attempting with provider key '{selected_image_provider_key_from_ui}' (maps to type '{selected_image_provider_type}'). Style: {image_style_dropdown}.")

        if selected_image_provider_type and selected_image_provider_type != "none":
            image_response = None
            if selected_image_provider_type.startswith("dalle_"):
                if DALLE_IMAGE_IS_READY:
                    dalle_model_version = "dall-e-3" if selected_image_provider_type == "dalle_3" else "dall-e-2"
                    dalle_size = "1024x1024" 
                    dalle_quality_param = "hd" if image_quality=="High Detail" and dalle_model_version == "dall-e-3" else "standard"
                    image_response = generate_image_dalle(full_image_prompt, model=dalle_model_version, size=dalle_size, quality=dalle_quality_param)
                else: image_generation_error_message = "**Image Error:** DALL-E selected but API not ready."
            elif selected_image_provider_type.startswith("hf_"):
                if HF_IMAGE_IS_READY:
                    hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; img_width, img_height = 768, 768 # Defaults
                    if selected_image_provider_type == "hf_openjourney": hf_model_id_to_call = "prompthero/openjourney"; img_width,img_height = 512,512
                    elif selected_image_provider_type == "hf_sdxl_base": hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; # Redundant, but explicit
                    elif selected_image_provider_type == "hf_sd_1_5": hf_model_id_to_call = "runwayml/stable-diffusion-v1-5"; img_width,img_height = 512,512
                    image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id_to_call, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=img_width, height=img_height)
                else: image_generation_error_message = "**Image Error:** HF Image Model selected but API not ready."
            else: image_generation_error_message = f"**Image Error:** Provider type '{selected_image_provider_type}' not handled."

            if image_response and image_response.success: image_generated_pil = image_response.image; log_accumulator.append(f"  Image: Success from {image_response.provider} (Model: {image_response.model_id_used}).")
            elif image_response: image_generation_error_message = f"**Image Error ({image_response.provider} - {image_response.model_id_used}):** {image_response.error}"; log_accumulator.append(f"  Image: FAILED - {image_response.error}")
            elif not image_generation_error_message: image_generation_error_message = f"**Image Error:** No response with {image_provider_key}."
        
        if not image_generated_pil and not image_generation_error_message:
            image_generation_error_message = "**Image Error:** No valid image provider configured or selected."
            log_accumulator.append(f"  Image: FAILED - {image_generation_error_message}")

        ret_latest_image = image_generated_pil if image_generated_pil else create_placeholder_image("Image Gen Failed", color="#401010")
        yield { output_latest_scene_image: gr.Image(value=ret_latest_image),
                output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }

        # --- 3. Add Scene to Story Object ---
        final_scene_error = None
        # Narrative errors start with "**Narrative Error", so match on that prefix.
        if image_generation_error_message and "**Narrative Error" in narrative_text_generated: final_scene_error = f"{narrative_text_generated}\n{image_generation_error_message}"
        elif "**Narrative Error" in narrative_text_generated: final_scene_error = narrative_text_generated
        elif image_generation_error_message: final_scene_error = image_generation_error_message
        
        current_story_obj.add_scene_from_elements(
            user_prompt=scene_prompt_text,
            narrative_text=narrative_text_generated if "**Narrative Error" not in narrative_text_generated else "(Narrative gen failed)",
            image=image_generated_pil,
            image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text and artist_style_text.strip() else ''}",
            image_provider=selected_image_provider_key_from_ui,
            error_message=final_scene_error
        )
        ret_story_state = current_story_obj 
        log_accumulator.append(f"  Scene {current_story_obj.current_scene_number} processed and added.")
        
        # --- 4. Prepare Final Values for Return Tuple ---
        ret_gallery = current_story_obj.get_all_scenes_for_gallery_display()
        # Ensure gallery items are PIL Images or None for errored/missing images
        processed_gallery_tuples = []
        for img_item, cap_text in ret_gallery:
            if isinstance(img_item, Image.Image):
                processed_gallery_tuples.append((img_item, cap_text))
            else: # Assume it's an error or no image, create placeholder for gallery
                gallery_placeholder = create_placeholder_image(f"S{cap_text.split(':')[0][1:]}\nError/NoImg", size=(180,180), color="#2A2A4A")
                processed_gallery_tuples.append((gallery_placeholder, cap_text))
        ret_gallery = processed_gallery_tuples


        _ , latest_narr_for_display_final_str_temp = current_story_obj.get_latest_scene_details_for_display()
        ret_latest_narrative_md_obj = gr.Markdown(value=latest_narr_for_display_final_str_temp)
        
        status_html_str_temp = f"<p class='error_text status_text'>Scene {current_story_obj.current_scene_number} added with errors.</p>" if final_scene_error else f"<p class='success_text status_text'>🌌 Scene {current_story_obj.current_scene_number} woven!</p>"
        ret_status_bar_html_obj = gr.HTML(value=status_html_str_temp)
        
        progress(1.0, desc="Scene Complete!")

    except ValueError as ve:
        log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
        ret_status_bar_html_obj = gr.HTML(value=f"<p class='error_text status_text'>❌ CONFIGURATION ERROR: {ve}</p>")
        ret_latest_narrative_md_obj = gr.Markdown(value=f"## Error\n{ve}")
    except Exception as e:
        log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
        ret_status_bar_html_obj = gr.HTML(value=f"<p class='error_text status_text'>❌ UNEXPECTED ERROR: {type(e).__name__}. Check logs.</p>")
        ret_latest_narrative_md_obj = gr.Markdown(value=f"## Unexpected Error\n{type(e).__name__}: {e}\nSee log for details.")
    
    current_total_time = time.time() - start_time
    log_accumulator.append(f"  Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
    ret_log_md = gr.Markdown(value="\n".join(log_accumulator))
    
    # Final update. This function is a generator, so the closing values must be
    # yielded (a plain `return` from a generator never reaches the UI); the tuple
    # matches the `outputs` list of engage_button.click().
    yield (
        ret_story_state, ret_gallery, ret_latest_image, 
        ret_latest_narrative_md_obj, ret_status_bar_html_obj, ret_log_md
    )

def clear_story_state_ui_wrapper():
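    """Reset the session to a fresh Story; returns values for (state, gallery, latest image, narrative, status bar, log, prompt box)."""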
    new_story = Story(); ph_img = create_placeholder_image("Blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
    # Ensure gallery output for clear is also a list of (image, caption)
    cleared_gallery_display = [(ph_img, "Your StoryVerse is new and untold...")]
    initial_narrative = "## ✨ New Story ✨\nDescribe your first scene!"
    status_msg = "<p class='processing_text status_text'>πŸ“œ Story Cleared.</p>"
    return (new_story, cleared_gallery_display, None, gr.Markdown(initial_narrative), gr.HTML(status_msg), "Log Cleared.", "")

def surprise_me_func():
    print("DEBUG: surprise_me_func called") # For checking button functionality
    themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy", "Noir Detective"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2)
    print(f"DEBUG: surprise_me_func returning: {prompt}, {style}, {artist}")
    return prompt, style, artist

def disable_buttons_for_processing():
    print("DEBUG: Disabling buttons")
    return gr.Button(interactive=False), gr.Button(interactive=False)

def enable_buttons_after_processing():
    print("DEBUG: Enabling buttons")
    return gr.Button(interactive=True), gr.Button(interactive=True)

# --- Gradio UI Definition ---
with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨") as story_weaver_demo:
    # Define Python variables for UI components
    story_state_output = gr.State(Story()) 
    
    with gr.Row(equal_height=False, variant="panel"): # Main layout row
        # Input Column
        with gr.Column(scale=7, min_width=450): 
            gr.Markdown("### πŸ’‘ **Craft Your Scene**", elem_classes="input-section-header")
            with gr.Group(): scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision (Description, Dialogue, Action):", placeholder="e.g., Amidst swirling cosmic dust...")
            with gr.Row(elem_classes=["compact-row"]):
                with gr.Column(scale=2): image_style_input = gr.Dropdown(choices=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), value="Default (Cinematic Realism)", label="Visual Style", allow_custom_value=True)
                with gr.Column(scale=2): artist_style_input = gr.Textbox(label="Artistic Inspiration (Optional):", placeholder="e.g., Moebius...")
            negative_prompt_input = gr.Textbox(lines=2, label="Exclude from Image:", value=COMMON_NEGATIVE_PROMPTS)
            with gr.Accordion("βš™οΈ Advanced AI Configuration", open=False):
                 with gr.Group():
                    text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
                    image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine")
                    with gr.Row():
                        narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
                        image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
            with gr.Row(elem_classes=["compact-row"], equal_height=True):
                engage_button = gr.Button("🌌 Weave!", variant="primary", scale=3, icon="✨") # Shorter text
                surprise_button = gr.Button("🎲 Surprise!", variant="secondary", scale=1, icon="🎁")# Shorter text
                clear_story_button = gr.Button("πŸ—‘οΈ New", variant="stop", scale=1, icon="♻️") # Shorter text
            output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first masterpiece!</p>")
        
        # Output Column
        with gr.Column(scale=10, min_width=700): 
            gr.Markdown("### πŸ–ΌοΈ **Your StoryVerse**", elem_classes="output-section-header")
            with gr.Tabs():
                with gr.TabItem("🌠 Latest Scene"): 
                    output_latest_scene_image = gr.Image(label="Latest Image", type="pil", interactive=False, height=512, show_label=False, show_download_button=True, elem_classes=["panel_image"])
                    output_latest_scene_narrative = gr.Markdown() 
                with gr.TabItem("πŸ“š Story Scroll"): 
                    output_gallery = gr.Gallery(label="Story Scroll", show_label=False, columns=4, object_fit="cover", height=700, preview=True, allow_preview=True, elem_classes=["gallery_output"])
                with gr.TabItem("βš™οΈ Log"): 
                    with gr.Accordion("Interaction Log", open=False): 
                        output_interaction_log_markdown = gr.Markdown("Log...")

    # API Status (defined after main layout to ensure it's below everything)
    with gr.Accordion("πŸ”§ AI Services Status & Info", open=False, elem_id="api_status_accordion"):
        status_text_list = []; text_llm_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY); image_gen_ok = (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY)
        if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
        else:
            if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Ready.</p>")
            else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation NOT Ready.</p>")
            if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Ready.</p>")
            else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation NOT Ready.</p>")
        gr.HTML("".join(status_text_list))

    # Examples (defined after main layout)
    gr.Examples(
        examples=[
            ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
            ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
            ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
            ["A grand, baroque library where the books fly freely and whisper forgotten lore to those who listen closely.", "Magical Realism", "Remedios Varo", "minimalist, simple, technology"]
        ],
        inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input], 
        label="🌌 Example Universes to Weave 🌌",
    )
    gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omegaβ„’ - Weaving Worlds with Words and Pixels ✨</p></div>")

    # Event Handlers
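    # "Weave" runs as a three-step chain: disable the action buttons, stream the
    # orchestrator's yields into the outputs, then re-enable the buttons
    # (queue=False on the toggles so they apply immediately).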
    engage_event_actions = engage_button.click(fn=disable_buttons_for_processing, outputs=[engage_button, surprise_button], queue=False)\
        .then(fn=add_scene_to_story_orchestrator, 
              inputs=[story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input, text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown], 
              outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown])\
        .then(fn=enable_buttons_after_processing, outputs=[engage_button, surprise_button], queue=False)
    
    clear_story_button.click(fn=clear_story_state_ui_wrapper, 
                             outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown, scene_prompt_input])
    
    surprise_button.click(fn=surprise_me_func, 
                            outputs=[scene_prompt_input, image_style_input, artist_style_input])
    
# --- Entry Point ---
if __name__ == "__main__":
    print("="*80); print("✨ StoryVerse Omega (Full App with Fixes) Launching... ✨")
    print(f"  Gemini Text Ready: {GEMINI_TEXT_IS_READY}"); print(f"  HF Text Ready: {HF_TEXT_IS_READY}")
    print(f"  DALL-E Image Ready: {DALLE_IMAGE_IS_READY}"); print(f"  HF Image API Ready: {HF_IMAGE_IS_READY}")
    if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY): print("  🔴 WARNING: Not all services configured.")
    print(f"  Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f"  Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
    print("="*80)
    story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)