mgbam committed on
Commit
4cb150c
·
verified ·
1 Parent(s): 8a761fa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +279 -190
app.py CHANGED
@@ -1,30 +1,43 @@
1
- # storyverse_weaver_streamlit/app_st.py (example name)
2
- import streamlit as st
3
- from PIL import Image, ImageDraw, ImageFont
4
  import os
5
  import time
6
  import random
 
7
 
8
- # --- Assuming your core logic is in a sibling 'core' directory ---
9
- # You might need to adjust sys.path if running locally vs. deployed
 
 
 
 
 
 
 
 
 
 
 
10
  # import sys
11
- # sys.path.append(os.path.join(os.path.dirname(__file__), '..')) # If core is one level up
 
 
12
 
13
- from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
 
14
  from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
15
- from core.story_engine import Story, Scene # Your existing Story and Scene classes
16
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
17
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
18
  from core.utils import basic_text_cleanup
19
 
20
- # --- Initialize Services ONCE ---
21
- # Use Streamlit's caching for resource-heavy initializations if they don't depend on session state
22
- @st.cache_resource # Caches the result across sessions/reruns if inputs don't change
23
- def load_ai_services():
24
  print("--- Initializing AI Services (Streamlit Cache Resource) ---")
25
  initialize_text_llms()
26
  initialize_image_llms()
27
- # Return status flags to be stored in session_state or used directly
28
  return {
29
  "gemini_text_ready": is_gemini_text_ready(),
30
  "hf_text_ready": is_hf_text_ready(),
@@ -32,29 +45,117 @@ def load_ai_services():
32
  "hf_image_ready": is_hf_image_api_ready()
33
  }
34
 
35
- ai_services_status = load_ai_services()
36
 
37
  # --- Application Configuration (Models, Defaults) ---
38
- # (Similar logic to your Gradio app.py for populating TEXT_MODELS, IMAGE_PROVIDERS etc.)
39
  TEXT_MODELS = {}
40
  UI_DEFAULT_TEXT_MODEL_KEY = None
41
- # ... (Populate based on ai_services_status["gemini_text_ready"], ai_services_status["hf_text_ready"]) ...
42
- if ai_services_status["gemini_text_ready"]: TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"} # etc.
43
- if ai_services_status["hf_text_ready"]: TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"} # etc.
44
- if TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0] # Simplified default
 
 
 
 
 
 
 
 
 
 
45
 
46
  IMAGE_PROVIDERS = {}
47
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
48
- # ... (Populate based on ai_services_status["dalle_image_ready"], ai_services_status["hf_image_ready"]) ...
49
- if ai_services_status["dalle_image_ready"]: IMAGE_PROVIDERS["πŸ–ΌοΈ DALL-E 3"] = "dalle_3" #etc.
50
- if ai_services_status["hf_image_ready"]: IMAGE_PROVIDERS["🎑 HF - SDXL Base"] = "hf_sdxl_base" #etc.
51
- if IMAGE_PROVIDERS: UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0] # Simplified default
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- # --- Helper: Placeholder Image (can be same as before) ---
55
- @st.cache_data # Cache placeholder images
56
  def create_placeholder_image_st(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
57
- # ... (same PIL logic as before) ...
58
  img = Image.new('RGB', size, color=color); draw = ImageDraw.Draw(img)
59
  try: font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
60
  except: font_path = None
@@ -68,102 +169,78 @@ def create_placeholder_image_st(text="Processing...", size=(512, 512), color="#2
68
  if 'story_object' not in st.session_state:
69
  st.session_state.story_object = Story()
70
  if 'current_log' not in st.session_state:
71
- st.session_state.current_log = ["Welcome to StoryVerse Weaver (Streamlit Edition)!"]
72
- if 'latest_scene_image' not in st.session_state:
73
- st.session_state.latest_scene_image = None
74
  if 'latest_scene_narrative' not in st.session_state:
75
- st.session_state.latest_scene_narrative = "Describe your first scene to begin!"
 
 
76
  if 'processing_scene' not in st.session_state:
77
  st.session_state.processing_scene = False
78
-
79
- # --- Page Configuration (Do this ONCE at the top) ---
80
- st.set_page_config(
81
- page_title="✨ StoryVerse Weaver ✨",
82
- page_icon="🌌",
83
- layout="wide", # "wide" or "centered"
84
- initial_sidebar_state="expanded" # "auto", "expanded", "collapsed"
85
- )
86
-
87
- # --- Custom CSS for Dark Theme "WOW" ---
88
- # (You'd inject this using st.markdown(..., unsafe_allow_html=True) or a separate CSS file)
89
- streamlit_omega_css = """
90
- <style>
91
- /* Base dark theme */
92
- body { color: #D0D0E0; background-color: #0F0F1A; }
93
- .stApp { background-color: #0F0F1A; }
94
- h1, h2, h3, h4, h5, h6 { color: #C080F0; }
95
- .stTextInput > div > div > input, .stTextArea > div > div > textarea, .stSelectbox > div > div > select {
96
- background-color: #2A2A4A; color: #E0E0FF; border: 1px solid #4A4A6A; border-radius: 8px;
97
- }
98
- .stButton > button {
99
- background: linear-gradient(135deg, #7F00FF 0%, #E100FF 100%) !important;
100
- color: white !important; border: none !important; border-radius: 8px !important;
101
- padding: 0.5em 1em !important; font-weight: 600 !important;
102
- box-shadow: 0 4px 8px rgba(0,0,0,0.15) !important;
103
- }
104
- .stButton > button:hover { transform: scale(1.03) translateY(-1px); box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
105
- /* Add more specific styles for sidebar, expanders, image display etc. */
106
- .main .block-container { padding-top: 2rem; padding-bottom: 2rem; padding-left: 3rem; padding-right: 3rem; max-width: 1400px; margin: auto;}
107
- .stImage > img { border-radius: 12px; box-shadow: 0 6px 15px rgba(0,0,0,0.25); max-height: 600px;}
108
- .stExpander { background-color: #1A1A2E; border: 1px solid #2A2A4A; border-radius: 12px; margin-bottom: 1em;}
109
- .stExpander header { font-size: 1.1em; font-weight: 500; color: #D0D0FF;}
110
- .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
111
- </style>
112
- """
113
- st.markdown(streamlit_omega_css, unsafe_allow_html=True)
114
 
115
 
116
  # --- Main App UI & Logic ---
117
- st.markdown("<div align='center'><h1>✨ StoryVerse Weaver ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>", unsafe_allow_html=True)
118
  st.markdown("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in your environment/secrets!</div>", unsafe_allow_html=True)
119
 
120
 
121
  # --- Sidebar for Inputs & Configuration ---
122
  with st.sidebar:
123
- st.header("🎨 Scene Weaver Panel")
124
 
125
  with st.form("scene_input_form"):
126
- scene_prompt_text = st.text_area(
127
  "Scene Vision (Description, Dialogue, Action):",
128
  height=200,
129
- placeholder="e.g., Amidst swirling cosmic dust, Captain Eva pilots her damaged starfighter..."
 
130
  )
131
-
132
- st.subheader("Visual Style")
133
  col_style1, col_style2 = st.columns(2)
134
  with col_style1:
135
- image_style_dropdown = st.selectbox("Style Preset:", options=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), index=0)
 
 
136
  with col_style2:
137
- artist_style_text = st.text_input("Artistic Inspiration (Optional):", placeholder="e.g., Moebius")
138
 
139
- negative_prompt_text = st.text_area("Exclude from Image (Negative Prompt):", value=COMMON_NEGATIVE_PROMPTS, height=100)
140
 
141
  with st.expander("βš™οΈ Advanced AI Configuration", expanded=False):
142
- text_model_key = st.selectbox("Narrative AI Engine:", options=list(TEXT_MODELS.keys()), index=0 if UI_DEFAULT_TEXT_MODEL_KEY in TEXT_MODELS else (list(TEXT_MODELS.keys()).index(UI_DEFAULT_TEXT_MODEL_KEY) if UI_DEFAULT_TEXT_MODEL_KEY else 0) )
143
- image_provider_key = st.selectbox("Visual AI Engine:", options=list(IMAGE_PROVIDERS.keys()), index=0 if UI_DEFAULT_IMAGE_PROVIDER_KEY in IMAGE_PROVIDERS else (list(IMAGE_PROVIDERS.keys()).index(UI_DEFAULT_IMAGE_PROVIDER_KEY) if UI_DEFAULT_IMAGE_PROVIDER_KEY else 0) )
144
- narrative_length = st.selectbox("Narrative Detail:", options=["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], index=1)
145
- image_quality = st.selectbox("Image Detail/Style:", options=["Standard", "High Detail", "Sketch Concept"], index=0)
 
 
 
 
146
 
147
  submit_scene_button = st.form_submit_button("🌌 Weave This Scene!", use_container_width=True, type="primary", disabled=st.session_state.processing_scene)
148
 
149
- if st.button("🎲 Surprise Me!", use_container_width=True, disabled=st.session_state.processing_scene):
150
- sur_prompt, sur_style, sur_artist = surprise_me_func() # Assuming this is defined as before
151
- # Need to update the actual input widget values; Streamlit doesn't directly map outputs to inputs like Gradio's Examples
152
- # This requires a more involved way to update widget states, or just display the suggestion.
153
- # For simplicity, we'll just show what it would generate. A real app might use st.experimental_rerun or callbacks.
154
- st.info(f"Surprise Idea: Prompt='{sur_prompt}', Style='{sur_style}', Artist='{sur_artist}'\n(Copy these into the fields above!)")
155
 
156
-
157
- if st.button("πŸ—‘οΈ New Story", use_container_width=True, disabled=st.session_state.processing_scene):
158
  st.session_state.story_object = Story()
159
  st.session_state.current_log = ["Story Cleared. Ready for a new verse!"]
160
- st.session_state.latest_scene_image = None
161
  st.session_state.latest_scene_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene!"
162
- st.experimental_rerun() # Rerun the script to refresh the UI
 
 
163
 
164
  with st.expander("πŸ”§ AI Services Status", expanded=False):
165
- text_llm_ok, image_gen_ok = (ai_services_status["gemini_text_ready"] or ai_services_status["hf_text_ready"]), \
166
- (ai_services_status["dalle_image_ready"] or ai_services_status["hf_image_ready"])
167
  if not text_llm_ok and not image_gen_ok: st.error("CRITICAL: NO AI SERVICES CONFIGURED.")
168
  else:
169
  if text_llm_ok: st.success("Text Generation Service(s) Ready.")
@@ -173,113 +250,125 @@ with st.sidebar:
173
 
174
 
175
  # --- Main Display Area ---
176
- st.markdown("---")
177
- st.markdown("### πŸ–ΌοΈ **Your Evolving StoryVerse**", unsafe_allow_html=True) # For potential custom class via CSS
178
 
179
- if st.session_state.processing_scene:
180
- st.info("🌌 Weaving your scene... Please wait.")
181
- # Could use st.spinner("Weaving your scene...")
182
-
183
- # Display Latest Scene
184
- if st.session_state.latest_scene_image or st.session_state.latest_scene_narrative != "Describe your first scene to begin!":
185
  st.subheader("🌠 Latest Scene")
186
- if st.session_state.latest_scene_image:
187
- st.image(st.session_state.latest_scene_image, use_column_width=True, caption="Latest Generated Image")
188
- st.markdown(st.session_state.latest_scene_narrative, unsafe_allow_html=True)
189
- st.markdown("---")
190
-
 
 
 
 
 
 
191
 
192
- # Display Story Scroll (Gallery)
193
- if st.session_state.story_object and st.session_state.story_object.scenes:
194
  st.subheader("πŸ“š Story Scroll")
195
- # Streamlit doesn't have a direct "Gallery" like Gradio. We display images in columns.
196
- num_columns = 3
197
- cols = st.columns(num_columns)
198
- scenes_for_gallery = st.session_state.story_object.get_all_scenes_for_gallery_display() # Ensure this returns (PIL.Image or None, caption)
199
-
200
- for i, (img, caption) in enumerate(scenes_for_gallery):
201
- with cols[i % num_columns]:
202
- if img:
203
- st.image(img, caption=caption if caption else f"Scene {i+1}", use_column_width=True)
204
- elif caption: # If no image but caption (e.g. error)
205
- st.caption(caption) # Display caption as text
206
- else:
207
- st.caption("Your story scroll is empty. Weave your first scene!")
 
 
 
208
 
209
 
210
  # Interaction Log
211
- with st.expander("βš™οΈ Interaction Log", expanded=False):
212
- st.markdown("\n\n".join(st.session_state.current_log), unsafe_allow_html=True)
213
-
214
 
215
  # --- Logic for Form Submission ---
216
- if submit_scene_button and scene_prompt_text.strip(): # Check if form submitted and prompt is not empty
217
- st.session_state.processing_scene = True
218
- st.session_state.current_log.append(f"**πŸš€ New Scene Request - {time.strftime('%H:%M:%S')}**")
219
- st.experimental_rerun() # Rerun to show "processing" state and disable button
220
-
221
- # ---- This is where the main generation logic happens ----
222
- # It's similar to add_scene_to_story_orchestrator but updates session_state
223
-
224
- # 1. Generate Narrative
225
- current_narrative = f"Narrative Error: Init failed for '{scene_prompt_text[:30]}...'"
226
- text_model_info = TEXT_MODELS.get(text_model_key)
227
- if text_model_info and text_model_info["type"] != "none":
228
- system_p = get_narrative_system_prompt("default")
229
- prev_narrative = st.session_state.story_object.get_last_scene_narrative()
230
- user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
231
- st.session_state.current_log.append(f" Narrative: Using {text_model_key} ({text_model_info['id']}).")
232
- text_response = None
233
- if text_model_info["type"] == "gemini" and ai_services_status["gemini_text_ready"]: text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
234
- elif text_model_info["type"] == "hf_text" and ai_services_status["hf_text_ready"]: text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
235
 
236
- if text_response and text_response.success: current_narrative = basic_text_cleanup(text_response.text); st.session_state.current_log.append(f" Narrative: Success.")
237
- elif text_response: current_narrative = f"**Narrative Error ({text_model_key}):** {text_response.error}"; st.session_state.current_log.append(f" Narrative: FAILED - {text_response.error}")
238
- else: st.session_state.current_log.append(f" Narrative: FAILED - No response from {text_model_key}.")
239
- else: current_narrative = "**Narrative Error:** Text model unavailable."; st.session_state.current_log.append(f" Narrative: FAILED - Model '{text_model_key}' unavailable.")
240
-
241
- st.session_state.latest_scene_narrative = f"## Scene Idea: {scene_prompt_text}\n\n{current_narrative}"
242
-
243
- # 2. Generate Image
244
- generated_image_pil = None
245
- image_gen_error = None
246
- selected_image_provider_actual_type = IMAGE_PROVIDERS.get(image_provider_key)
247
- image_content_prompt = current_narrative if current_narrative and "Error" not in current_narrative else scene_prompt_text
248
- quality_kw = "ultra detailed, " if image_quality == "High Detail" else ("concept sketch, " if image_quality == "Sketch Concept" else "")
249
- full_img_prompt = format_image_generation_prompt(quality_kw + image_content_prompt[:350], image_style_dropdown, artist_style_text)
250
- st.session_state.current_log.append(f" Image: Attempting with {image_provider_key} (type '{selected_image_provider_actual_type}').")
251
-
252
- if selected_image_provider_actual_type and selected_image_provider_actual_type != "none":
253
- img_response = None
254
- if selected_image_provider_actual_type.startswith("dalle_") and ai_services_status["dalle_image_ready"]:
255
- dalle_model = "dall-e-3" if selected_image_provider_actual_type == "dalle_3" else "dall-e-2"
256
- img_response = generate_image_dalle(full_img_prompt, model=dalle_model, quality="hd" if image_quality=="High Detail" else "standard")
257
- elif selected_image_provider_actual_type.startswith("hf_") and ai_services_status["hf_image_ready"]:
258
- hf_model_id = "stabilityai/stable-diffusion-xl-base-1.0"; iw,ih=768,768
259
- if selected_image_provider_actual_type == "hf_openjourney": hf_model_id="prompthero/openjourney";iw,ih=512,512
260
- img_response = generate_image_hf_model(full_img_prompt, model_id=hf_model_id, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=iw, height=ih)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
 
262
- if img_response and img_response.success: generated_image_pil = img_response.image; st.session_state.current_log.append(f" Image: Success from {img_response.provider}.")
263
- elif img_response: image_gen_error = f"**Image Error:** {img_response.error}"; st.session_state.current_log.append(f" Image: FAILED - {img_response.error}")
264
- else: image_gen_error = "**Image Error:** No response/unknown issue."; st.session_state.current_log.append(f" Image: FAILED - No response object.")
265
- else: image_gen_error = "**Image Error:** No valid provider."; st.session_state.current_log.append(f" Image: FAILED - No provider configured.")
266
-
267
- st.session_state.latest_scene_image = generated_image_pil if generated_image_pil else create_placeholder_image("Image Gen Failed", color="#401010")
268
-
269
- # 3. Add to Story Object
270
- scene_err = None
271
- if image_gen_error and "**Narrative Error**" in current_narrative: scene_err = f"{current_narrative}\n{image_gen_error}"
272
- elif "**Narrative Error**" in current_narrative: scene_err = current_narrative
273
- elif image_gen_error: scene_err = image_gen_error
274
-
275
- st.session_state.story_object.add_scene_from_elements(
276
- user_prompt=scene_prompt_text, narrative_text=current_narrative, image=generated_image_pil,
277
- image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text else ''}",
278
- image_provider=image_provider_key, error_message=scene_err
279
- )
280
- st.session_state.current_log.append(f" Scene {st.session_state.story_object.current_scene_number} processed.")
281
- st.session_state.processing_scene = False
282
- st.experimental_rerun() # Rerun to update the main display with new scene and re-enable button
283
-
284
- elif submit_scene_button and not scene_prompt_text.strip(): # If form submitted but prompt is empty
285
- st.warning("Please enter a scene vision/prompt!")
 
1
+ # app.py (Streamlit version for StoryVerse Weaver)
2
+ import streamlit as st # FIRST STREAMLIT IMPORT
3
+ from PIL import Image, ImageDraw, ImageFont # For creating placeholder images
4
  import os
5
  import time
6
  import random
7
+ import traceback # For better error display
8
 
9
+ # --- Page Configuration (MUST BE THE VERY FIRST STREAMLIT COMMAND) ---
10
+ st.set_page_config(
11
+ page_title="✨ StoryVerse Weaver ✨",
12
+ page_icon="😻",
13
+ layout="wide",
14
+ initial_sidebar_state="expanded"
15
+ )
16
+ # --- END OF PAGE CONFIG ---
17
+
18
+ # --- Add project root to sys.path if core modules are in a subdirectory ---
19
+ # This might be needed depending on your exact file structure when running locally.
20
+ # For Hugging Face Spaces, if 'core' and 'prompts' are at the same level as app.py,
21
+ # direct imports usually work.
22
  # import sys
23
+ # SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
24
+ # sys.path.append(os.path.dirname(SCRIPT_DIR)) # If app.py is in a subdir like 'src'
25
+ # sys.path.append(SCRIPT_DIR) # If core/prompts are subdirs of where app.py is
26
 
27
+ # --- Core Logic Imports (NOW AFTER st.set_page_config) ---
28
+ from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf, LLMTextResponse
29
  from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
30
+ from core.story_engine import Story, Scene
31
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
32
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
33
  from core.utils import basic_text_cleanup
34
 
35
+ # --- Initialize AI Services (Cached) ---
36
+ @st.cache_resource
37
+ def load_ai_services_config():
 
38
  print("--- Initializing AI Services (Streamlit Cache Resource) ---")
39
  initialize_text_llms()
40
  initialize_image_llms()
 
41
  return {
42
  "gemini_text_ready": is_gemini_text_ready(),
43
  "hf_text_ready": is_hf_text_ready(),
 
45
  "hf_image_ready": is_hf_image_api_ready()
46
  }
47
 
48
+ AI_SERVICES_STATUS = load_ai_services_config()
49
 
50
  # --- Application Configuration (Models, Defaults) ---
 
51
  TEXT_MODELS = {}
52
  UI_DEFAULT_TEXT_MODEL_KEY = None
53
+ if AI_SERVICES_STATUS["gemini_text_ready"]:
54
+ TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
55
+ TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
56
+ if AI_SERVICES_STATUS["hf_text_ready"]:
57
+ TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
58
+ TEXT_MODELS["Gemma 2B (Narrate via HF)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}
59
+
60
+ if TEXT_MODELS:
61
+ if AI_SERVICES_STATUS["gemini_text_ready"] and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
62
+ elif AI_SERVICES_STATUS["hf_text_ready"] and "Mistral 7B (Narrate via HF)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF)"
63
+ else: UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
64
+ else:
65
+ TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
66
+ UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"
67
 
68
  IMAGE_PROVIDERS = {}
69
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
70
+ if AI_SERVICES_STATUS["dalle_image_ready"]:
71
+ IMAGE_PROVIDERS["πŸ–ΌοΈ OpenAI DALL-E 3"] = "dalle_3"
72
+ IMAGE_PROVIDERS["πŸ–ΌοΈ OpenAI DALL-E 2 (Legacy)"] = "dalle_2"
73
+ UI_DEFAULT_IMAGE_PROVIDER_KEY = "πŸ–ΌοΈ OpenAI DALL-E 3"
74
+ elif AI_SERVICES_STATUS["hf_image_ready"]:
75
+ IMAGE_PROVIDERS["🎑 HF - Stable Diffusion XL Base"] = "hf_sdxl_base"
76
+ IMAGE_PROVIDERS["🎠 HF - OpenJourney"] = "hf_openjourney"
77
+ IMAGE_PROVIDERS["🌌 HF - Stable Diffusion v1.5"] = "hf_sd_1_5"
78
+ UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎑 HF - Stable Diffusion XL Base"
79
+
80
+ if not IMAGE_PROVIDERS:
81
+ IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
82
+ UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"
83
+ elif not UI_DEFAULT_IMAGE_PROVIDER_KEY and IMAGE_PROVIDERS :
84
+ UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]
85
+
86
+ # --- Custom CSS for Dark Theme "WOW" ---
87
+ streamlit_omega_css = """
88
+ <style>
89
+ body { color: #D0D0E0; background-color: #0F0F1A; font-family: 'Lexend Deca', sans-serif;}
90
+ .stApp { background-color: #0F0F1A; }
91
+ h1, h2, h3, h4, h5, h6 { color: #C080F0; }
92
+ /* Main containers */
93
+ .main .block-container {
94
+ padding-top: 2rem; padding-bottom: 2rem; padding-left: 2rem; padding-right: 2rem;
95
+ max-width: 1400px; margin: auto;
96
+ background-color: #1A1A2E; /* Panel background for main content area */
97
+ border-radius: 15px;
98
+ box-shadow: 0 8px 24px rgba(0,0,0,0.15);
99
+ }
100
+ /* Sidebar styling */
101
+ [data-testid="stSidebar"] {
102
+ background-color: #131325; /* Slightly different dark for sidebar */
103
+ border-right: 1px solid #2A2A4A;
104
+ }
105
+ [data-testid="stSidebar"] .stMarkdown h3 { /* Sidebar headers */
106
+ color: #D0D0FF !important;
107
+ font-size: 1.5em;
108
+ border-bottom: 2px solid #7F00FF;
109
+ padding-bottom: 5px;
110
+ }
111
+ /* Input elements */
112
+ .stTextInput > div > div > input, .stTextArea > div > div > textarea,
113
+ .stSelectbox > div > div > div[data-baseweb="select"] > div,
114
+ div[data-baseweb="input"] > input /* For text_input */
115
+ {
116
+ background-color: #2A2A4A !important; color: #E0E0FF !important;
117
+ border: 1px solid #4A4A6A !important; border-radius: 8px !important;
118
+ }
119
+ /* Buttons */
120
+ .stButton > button {
121
+ background: linear-gradient(135deg, #7F00FF 0%, #E100FF 100%) !important;
122
+ color: white !important; border: none !important; border-radius: 8px !important;
123
+ padding: 0.7em 1.3em !important; font-weight: 600 !important;
124
+ box-shadow: 0 4px 8px rgba(0,0,0,0.15) !important;
125
+ transition: all 0.2s ease-in-out;
126
+ width: 100%; /* Make form submit button full width */
127
+ }
128
+ .stButton > button:hover { transform: scale(1.03) translateY(-1px); box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
129
+ .stButton > button:disabled { background: #4A4A6A !important; color: #8080A0 !important; cursor: not-allowed; }
130
+
131
+ /* Secondary button style (if you add more buttons) */
132
+ /* You might need to assign a class and target it if Streamlit doesn't have variants like Gradio */
133
+ button[kind="secondary"] { /* Conceptual, Streamlit buttons don't have 'kind' directly in CSS */
134
+ background-color: #4A4A6A !important; color: #E0E0FF !important;
135
+ }
136
 
137
+ /* Image display */
138
+ .stImage > img { border-radius: 12px; box-shadow: 0 6px 15px rgba(0,0,0,0.25); max-height: 550px; margin: auto; display: block;}
139
+ /* Expander styling */
140
+ .stExpander { background-color: #161628; border: 1px solid #2A2A4A; border-radius: 12px; margin-bottom: 1em;}
141
+ .stExpander header { font-size: 1.1em; font-weight: 500; color: #D0D0FF;}
142
+ /* Note/Alert styling */
143
+ .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
144
+ /* Custom class for status messages */
145
+ .status-message { padding: 10px; border-radius: 6px; margin-top: 10px; text-align: center; font-weight: 500; }
146
+ .status-success { background-color: #104010; color: #B0FFB0; border: 1px solid #208020; }
147
+ .status-error { background-color: #401010; color: #FFB0B0; border: 1px solid #802020; }
148
+ .status-processing { background-color: #102040; color: #B0D0FF; border: 1px solid #204080; }
149
+ /* Gallery columns */
150
+ .gallery-col img { border-radius: 8px; box-shadow: 0 2px 6px rgba(0,0,0,0.15); margin-bottom: 5px;}
151
+ .gallery-col .stCaption { font-size: 0.85em; text-align: center; }
152
+ </style>
153
+ """
154
+ st.markdown(streamlit_omega_css, unsafe_allow_html=True)
155
 
156
+ # --- Helper: Placeholder Image Creation ---
157
+ @st.cache_data
158
  def create_placeholder_image_st(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
 
159
  img = Image.new('RGB', size, color=color); draw = ImageDraw.Draw(img)
160
  try: font_path = "arial.ttf" if os.path.exists("arial.ttf") else None
161
  except: font_path = None
 
169
  if 'story_object' not in st.session_state:
170
  st.session_state.story_object = Story()
171
  if 'current_log' not in st.session_state:
172
+ st.session_state.current_log = ["Welcome to StoryVerse Omega (Streamlit Edition)!"]
173
+ if 'latest_scene_image_pil' not in st.session_state:
174
+ st.session_state.latest_scene_image_pil = None
175
  if 'latest_scene_narrative' not in st.session_state:
176
+ st.session_state.latest_scene_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene idea in the panel on the left!"
177
+ if 'status_message' not in st.session_state:
178
+ st.session_state.status_message = {"text": "Ready to weave your first masterpiece!", "type": "processing"}
179
  if 'processing_scene' not in st.session_state:
180
  st.session_state.processing_scene = False
181
+ if 'form_scene_prompt' not in st.session_state: st.session_state.form_scene_prompt = ""
182
+ if 'form_image_style' not in st.session_state: st.session_state.form_image_style = "Default (Cinematic Realism)"
183
+ if 'form_artist_style' not in st.session_state: st.session_state.form_artist_style = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
 
185
 
186
  # --- Main App UI & Logic ---
187
+ st.markdown("<div align='center'><h1>✨ StoryVerse Omega ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>", unsafe_allow_html=True)
188
  st.markdown("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in your environment/secrets!</div>", unsafe_allow_html=True)
189
 
190
 
191
  # --- Sidebar for Inputs & Configuration ---
192
  with st.sidebar:
193
+ st.markdown("### πŸ’‘ **Craft Your Scene**")
194
 
195
  with st.form("scene_input_form"):
196
+ scene_prompt_text_input = st.text_area( # Unique key for widget
197
  "Scene Vision (Description, Dialogue, Action):",
198
  height=200,
199
+ value=st.session_state.form_scene_prompt,
200
+ placeholder="e.g., Amidst swirling cosmic dust..."
201
  )
202
+ st.markdown("#### 🎨 Visual Style")
 
203
  col_style1, col_style2 = st.columns(2)
204
  with col_style1:
205
+ image_style_dropdown_input = st.selectbox("Style Preset:", options=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())),
206
+ index= (["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))).index(st.session_state.form_image_style) if st.session_state.form_image_style in (["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))) else 0,
207
+ key="sel_image_style")
208
  with col_style2:
209
+ artist_style_text_input = st.text_input("Artistic Inspiration (Optional):", placeholder="e.g., Moebius", value=st.session_state.form_artist_style, key="sel_artist_style")
210
 
211
+ negative_prompt_text_input = st.text_area("Exclude from Image (Negative Prompt):", value=COMMON_NEGATIVE_PROMPTS, height=100, key="sel_negative_prompt")
212
 
213
  with st.expander("βš™οΈ Advanced AI Configuration", expanded=False):
214
+ text_model_key_input = st.selectbox("Narrative AI Engine:", options=list(TEXT_MODELS.keys()),
215
+ index=list(TEXT_MODELS.keys()).index(UI_DEFAULT_TEXT_MODEL_KEY) if UI_DEFAULT_TEXT_MODEL_KEY in TEXT_MODELS else 0,
216
+ key="sel_text_model")
217
+ image_provider_key_input = st.selectbox("Visual AI Engine:", options=list(IMAGE_PROVIDERS.keys()),
218
+ index=list(IMAGE_PROVIDERS.keys()).index(UI_DEFAULT_IMAGE_PROVIDER_KEY) if UI_DEFAULT_IMAGE_PROVIDER_KEY in IMAGE_PROVIDERS else 0,
219
+ key="sel_image_provider")
220
+ narrative_length_input = st.selectbox("Narrative Detail:", options=["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], index=1, key="sel_narr_length")
221
+ image_quality_input = st.selectbox("Image Detail/Style:", options=["Standard", "High Detail", "Sketch Concept"], index=0, key="sel_img_quality")
222
 
223
  submit_scene_button = st.form_submit_button("🌌 Weave This Scene!", use_container_width=True, type="primary", disabled=st.session_state.processing_scene)
224
 
225
+ if st.button("🎲 Surprise Me!", use_container_width=True, disabled=st.session_state.processing_scene, key="surprise_btn_sidebar"):
226
+ sur_prompt, sur_style, sur_artist = surprise_me_func()
227
+ st.session_state.form_scene_prompt = sur_prompt
228
+ st.session_state.form_image_style = sur_style
229
+ st.session_state.form_artist_style = sur_artist
230
+ st.experimental_rerun()
231
 
232
+ if st.button("πŸ—‘οΈ New Story", use_container_width=True, disabled=st.session_state.processing_scene, key="clear_btn_sidebar"):
 
233
  st.session_state.story_object = Story()
234
  st.session_state.current_log = ["Story Cleared. Ready for a new verse!"]
235
+ st.session_state.latest_scene_image_pil = None
236
  st.session_state.latest_scene_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene!"
237
+ st.session_state.status_message = {"text": "πŸ“œ Story Cleared. A fresh canvas awaits!", "type": "processing"}
238
+ st.session_state.form_scene_prompt = "" # Clear form input too
239
+ st.experimental_rerun()
240
 
241
  with st.expander("πŸ”§ AI Services Status", expanded=False):
242
+ text_llm_ok, image_gen_ok = (AI_SERVICES_STATUS["gemini_text_ready"] or AI_SERVICES_STATUS["hf_text_ready"]), \
243
+ (AI_SERVICES_STATUS["dalle_image_ready"] or AI_SERVICES_STATUS["hf_image_ready"])
244
  if not text_llm_ok and not image_gen_ok: st.error("CRITICAL: NO AI SERVICES CONFIGURED.")
245
  else:
246
  if text_llm_ok: st.success("Text Generation Service(s) Ready.")
 
250
 
251
 
252
# --- Main Display Area: latest scene (left) and story gallery (right) ---
col_main_left, col_main_right = st.columns([2, 3])

with col_main_left:
    st.subheader("🌠 Latest Scene")
    latest_img = st.session_state.latest_scene_image_pil
    # Image panel: generating-placeholder while busy, the generated image when
    # available, otherwise an invitation placeholder.
    if st.session_state.processing_scene and latest_img is None:
        st.image(create_placeholder_image_st("🎨 Conjuring visuals..."), use_column_width="always")
    elif latest_img:
        st.image(latest_img, use_column_width="always", caption="Latest Generated Image")
    else:
        st.image(
            create_placeholder_image_st("Describe a scene to begin!", size=(512, 300), color="#1A1A2E"),
            use_column_width="always",
        )

    # Narrative panel: show a busy indicator while the narrative is still being
    # generated, otherwise render the latest narrative markdown.
    narrative_in_flight = (
        st.session_state.processing_scene
        and "Musing narrative..." in st.session_state.latest_scene_narrative
    )
    if narrative_in_flight:
        st.markdown(" Musing narrative...")
    else:
        st.markdown(st.session_state.latest_scene_narrative, unsafe_allow_html=True)
269
with col_main_right:
    st.subheader("πŸ“š Story Scroll")
    story = st.session_state.story_object
    if story and story.scenes:
        # Lay the scenes out in a two-column grid, round-robin.
        gallery_col_count = 2
        gallery_cols = st.columns(gallery_col_count)
        for idx, (scene_img, scene_caption) in enumerate(story.get_all_scenes_for_gallery_display()):
            with gallery_cols[idx % gallery_col_count]:
                if scene_img:
                    st.image(
                        scene_img,
                        caption=scene_caption or f"Scene {idx+1}",
                        use_column_width="always",
                        output_format="PNG",  # Specify output format for PIL
                    )
                elif scene_caption:
                    st.caption(scene_caption)
                else:
                    st.caption(f"Scene {idx+1} (No image)")
    else:
        st.caption("Your story scroll is empty. Weave your first scene in the panel on the left!")
282
+
283
# Status Bar (displayed below main content).
# FIX: use .get for both keys — the original mixed .get("type", ...) with a
# direct ['text'] lookup, which raises KeyError if "text" is ever missing.
st.markdown("---")
status_type = st.session_state.status_message.get("type", "processing")
status_text = st.session_state.status_message.get("text", "")
st.markdown(f"<p class='status-message status-{status_type}'>{status_text}</p>", unsafe_allow_html=True)
288
 
289
# Interaction Log: newest entries first, capped at the 30 most recent.
with st.expander("βš™οΈ Interaction Log (Newest First)", expanded=False):
    recent_entries = list(reversed(st.session_state.current_log))[:30]
    st.markdown("\n\n---\n\n".join(recent_entries), unsafe_allow_html=True)
 
294
# --- Logic for Form Submission ---
# Runs after the form widgets above have been rendered; generates the
# narrative, then the image, records the scene, and reruns the app.
if submit_scene_button:
    if not scene_prompt_text_input.strip():
        # Reject empty prompts. No explicit rerun: the error shows on the next
        # render and the user can simply submit again.
        st.session_state.status_message = {"text": "Scene prompt cannot be empty!", "type": "error"}
    else:
        st.session_state.processing_scene = True
        next_scene_number = st.session_state.story_object.current_scene_number + 1
        st.session_state.status_message = {"text": f"🌌 Weaving Scene {next_scene_number}...", "type": "processing"}
        st.session_state.current_log.append(f"**πŸš€ Scene {next_scene_number} - {time.strftime('%H:%M:%S')}**")

        # Snapshot the form widget values under short local names.
        _scene_prompt = scene_prompt_text_input
        _image_style = image_style_dropdown_input
        _artist_style = artist_style_text_input
        _negative_prompt = negative_prompt_text_input
        _text_model = text_model_key_input
        _image_provider = image_provider_key_input
        _narr_length = narrative_length_input
        _img_quality = image_quality_input

        # ---- Main Generation Logic ----
        # FIX: no placeholders here, so a plain string (was a pointless f-string).
        current_narrative_text = "Narrative Error: Init failed."
        generated_image_pil = None
        image_gen_error_msg = None
        final_scene_error_msg = None

        # 1. Generate the narrative text.
        text_model_info = TEXT_MODELS.get(_text_model)
        if text_model_info and text_model_info["type"] != "none":
            system_p = get_narrative_system_prompt("default")
            prev_narr = st.session_state.story_object.get_last_scene_narrative()
            user_p = format_narrative_user_prompt(_scene_prompt, prev_narr)
            st.session_state.current_log.append(f"  Narrative: Using {_text_model} ({text_model_info['id']}).")
            # Longer scenes get a bigger token budget (hoisted out of both calls).
            max_narr_tokens = 768 if _narr_length.startswith("Detailed") else 400
            text_resp = None
            if text_model_info["type"] == "gemini" and AI_SERVICES_STATUS["gemini_text_ready"]:
                text_resp = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=max_narr_tokens)
            elif text_model_info["type"] == "hf_text" and AI_SERVICES_STATUS["hf_text_ready"]:
                text_resp = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=max_narr_tokens)
            if text_resp and text_resp.success:
                current_narrative_text = basic_text_cleanup(text_resp.text)
                st.session_state.current_log.append("  Narrative: Success.")
            elif text_resp:
                current_narrative_text = f"**Narrative Error:** {text_resp.error}"
                st.session_state.current_log.append(f"  Narrative: FAILED - {text_resp.error}")
            else:
                st.session_state.current_log.append("  Narrative: FAILED - No response.")
        else:
            current_narrative_text = "**Narrative Error:** Model unavailable."
            st.session_state.current_log.append(f"  Narrative: FAILED - Model '{_text_model}' unavailable.")
        st.session_state.latest_scene_narrative = f"## Scene Idea: {_scene_prompt}\n\n{current_narrative_text}"

        # 2. Generate the scene image. The narrative drives the image prompt
        # unless narrative generation failed, in which case the raw scene
        # prompt is used instead.
        selected_img_prov_type = IMAGE_PROVIDERS.get(_image_provider)
        img_content_prompt = current_narrative_text if current_narrative_text and "Error" not in current_narrative_text else _scene_prompt
        quality_kw = "ultra detailed, " if _img_quality == "High Detail" else ("concept sketch, " if _img_quality == "Sketch Concept" else "")
        full_img_prompt_for_gen = format_image_generation_prompt(quality_kw + img_content_prompt[:350], _image_style, _artist_style)
        st.session_state.current_log.append(f"  Image: Using {_image_provider} (type '{selected_img_prov_type}').")
        if selected_img_prov_type and selected_img_prov_type != "none":
            img_resp = None
            if selected_img_prov_type.startswith("dalle_") and AI_SERVICES_STATUS["dalle_image_ready"]:
                dalle_model = "dall-e-3" if selected_img_prov_type == "dalle_3" else "dall-e-2"
                img_resp = generate_image_dalle(full_img_prompt_for_gen, model=dalle_model)
            elif selected_img_prov_type.startswith("hf_") and AI_SERVICES_STATUS["hf_image_ready"]:
                hf_model_id = "stabilityai/stable-diffusion-xl-base-1.0"  # Default
                if selected_img_prov_type == "hf_openjourney":
                    hf_model_id = "prompthero/openjourney"
                img_resp = generate_image_hf_model(full_img_prompt_for_gen, model_id=hf_model_id, negative_prompt=_negative_prompt)
            if img_resp and img_resp.success:
                generated_image_pil = img_resp.image
                st.session_state.current_log.append("  Image: Success.")
            elif img_resp:
                image_gen_error_msg = f"**Image Error:** {img_resp.error}"
                st.session_state.current_log.append(f"  Image: FAILED - {img_resp.error}")
            else:
                image_gen_error_msg = "**Image Error:** No response."
                st.session_state.current_log.append("  Image: FAILED - No response.")
        else:
            image_gen_error_msg = "**Image Error:** No provider."
            st.session_state.current_log.append("  Image: FAILED - No provider.")
        st.session_state.latest_scene_image_pil = generated_image_pil if generated_image_pil else create_placeholder_image_st("Image Gen Failed", color="#401010")

        # 3. Record the scene on the story object.
        # BUG FIX: the original checked for the substring "**Narrative Error**"
        # (no colon), which can NEVER match the actual messages, which read
        # "**Narrative Error:** ..." — so narrative failures were silently
        # recorded as successful scenes. Match on "Narrative Error", which
        # covers both the init-failure and the markdown-formatted messages.
        narrative_failed = "Narrative Error" in current_narrative_text
        if narrative_failed and image_gen_error_msg:
            final_scene_error_msg = f"{current_narrative_text}\n{image_gen_error_msg}"
        elif narrative_failed:
            final_scene_error_msg = current_narrative_text
        elif image_gen_error_msg:
            final_scene_error_msg = image_gen_error_msg
        st.session_state.story_object.add_scene_from_elements(
            user_prompt=_scene_prompt, narrative_text=current_narrative_text, image=generated_image_pil,
            image_style_prompt=f"{_image_style}{f', by {_artist_style}' if _artist_style else ''}",
            image_provider=_image_provider, error_message=final_scene_error_msg
        )
        st.session_state.current_log.append(f"  Scene {st.session_state.story_object.current_scene_number} processed.")

        # 4. Final status, then rerun so the gallery/status reflect the new scene.
        if final_scene_error_msg:
            st.session_state.status_message = {"text": f"Scene {st.session_state.story_object.current_scene_number} added with errors.", "type": "error"}
        else:
            st.session_state.status_message = {"text": f"🌌 Scene {st.session_state.story_object.current_scene_number} woven!", "type": "success"}

        st.session_state.processing_scene = False
        # FIX: st.experimental_rerun() was removed in newer Streamlit releases;
        # prefer st.rerun() when available, keeping the old call as a fallback.
        (st.rerun if hasattr(st, "rerun") else st.experimental_rerun)()