mgbam commited on
Commit
ce507ec
·
verified ·
1 Parent(s): 46e90a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +265 -391
app.py CHANGED
@@ -1,425 +1,299 @@
1
- # algoforge_prime/app.py
2
  import gradio as gr
3
  import os
4
  import time
5
- import json # For potentially displaying structured data or passing complex states
6
 
7
  # --- Core Logic Imports ---
8
- from core.llm_clients import initialize_all_clients, is_gemini_api_configured, is_hf_api_configured
9
- initialize_all_clients()
10
-
11
- GEMINI_API_READY = is_gemini_api_configured()
12
- HF_API_READY = is_hf_api_configured()
13
-
14
- from core.generation_engine import generate_initial_solutions
15
- from core.evaluation_engine import evaluate_solution_candidate, EvaluationResultOutput
16
- from core.evolution_engine import evolve_solution
17
- from prompts.system_prompts import get_system_prompt
18
- from prompts.prompt_templates import format_code_test_analysis_user_prompt
19
- from core.safe_executor import execute_python_code_with_tests, ExecutionResult # For re-evaluating
20
-
21
- # --- Application Configuration ---
22
- # (This section should ideally move to a config file or env vars for production)
23
- AVAILABLE_MODELS_CONFIG = {}
24
- UI_DEFAULT_MODEL_KEY = None
25
- GEMINI_1_5_PRO_LATEST_ID = "gemini-1.5-pro-latest" # Ensure this is the correct ID usable via API
26
- GEMINI_1_5_FLASH_LATEST_ID = "gemini-1.5-flash-latest"
27
-
28
- if GEMINI_API_READY:
29
- AVAILABLE_MODELS_CONFIG.update({
30
- f"✨ Google Gemini 1.5 Pro (API)": {"id": GEMINI_1_5_PRO_LATEST_ID, "type": "google_gemini"},
31
- f"⚡ Google Gemini 1.5 Flash (API)": {"id": GEMINI_1_5_FLASH_LATEST_ID, "type": "google_gemini"},
32
- "Legacy Gemini 1.0 Pro (API)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
33
- })
34
- UI_DEFAULT_MODEL_KEY = f"✨ Google Gemini 1.5 Pro (API)"
35
- if UI_DEFAULT_MODEL_KEY not in AVAILABLE_MODELS_CONFIG:
36
- UI_DEFAULT_MODEL_KEY = f"⚡ Google Gemini 1.5 Flash (API)"
37
- else: print("WARNING: app.py - Gemini API not configured.")
38
-
39
- if HF_API_READY:
40
- AVAILABLE_MODELS_CONFIG.update({
41
- "Gemma 2B (HF Test)": {"id": "google/gemma-2b-it", "type": "hf"},
42
- "Mistral 7B (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
43
- })
44
- if not UI_DEFAULT_MODEL_KEY: UI_DEFAULT_MODEL_KEY = "Gemma 2B (HF Test)"
45
- else: print("WARNING: app.py - HF API not configured.")
46
-
47
- if not AVAILABLE_MODELS_CONFIG:
48
- AVAILABLE_MODELS_CONFIG["No Models Available (Setup API Keys!)"] = {"id": "dummy_error", "type": "none"}
49
- UI_DEFAULT_MODEL_KEY = "No Models Available (Setup API Keys!)"
50
- elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
51
- UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0]
52
-
53
- # --- UI Customization (Conceptual - real CSS would be in a file) ---
54
- # For a "WOW" UI, you'd link a custom CSS file.
55
- # Here's a conceptual placeholder for some styles we might imply.
56
- APP_THEME = gr.themes.Soft(
57
- primary_hue=gr.themes.colors.blue,
58
- secondary_hue=gr.themes.colors.sky,
59
- neutral_hue=gr.themes.colors.slate,
60
- font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
61
- ).set(
62
- # Example: input_background_fill="rgba(240, 240, 240, 0.5)" # Slightly transparent inputs
63
- # button_primary_background_fill="linear-gradient(to bottom right, hsl(210, 80%, 50%), hsl(210, 100%, 30%))"
64
  )
 
 
 
 
 
 
 
 
 
 
 
65
 
66
- # --- Main Orchestration Logic (More detailed progress and error handling for UI) ---
67
- def run_algoforge_orchestrator_ui_wrapper(
68
- problem_type_selected: str, problem_description_text: str, initial_hints_text: str,
69
- user_provided_tests_code: str, num_initial_solutions_to_gen: int, selected_model_ui_key: str,
70
- genesis_temp: float, genesis_max_tokens: int, critique_temp: float, critique_max_tokens: int,
71
- evolution_temp: float, evolution_max_tokens: int,
72
- # Gradio's Request object can give session info if needed for advanced state
73
- # request: gr.Request
 
 
74
  ):
75
- # This wrapper allows for more fine-grained UI updates via yielding
76
- # and handles the overall try-except for better error display.
77
-
78
- log_accumulator = [f"**AlgoForge Omega™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**\n"]
79
- # Initial state for UI outputs
80
- yield {
81
- output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🚀 Initializing AlgoForge Omega™...</p>", visible=True),
82
- output_initial_solutions_accordion: gr.Accordion(label="⏳ Generating Initial Candidates...", open=False, visible=True),
83
- output_initial_solutions_markdown: gr.Markdown(value="Working...", visible=True),
84
- output_champion_accordion: gr.Accordion(label="⏳ Awaiting Champion Selection...", open=False, visible=False),
85
- output_champion_markdown: gr.Markdown(value="", visible=False),
86
- output_evolved_accordion: gr.Accordion(label="⏳ Awaiting Evolution...", open=False, visible=False),
87
- output_evolved_markdown: gr.Markdown(value="", visible=False),
88
- output_ai_test_analysis_markdown: gr.Markdown(value="", visible=False),
89
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator), visible=True),
90
- engage_button: gr.Button(interactive=False) # Disable button during run
91
- }
92
-
93
- try:
94
- start_time = time.time()
95
-
96
- if not problem_description_text.strip():
97
- raise ValueError("Problem Description is mandatory.")
98
-
99
- current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
100
- if not current_model_config or current_model_config["type"] == "none":
101
- raise ValueError(f"No valid LLM selected ('{selected_model_ui_key}'). Check API key configurations.")
102
-
103
- log_accumulator.append(f"Selected Model: {selected_model_ui_key} (Type: {current_model_config['type']}, ID: {current_model_config['id']})")
104
- log_accumulator.append(f"Problem Type: {problem_type_selected}")
105
- log_accumulator.append(f"User Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}\n")
106
- yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
107
 
 
 
108
 
109
- llm_config_genesis = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": genesis_temp, "max_tokens": genesis_max_tokens}
110
- llm_config_critique = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": critique_temp, "max_tokens": critique_max_tokens}
111
- llm_config_evolution = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": evolution_temp, "max_tokens": evolution_max_tokens}
112
-
113
- # --- STAGE 1: GENESIS ---
114
- yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧬 Stage 1: Genesis Engine - Generating Solutions...</p>") }
115
- log_accumulator.append("**------ STAGE 1: GENESIS ENGINE ------**")
116
- initial_raw_solutions = generate_initial_solutions(problem_description_text, initial_hints_text, problem_type_selected, num_initial_solutions_to_gen, llm_config_genesis)
117
- log_accumulator.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw candidate(s).")
118
- for i, sol_text in enumerate(initial_raw_solutions): log_accumulator.append(f" Candidate {i+1} (Raw Snippet): {str(sol_text)[:100]}...")
119
- yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
120
-
121
-
122
- # --- STAGE 2: CRITIQUE & AUTOMATED EVALUATION ---
123
- yield {
124
- output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🔬 Stage 2: Critique Crucible - Evaluating Candidates...</p>"),
125
- output_initial_solutions_accordion: gr.Accordion(label="Initial Candidates & Evaluations (Processing...)", open=True)
126
- }
127
- log_accumulator.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
128
- evaluated_candidates_list = []
129
- initial_solutions_md_accumulator = ["**Initial Candidates & Detailed Evaluations:**\n"]
130
-
131
- for i, candidate_solution_text in enumerate(initial_raw_solutions):
132
- log_accumulator.append(f"\n--- Evaluating Candidate {i+1} ---")
133
- yield { output_status_bar: gr.HTML(value=f"<p style='color: dodgerblue;'>🔬 Evaluating Candidate {i+1} of {num_initial_solutions_to_gen}...</p>") }
134
-
135
- evaluation_output_obj = evaluate_solution_candidate(str(candidate_solution_text), problem_description_text, problem_type_selected, user_provided_tests_code, llm_config_critique)
136
- evaluated_candidates_list.append({"id": i + 1, "solution_text": str(candidate_solution_text), "evaluation_obj": evaluation_output_obj})
137
-
138
- log_accumulator.append(f" Combined Score: {evaluation_output_obj.combined_score}/10")
139
- # ... (more detailed logging from evaluation_obj as before)
140
-
141
- # Update UI with this candidate's evaluation progressively
142
- current_eval_md = (
143
- f"**Candidate {i+1} (Score: {evaluation_output_obj.combined_score}/10):**\n"
144
- f"```python\n{str(candidate_solution_text)}\n```\n\n"
145
- f"**Evaluation Verdict:**\n{evaluation_output_obj.get_display_critique()}\n---"
146
- )
147
- initial_solutions_md_accumulator.append(current_eval_md)
148
- yield {
149
- output_initial_solutions_markdown: gr.Markdown(value="\n".join(initial_solutions_md_accumulator)),
150
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
151
- }
152
-
153
- # --- STAGE 3: SELECTION OF CHAMPION ---
154
- yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🏆 Stage 3: Selecting Champion Candidate...</p>") }
155
- log_accumulator.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
156
- potentially_viable_candidates = [c for c in evaluated_candidates_list if c["evaluation_obj"] and c["evaluation_obj"].combined_score > 0 and not str(c["solution_text"]).startswith("ERROR")]
157
 
158
- if not potentially_viable_candidates:
159
- raise ValueError("No viable candidate solutions found after evaluation. All attempts may have failed or scored too low.")
 
 
 
 
 
 
 
 
 
160
 
161
- champion_candidate_data = sorted(potentially_viable_candidates, key=lambda x: x["evaluation_obj"].combined_score, reverse=True)[0]
162
- log_accumulator.append(f"Champion Selected: Candidate {champion_candidate_data['id']} with score {champion_candidate_data['evaluation_obj'].combined_score}/10.")
163
- champion_display_markdown = (
164
- f"**Champion Candidate ID: {champion_candidate_data['id']} "
165
- f"(Original Score: {champion_candidate_data['evaluation_obj'].combined_score}/10):**\n"
166
- f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
167
- f"**Original Comprehensive Evaluation:**\n{champion_candidate_data['evaluation_obj'].get_display_critique()}"
168
- )
169
- yield {
170
- output_champion_accordion: gr.Accordion(label=f"🏆 Champion: Candidate {champion_candidate_data['id']} (Score: {champion_candidate_data['evaluation_obj'].combined_score}/10)", open=True, visible=True),
171
- output_champion_markdown: gr.Markdown(value=champion_display_markdown, visible=True),
172
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
173
- }
174
 
175
- # --- STAGE 4: EVOLUTIONARY FORGE ---
176
- yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🛠️ Stage 4: Evolutionary Forge - Refining Champion...</p>") }
177
- log_accumulator.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
178
- evolved_solution_code = evolve_solution(
179
- str(champion_candidate_data["solution_text"]), champion_candidate_data["evaluation_obj"],
180
- problem_description_text, problem_type_selected, llm_config_evolution
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
181
  )
182
- log_accumulator.append(f"Raw Evolved Solution (Snippet): {str(evolved_solution_code)[:100]}...")
183
-
184
- evolved_solution_display_markdown = ""
185
- ai_test_analysis_markdown = ""
186
-
187
- if str(evolved_solution_code).startswith("ERROR"):
188
- evolved_solution_display_markdown = f"<p style='color: red;'>**Evolution Stage Failed:**<br>{evolved_solution_code}</p>"
 
 
 
 
 
189
  else:
190
- evolved_solution_display_markdown = f"**✨ AlgoForge Omega™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```"
191
- if "python" in problem_type_selected.lower() and user_provided_tests_code.strip():
192
- yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧪 Post-Evolution: Re-testing Evolved Code...</p>") }
193
- log_accumulator.append("\n--- Post-Evolution Test of Evolved Code ---")
194
- from core.safe_executor import execute_python_code_with_tests # Ensure imported
195
- evolved_code_exec_result = execute_python_code_with_tests(str(evolved_solution_code), user_provided_tests_code, timeout_seconds=10)
196
-
197
- evolved_solution_display_markdown += (
198
- f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n"
199
- f" Status: {'SUCCESS' if evolved_code_exec_result.success else 'FAILED/ERRORS'}\n"
200
- f" Tests Attempted: {evolved_code_exec_result.total_tests}\n"
201
- f" Tests Passed: {evolved_code_exec_result.passed_tests}\n"
202
- f" Execution Time: {evolved_code_exec_result.execution_time:.4f}s\n"
203
- )
204
- if evolved_code_exec_result.compilation_error: evolved_solution_display_markdown += f" Compilation Error: {evolved_code_exec_result.compilation_error}\n"
205
- elif evolved_code_exec_result.timeout_error: evolved_solution_display_markdown += f" Timeout Error.\n"
206
- elif evolved_code_exec_result.error: evolved_solution_display_markdown += f" Execution Error/Output: {evolved_code_exec_result.overall_error_summary}\n"
207
- elif evolved_code_exec_result.stdout: evolved_solution_display_markdown += f" Execution Stdout:\n```\n{evolved_code_exec_result.stdout[:300].strip()}\n```\n"
208
- log_accumulator.append(f" Evolved Code Test Results: {evolved_code_exec_result}")
209
-
210
- if evolved_code_exec_result.total_tests > 0 :
211
- yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧠 Post-Evolution: AI Analyzing Test Results...</p>") }
212
- log_accumulator.append("\n--- AI Analysis of Evolved Code's Test Results ---")
213
- exec_summary_for_analysis = str(evolved_code_exec_result.overall_error_summary or "Tests completed.")
214
- analysis_user_prompt = format_code_test_analysis_user_prompt(str(evolved_solution_code), user_provided_tests_code, f"Passed: {evolved_code_exec_result.passed_tests}/{evolved_code_exec_result.total_tests}. Detail: {exec_summary_for_analysis}")
215
- analysis_system_prompt = get_system_prompt("code_execution_explainer")
216
- llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.3, "max_tokens": critique_max_tokens + 200}
217
-
218
- from core.llm_clients import call_huggingface_api, call_gemini_api
219
- explanation_response_obj = None
220
- if llm_analysis_config["type"] == "hf": explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
221
- elif llm_analysis_config["type"] == "google_gemini": explanation_response_obj = call_gemini_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
222
-
223
- if explanation_response_obj and explanation_response_obj.success:
224
- ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
225
- elif explanation_response_obj:
226
- ai_test_analysis_markdown = f"<p style='color: orange;'>**AI Analysis of Test Performance Failed:**<br>{explanation_response_obj.error}</p>"
227
- log_accumulator.append(f" AI Test Analysis result logged.")
228
 
229
- total_time = time.time() - start_time
230
- log_accumulator.append(f"\n**AlgoForge Omega™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
231
- yield {
232
- output_status_bar: gr.HTML(value=f"<p style='color: green;'>✅ Cycle Complete! ({total_time:.2f}s)</p>"),
233
- output_evolved_accordion: gr.Accordion(label="🌟 Evolved Artifact & Test Analysis", open=True, visible=True),
234
- output_evolved_markdown: gr.Markdown(value=evolved_solution_display_markdown, visible=True),
235
- output_ai_test_analysis_markdown: gr.Markdown(value=ai_test_analysis_markdown, visible=True if ai_test_analysis_markdown else False),
236
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
237
- engage_button: gr.Button(interactive=True) # Re-enable button
238
- }
239
 
240
- except ValueError as ve: # Catch our specific input/config errors
241
- log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
242
- yield {
243
- output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ CONFIGURATION ERROR: {ve}</p>", visible=True),
244
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
245
- engage_button: gr.Button(interactive=True)
246
- }
247
- except Exception as e:
248
- log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
249
- # For other outputs, we might want to clear them or show a general error message
250
- yield {
251
- output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ UNEXPECTED ERROR: {e}. Check logs.</p>", visible=True),
252
- output_initial_solutions_markdown: gr.Markdown(value="An unexpected error occurred. Please check the interaction log."),
253
- output_champion_markdown: gr.Markdown(value="Error state."),
254
- output_evolved_markdown: gr.Markdown(value="Error state."),
255
- output_ai_test_analysis_markdown: gr.Markdown(value="Error state."),
256
- output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
257
- engage_button: gr.Button(interactive=True)
258
- }
259
 
 
 
 
260
 
261
  # --- Gradio UI Definition ---
262
- # (This section is the full UI layout with improvements)
263
- css = """
264
- body { font-family: 'Inter', sans-serif; }
265
- .gradio-container { max-width: 1280px !important; margin: auto !important; }
266
- .gr-button-primary {
267
- background: linear-gradient(135deg, #007bff 0%, #0056b3 100%) !important;
268
- color: white !important;
269
- border: none !important;
270
- box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
271
- transition: all 0.2s ease-in-out !important;
272
- }
273
- .gr-button-primary:hover {
274
- transform: translateY(-2px) !important;
275
- box-shadow: 0 6px 12px rgba(0, 0, 0, 0.15) !important;
276
- }
277
- .status-bar p {
278
- padding: 8px 12px;
279
- border-radius: 6px;
280
- font-weight: 500;
281
- text-align: center;
282
- margin-bottom: 10px; /* Add some space below status bar */
283
- }
284
- .accordion-section .gr-markdown { padding-top: 5px; padding-bottom: 5px; }
285
- .output-tabs .gr-tabitem {min-height: 400px;} /* Ensure tabs have some min height */
286
- """
287
 
288
- with gr.Blocks(theme=APP_THEME, css=css, title="AlgoForge Omega™ ") as app_demo:
289
- gr.Markdown("# ✨ AlgoForge Omega™ ✨\n### Conceptual AI-Powered Algorithm & Application Foundry")
290
- gr.Markdown(
291
- "Define a challenge, configure the AI forge, and witness the (conceptual) evolution of solutions, "
292
- "now with (simulated) unit testing and more detailed feedback loops!"
293
- )
294
 
295
- with gr.Row(equal_height=False):
296
- # --- INPUT COLUMN ---
297
- with gr.Column(scale=2, min_width=400):
298
- gr.Markdown("## 💡 1. Define the Challenge")
299
- with gr.Group():
300
- problem_type_dropdown = gr.Dropdown(
301
- choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea", "Conceptual System Design", "Pseudocode Refinement"],
302
- label="Problem Type", value="Python Algorithm with Tests",
303
- info="Select '...with Tests' to enable (simulated) unit testing if you provide tests below."
304
- )
305
- problem_description_textbox = gr.Textbox(
306
- lines=7, label="Problem Description / Desired Outcome",
307
- placeholder="Example for 'Python Algorithm with Tests':\n`def calculate_factorial(n: int) -> int:`\nCalculates factorial of n. Should handle n=0 (returns 1) and raise ValueError for n<0."
308
- )
309
- initial_hints_textbox = gr.Textbox(
310
- lines=4, label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
311
- placeholder="E.g., 'Prefer an iterative solution over recursive for factorial.' or 'Consider time complexity and edge cases like empty inputs.'"
312
- )
313
- user_tests_textbox = gr.Textbox(
314
- lines=7, label="Python Unit Tests (Optional, one `assert` per line)",
315
- placeholder="assert calculate_factorial(0) == 1\nassert calculate_factorial(5) == 120\n# For expected errors (advanced, simulated):\n# try:\n# calculate_factorial(-1)\n# except ValueError:\n# assert True\n# else:\n# assert False, \"ValueError not raised\"",
316
- info="For 'Python Algorithm with Tests'. Ensure function names match your problem description. Basic try-except for error testing is crudely simulated."
317
- )
318
 
319
- gr.Markdown("## ⚙️ 2. Configure The Forge")
320
- with gr.Group():
321
- api_status_html = gr.HTML() # For dynamic API status
322
-
323
- # Logic to set API status text (must be done after initialize_all_clients)
324
- status_messages = []
325
- if not GEMINI_API_READY and not HF_API_READY:
326
- status_messages.append("<p style='color:red; font-weight:bold;'>⚠️ CRITICAL: NO APIs CONFIGURED. App non-functional.</p>")
327
- else:
328
- if GEMINI_API_READY: status_messages.append("<p style='color:green;'>✅ Google Gemini API Ready.</p>")
329
- else: status_messages.append("<p style='color:orange;'>⚠️ Google Gemini API NOT Ready (Check GOOGLE_API_KEY).</p>")
330
- if HF_API_READY: status_messages.append("<p style='color:green;'>✅ Hugging Face API Ready.</p>")
331
- else: status_messages.append("<p style='color:orange;'>⚠️ Hugging Face API NOT Ready (Check HF_TOKEN).</p>")
332
- api_status_html.value = "".join(status_messages)
333
-
334
-
335
- model_selection_dropdown = gr.Dropdown(
336
- choices=list(AVAILABLE_MODELS_CONFIG.keys()),
337
- value=UI_DEFAULT_MODEL_KEY if UI_DEFAULT_MODEL_KEY in AVAILABLE_MODELS_CONFIG else (list(AVAILABLE_MODELS_CONFIG.keys())[0] if AVAILABLE_MODELS_CONFIG else None),
338
- label="LLM Core Model",
339
- info="Ensure the corresponding API key is correctly set in Space Secrets."
340
- )
341
- num_initial_solutions_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="# Initial Solutions (Genesis Engine)", info="More solutions take longer but provide more diversity.")
342
-
343
- with gr.Accordion("Advanced LLM Parameters (Tune with Caution!)", open=False):
344
- with gr.Row():
345
- genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.7, step=0.05, label="Genesis Temp")
346
- genesis_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1024, step=128, label="Genesis Max Tokens")
347
- with gr.Row():
348
- critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
349
- critique_max_tokens_slider = gr.Slider(minimum=150, maximum=2048, value=768, step=64, label="Critique Max Tokens")
350
- with gr.Row():
351
- evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.75, step=0.05, label="Evolution Temp")
352
- evolution_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=128, label="Evolution Max Tokens")
353
 
354
- engage_button = gr.Button("🚀 ENGAGE ALGOFORGE OMEGA™ 🚀", variant="primary", size="lg", elem_id="engage_button_elem")
355
 
356
- # --- OUTPUT COLUMN ---
357
- with gr.Column(scale=3, min_width=600):
358
- gr.Markdown("## 🔥 3. The Forge's Output")
359
- output_status_bar = gr.HTML(value="<p>Idle. Define a challenge and engage!</p>", elem_classes=["status-bar"], visible=True)
360
 
361
- with gr.Tabs(elem_id="output_tabs_elem", elem_classes=["output-tabs"]):
362
- with gr.TabItem("📜 Initial Candidates & Evaluations", id="tab_initial_evals"):
363
- output_initial_solutions_accordion = gr.Accordion(label="Initial Candidates & Evaluations", open=True, visible=False, elem_classes=["accordion-section"])
364
- with output_initial_solutions_accordion:
365
- output_initial_solutions_markdown = gr.Markdown(visible=True)
366
 
367
- with gr.TabItem("🏆 Champion Candidate", id="tab_champion"):
368
- output_champion_accordion = gr.Accordion(label="Champion Candidate (Pre-Evolution)", open=True, visible=False, elem_classes=["accordion-section"])
369
- with output_champion_accordion:
370
- output_champion_markdown = gr.Markdown(visible=True)
371
 
372
- with gr.TabItem("🌟 Evolved & Tested", id="tab_evolved"):
373
- output_evolved_accordion = gr.Accordion(label="Evolved Artifact & Test Analysis", open=True, visible=False, elem_classes=["accordion-section"])
374
- with output_evolved_accordion:
375
- output_evolved_markdown = gr.Markdown(visible=True)
376
- output_ai_test_analysis_markdown = gr.Markdown(visible=True, label="AI Analysis of Evolved Code's Tests")
377
-
378
- with gr.TabItem("🛠️ Interaction Log", id="tab_log"):
379
- with gr.Accordion(label="Developer Interaction Log", open=True, elem_classes=["accordion-section"]): # Always open log
380
- output_interaction_log_markdown = gr.Markdown(value="Log will appear here...", visible=True)
381
-
382
- # Connect button to the orchestration function wrapper
383
- # The wrapper handles UI updates via yield
384
- engage_button.click(
385
- fn=run_algoforge_orchestrator_ui_wrapper, # Call the wrapper
386
- inputs=[
387
- problem_type_dropdown, problem_description_textbox, initial_hints_textbox, user_tests_textbox,
388
- num_initial_solutions_slider, model_selection_dropdown,
389
- genesis_temp_slider, genesis_max_tokens_slider,
390
- critique_temp_slider, critique_max_tokens_slider,
391
- evolution_temp_slider, evolution_max_tokens_slider
392
- ],
393
- outputs=[ # These are the components updated by the `yield` statements
394
- output_status_bar,
395
- output_initial_solutions_accordion, output_initial_solutions_markdown,
396
- output_champion_accordion, output_champion_markdown,
397
- output_evolved_accordion, output_evolved_markdown, output_ai_test_analysis_markdown,
398
- output_interaction_log_markdown,
399
- engage_button # To disable/re-enable it
400
- ]
401
- )
402
-
403
- gr.Markdown("---")
404
- gr.Markdown(
405
- "**Disclaimer:** This is a conceptual, educational demonstration. "
406
- "The (simulated) unit testing feature is for illustrative purposes. "
407
- "**NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** "
408
- "Implementing robust and secure code sandboxing is complex and absolutely critical for safety in real-world applications. "
409
- "LLM outputs always require careful human review and verification."
410
  )
411
- gr.HTML("<p style='text-align:center; font-size:0.9em; color:grey;'>AlgoForge Omega™ - Powered by Gradio, Gemini & Hugging Face Models</p>")
412
 
 
 
 
 
 
 
 
 
 
 
 
413
 
414
- # --- Entry Point for Running the Gradio App ---
415
  if __name__ == "__main__":
416
  print("="*80)
417
- print("AlgoForge Omega™ Conceptual Demo (WOW UI Attempt) - Launching...")
418
- print(f" Google Gemini API Configured (from app.py check): {GEMINI_API_READY}")
419
- print(f" Hugging Face API Configured (from app.py check): {HF_API_READY}")
420
- if not GEMINI_API_READY and not HF_API_READY:
421
- print(" CRITICAL WARNING: No API keys seem to be configured correctly. The application will likely be non-functional.")
422
- print(f" UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")
423
- print(f" Available models for UI: {list(AVAILABLE_MODELS_CONFIG.keys())}")
 
 
 
 
 
 
424
  print("="*80)
425
- app_demo.launch(debug=True, server_name="0.0.0.0")
 
1
+ # storyverse_weaver/app.py
2
  import gradio as gr
3
  import os
4
  import time
5
+ from PIL import Image
6
 
7
  # --- Core Logic Imports ---
8
+ from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
9
+ from core.image_services import initialize_image_llms, STABILITY_API_CONFIGURED, OPENAI_DALLE_CONFIGURED, generate_image_stabilityai, generate_image_dalle # Add other providers if implemented
10
+ from core.story_engine import Story, Scene # Manages the story
11
+ from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
12
+ from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
13
+ from core.utils import basic_text_cleanup
14
+
15
+ # --- Initialize Services ---
16
+ initialize_text_llms()
17
+ initialize_image_llms()
18
+
19
+ # --- Available Model Configuration (Simplified for StoryVerse) ---
20
+ # Text Models
21
+ TEXT_MODELS = {}
22
+ if is_gemini_text_ready():
23
+ TEXT_MODELS["Gemini 1.5 Flash (Text)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
24
+ TEXT_MODELS["Gemini 1.0 Pro (Text)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
25
+ if is_hf_text_ready():
26
+ TEXT_MODELS["Mistral 7B (HF Text)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
27
+ DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0] if TEXT_MODELS else "No Text Models Available"
28
+
29
+ # Image Models (Providers)
30
+ IMAGE_PROVIDERS = {}
31
+ if STABILITY_API_CONFIGURED: IMAGE_PROVIDERS["Stability AI (Stable Diffusion XL)"] = "stability_ai"
32
+ if OPENAI_DALLE_CONFIGURED: IMAGE_PROVIDERS["OpenAI DALL-E 3 (Simulated)"] = "dalle"
33
+ # Add other HF image models if you implement image_services.generate_image_hf_model
34
+ DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0] if IMAGE_PROVIDERS else "No Image Providers Available"
35
+
36
+
37
+ # --- Gradio UI Theme and CSS ---
38
+ story_theme = gr.themes.Soft(
39
+ primary_hue=gr.themes.colors.purple,
40
+ secondary_hue=gr.themes.colors.pink,
41
+ font=[gr.themes.GoogleFont("Quicksand"), "ui-sans-serif", "system-ui", "sans-serif"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  )
43
+ custom_css = """
44
+ .gradio-container { max-width: 1200px !important; margin: auto !important; }
45
+ .panel_image img { object-fit: contain; width: 100%; height: 100%; max-height: 512px; }
46
+ .gallery_output .thumbnail-item { height: 150px !important; width: 150px !important; }
47
+ .gallery_output .thumbnail-item img { height: 100% !important; width: 100% !important; object-fit: cover !important; }
48
+ .status_text { font-weight: bold; padding: 8px; text-align: center; border-radius: 5px; margin-top:10px;}
49
+ .error_text { background-color: #ffebee; color: #c62828; }
50
+ .success_text { background-color: #e8f5e9; color: #2e7d32; }
51
+ .processing_text { background-color: #e3f2fd; color: #1565c0; }
52
+ .compact-row .gr-form {gap: 8px !important;} /* Reduce gap in rows */
53
+ """
54
 
55
+ # --- StoryVerse Weaver Orchestrator ---
56
def add_scene_to_story(
    current_story_obj: Story,  # comes from gr.State
    scene_prompt_text: str,
    image_style_dropdown: str,
    artist_style_text: str,
    negative_prompt_text: str,
    text_model_key: str,
    image_provider_key: str,
    progress=gr.Progress(track_tqdm=True)
):
    """Generate one new scene (narrative text + image) and append it to the story.

    Returns a 6-tuple matching the "Weave Next Scene" click handler's outputs:
    (story_state, gallery_items, latest_image, latest_scene_markdown,
    status_html, log_text).
    """
    # BUG FIX: this early return previously yielded only 3 values while the
    # click handler binds 6 outputs, which makes Gradio error at runtime.
    # Return a full 6-tuple and keep the existing gallery contents intact.
    if not scene_prompt_text.strip():
        return (current_story_obj,
                _story_gallery_items(current_story_obj),
                None, "",
                "<p class='error_text status_text'>Scene prompt cannot be empty!</p>",
                "Scene prompt was empty; nothing generated.")

    progress(0, desc="Initializing new scene...")
    log_updates = ["Starting new scene generation..."]

    # --- 1. Generate Narrative Text ---
    progress(0.2, desc="Generating narrative...")
    narrative_text_generated = "Narrative generation failed."
    text_model_info = TEXT_MODELS.get(text_model_key)

    if text_model_info:
        system_p = get_narrative_system_prompt("default")  # or "comic"
        # The previous scene's narrative could be passed here for continuity.
        user_p = format_narrative_user_prompt(scene_prompt_text)

        text_response = None
        if text_model_info["type"] == "gemini":
            text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=300)
        elif text_model_info["type"] == "hf_text":
            text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=300)

        if text_response and text_response.success:
            narrative_text_generated = basic_text_cleanup(text_response.text)
            log_updates.append(f"Narrative generated using {text_model_key}.")
        elif text_response:
            narrative_text_generated = f"Narrative Error: {text_response.error}"
            log_updates.append(f"Narrative generation FAILED with {text_model_key}: {text_response.error}")
        else:
            log_updates.append(f"Narrative generation FAILED with {text_model_key}: No response object.")
    else:
        narrative_text_generated = "Selected text model not available."
        log_updates.append("Narrative generation FAILED: Text model not found.")

    # --- 2. Generate Image ---
    progress(0.6, desc="Generating image...")
    image_generated = None
    image_error = None
    selected_image_provider = IMAGE_PROVIDERS.get(image_provider_key)

    # Prefer the generated narrative as image content; fall back to the raw
    # scene prompt when narrative generation failed.
    image_content_prompt = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
    # Truncate to keep the image prompt within typical service limits.
    full_image_prompt = format_image_generation_prompt(image_content_prompt[:300], image_style_dropdown, artist_style_text)

    if selected_image_provider:
        image_response = None
        if selected_image_provider == "stability_ai":
            image_response = generate_image_stabilityai(full_image_prompt, style_preset=None, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS)
        elif selected_image_provider == "dalle":
            image_response = generate_image_dalle(full_image_prompt)  # default DALL-E settings from image_services
        # Add elif branches here for HF-hosted image models if implemented.

        if image_response and image_response.success:
            image_generated = image_response.image
            log_updates.append(f"Image generated using {image_provider_key}.")
        elif image_response:
            image_error = f"Image Error ({image_provider_key}): {image_response.error}"
            log_updates.append(f"Image generation FAILED with {image_provider_key}: {image_response.error}")
        else:
            image_error = f"Image generation failed: No response from {image_provider_key} service."
            log_updates.append(f"Image generation FAILED with {image_provider_key}: No response object.")
    else:
        image_error = "Selected image provider not available."
        log_updates.append("Image generation FAILED: Image provider not found.")

    # --- 3. Add Scene to Story Object ---
    # NOTE(review): the "both failed" test keys on the substring "Error", which
    # misses the "Selected text model not available." narrative failure — confirm
    # whether that path should also be treated as a full failure.
    if image_error and "Error" in narrative_text_generated:  # both text and image failed
        current_story_obj.add_scene_with_error(scene_prompt_text, f"Narrative: {narrative_text_generated}. Image: {image_error}")
    else:
        current_story_obj.add_scene_from_elements(
            user_prompt=scene_prompt_text,
            narrative_text=narrative_text_generated,
            image=image_generated,
            image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text else ''}",
            image_provider=image_provider_key
        )

    progress(1.0, desc="Scene complete!")

    # --- 4. Prepare Outputs for Gradio ---
    gallery_items = _story_gallery_items(current_story_obj)

    # Markdown summary of the latest scene; images are shown via the
    # gr.Image / gr.Gallery components, not embedded in Markdown.
    latest_scene_display = ""
    if current_story_obj.scenes:
        ls = current_story_obj.scenes[-1]
        latest_scene_display = f"## Scene {ls.scene_number}: {ls.user_prompt}\n\n"
        if ls.error_message:
            latest_scene_display += f"**Error:** {ls.error_message}\n"
        else:
            if ls.image:
                latest_scene_display += f"**Style:** {ls.image_style_prompt}\n\n"
            latest_scene_display += f"{ls.narrative_text}"

    # Status banner reflecting partial/total success of this scene.
    if image_error or "Error" in narrative_text_generated:
        status_message_html = f"<p class='error_text status_text'>Scene added with errors. Narrative: {'OK' if 'Error' not in narrative_text_generated else 'Failed'}. Image: {'OK' if not image_error else 'Failed'}.</p>"
    else:
        status_message_html = "<p class='success_text status_text'>New scene added successfully!</p>"

    # Latest image for the single-image viewer (None when generation failed).
    latest_image_output = image_generated if image_generated else None

    return current_story_obj, gallery_items, latest_image_output, latest_scene_display, status_message_html, "\n".join(log_updates)


def _story_gallery_items(story_obj):
    """Build (PIL image, caption) tuples for gr.Gallery from every scene.

    Error scenes get a red placeholder; scenes without an image get grey.
    """
    items = []
    for scene in story_obj.scenes:
        caption = f"S{scene.scene_number}: {scene.user_prompt[:40]}..."
        if scene.error_message:
            error_img = Image.new('RGB', (100, 100), color='red')  # red error placeholder
            items.append((error_img, f"{caption}\nError: {scene.error_message[:100]}..."))
        else:
            items.append((scene.image if scene.image else Image.new('RGB', (100, 100), color='grey'), caption))
    return items
 
 
 
 
 
 
184
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
def clear_story_state():
    """Reset the app: fresh Story object and emptied display components.

    Returns the 6-tuple bound to the Clear Story button's outputs:
    (story_state, gallery, latest_image, narrative_md, status_html, log_md).
    """
    fresh_story = Story()
    return (
        fresh_story,
        [],
        None,
        "Story Cleared. Ready for a new verse!",
        "<p class='status_text'>Story Cleared</p>",
        "Log Cleared.",
    )
189
 
190
  # --- Gradio UI Definition ---
191
# Build the Gradio UI: inputs on the left, story display on the right,
# plus status accordion, event wiring, and example prompts.
with gr.Blocks(theme=story_theme, css=custom_css) as story_weaver_demo:
    story_state = gr.State(Story())  # per-session story object

    # FIX: "#StoryVerse" (no space after '#') is not a Markdown heading per
    # CommonMark, so it rendered as literal text. Add the space.
    gr.Markdown("# StoryVerse Weaver ✨\nCreate multimodal stories with AI-generated narrative and images!")

    # API status check (developer info; a production app might hide this or
    # make it admin-only).
    with gr.Accordion("API & Model Status (Developer Info)", open=False):
        status_text = []
        if not GEMINI_API_READY and not HF_API_READY and not STABILITY_API_CONFIGURED and not OPENAI_DALLE_CONFIGURED:
            status_text.append("<p style='color:red;font-weight:bold;'>⚠️ CRITICAL: NO APIs CONFIGURED. App will be non-functional.</p>")
        else:
            if GEMINI_API_READY or HF_API_READY:
                status_text.append("<p style='color:green;'>✅ Text LLM(s) Ready.</p>")
            else:
                status_text.append("<p style='color:orange;'>⚠️ No Text LLMs Ready (Check STORYVERSE_GOOGLE_API_KEY/STORYVERSE_HF_TOKEN).</p>")
            if STABILITY_API_CONFIGURED or OPENAI_DALLE_CONFIGURED:
                status_text.append("<p style='color:green;'>✅ Image Generation Service(s) Ready.</p>")
            else:
                status_text.append("<p style='color:orange;'>⚠️ No Image Generation Services Ready (Check API Keys).</p>")
        gr.HTML("".join(status_text))

    with gr.Row():
        # --- CONTROL PANEL (Inputs) ---
        with gr.Column(scale=1):
            gr.Markdown("### 🎬 Scene Input")
            scene_prompt_input = gr.Textbox(lines=5, label="Describe your scene or story beat:", placeholder="e.g., A lone astronaut discovers a glowing alien artifact on a desolate moon.")

            with gr.Accordion("🎨 Visual Style (Optional)", open=True):
                image_style_input = gr.Dropdown(choices=["Default"] + list(STYLE_PRESETS.keys()), value="Default", label="Image Style Preset")
                artist_style_input = gr.Textbox(label="Inspired by Artist (Optional):", placeholder="e.g., Van Gogh, Hayao Miyazaki, HR Giger")
                negative_prompt_input = gr.Textbox(lines=2, label="Negative Prompt (Optional):", placeholder="e.g., blurry, text, watermark, poorly drawn", value=COMMON_NEGATIVE_PROMPTS)

            with gr.Accordion("⚙️ AI Configuration (Advanced)", open=False):
                text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=DEFAULT_TEXT_MODEL_KEY, label="Text Generation Model")
                image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=DEFAULT_IMAGE_PROVIDER_KEY, label="Image Generation Provider")
                # Sliders for temperature / max tokens could be added here later.

            with gr.Row(elem_classes=["compact-row"]):
                add_scene_button = gr.Button(" Weave Next Scene", variant="primary")
                clear_story_button = gr.Button("🗑️ Clear Story")

            status_bar_output = gr.HTML(value="<p class='status_text'>Ready to weave...</p>")

        # --- STORY DISPLAY (Outputs) ---
        with gr.Column(scale=2):
            gr.Markdown("### 📖 Your StoryVerse So Far")

            with gr.Tabs():
                with gr.TabItem("🖼️ Latest Scene View"):
                    latest_scene_image_output = gr.Image(label="Latest Scene Image", type="pil", interactive=False, show_download_button=True, elem_classes=["panel_image"])
                    latest_scene_narrative_output = gr.Markdown(label="Latest Scene Narrative")

                with gr.TabItem(" галерея | Story Scroll"):  # Gallery label partly in Russian, kept deliberately
                    story_gallery_output = gr.Gallery(label="Story Scroll", show_label=False, columns=[3], object_fit="contain", height="auto", elem_classes=["gallery_output"])

                with gr.TabItem("📜 Interaction Log"):
                    log_output_markdown = gr.Markdown("Log will appear here...")

    # --- Event Handlers ---
    add_scene_button.click(
        fn=add_scene_to_story,
        inputs=[
            story_state, scene_prompt_input,
            image_style_input, artist_style_input, negative_prompt_input,
            text_model_dropdown, image_provider_dropdown
        ],
        outputs=[  # order must match the return order of add_scene_to_story
            story_state, story_gallery_output,
            latest_scene_image_output, latest_scene_narrative_output,
            status_bar_output, log_output_markdown
        ]
    )
    clear_story_button.click(
        fn=clear_story_state,
        inputs=[],
        outputs=[
            story_state, story_gallery_output,
            latest_scene_image_output, latest_scene_narrative_output,
            status_bar_output, log_output_markdown
        ]
    )

    # Example prompts to help users get started.
    gr.Examples(
        examples=[
            ["A knight faces a dragon in a fiery volcano.", "Fantasy Art", "Frank Frazetta", "blurry, low quality"],
            ["A futuristic detective investigates a crime in a neon-lit alley.", "Cyberpunk", "Syd Mead", "cartoon, painting"],
            ["Two children discover a hidden portal in an old oak tree.", "Studio Ghibli Inspired", "", "dark, scary"],
            ["A single red rose blooming in a post-apocalyptic wasteland.", "Photorealistic", "Ansel Adams", "oversaturated, vibrant"],
        ],
        inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
        label="✨ Example Scene Ideas & Styles ✨"
    )
281
 
282
+ # --- Entry Point ---
283
if __name__ == "__main__":
    # Pre-launch diagnostics: report which backing services are configured,
    # emit the whole banner in one write, then start the Gradio server.
    divider = "=" * 80
    startup_lines = [
        divider,
        " StoryVerse Weaver- Multimodal Story Creator - Launching...",
        f" Text LLM Ready (Gemini): {is_gemini_text_ready()}",
        f" Text LLM Ready (HF): {is_hf_text_ready()}",
        f" Image Provider Ready (Stability AI): {STABILITY_API_CONFIGURED}",
        f" Image Provider Ready (DALL-E): {OPENAI_DALLE_CONFIGURED}",
    ]
    if not (is_gemini_text_ready() or is_hf_text_ready()) or not (STABILITY_API_CONFIGURED or OPENAI_DALLE_CONFIGURED):
        startup_lines.append(" 🔴 WARNING: Not all required API services are configured. Functionality will be limited or fail.")
        startup_lines.append(" Please set: STORYVERSE_GOOGLE_API_KEY (for Gemini text), and/or STORYVERSE_HF_TOKEN (for HF text),")
        startup_lines.append(" AND STORYVERSE_STABILITY_API_KEY (for Stability AI images) or STORYVERSE_OPENAI_API_KEY (for DALL-E images) in your environment/secrets.")
    startup_lines.extend([
        f" Default Text Model: {DEFAULT_TEXT_MODEL_KEY}",
        f" Default Image Provider: {DEFAULT_IMAGE_PROVIDER_KEY}",
        f" Available Text Models: {list(TEXT_MODELS.keys())}",
        f" Available Image Providers: {list(IMAGE_PROVIDERS.keys())}",
        divider,
    ])
    print("\n".join(startup_lines))
    story_weaver_demo.launch(debug=True, server_name="0.0.0.0")