mgbam committed on
Commit 9b1a7e0 · verified · 1 Parent(s): b09495b

Update app.py

Files changed (1)
  1. app.py +140 -169
app.py CHANGED
--- app.py (old version; removed lines are marked "-")
@@ -3,69 +3,105 @@ import gradio as gr
  import os
  import time # For progress updates
  from core.llm_clients import initialize_all_clients, GEMINI_API_CONFIGURED, HF_API_CONFIGURED
  from core.generation_engine import generate_initial_solutions
- from core.evaluation_engine import evaluate_solution_candidate, EvaluationResult
  from core.evolution_engine import evolve_solution
  from prompts.system_prompts import get_system_prompt
  from prompts.prompt_templates import format_code_test_analysis_user_prompt

  # --- Application Configuration (Models, Defaults) ---
  AVAILABLE_MODELS_CONFIG = {}
- UI_DEFAULT_MODEL_KEY = None

- # Populate with Gemini models if API is configured
  if GEMINI_API_CONFIGURED:
      AVAILABLE_MODELS_CONFIG.update({
-         "Google Gemini 1.5 Flash (API - Fast, Recommended)": {"id": "gemini-1.5-flash-latest", "type": "google_gemini"},
-         "Google Gemini 1.0 Pro (API)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
      })
-     UI_DEFAULT_MODEL_KEY = "Google Gemini 1.5 Flash (API - Fast, Recommended)"

- # Populate with Hugging Face models if API is configured
  if HF_API_CONFIGURED:
      AVAILABLE_MODELS_CONFIG.update({
          "Google Gemma 2B (HF - Quick Test)": {"id": "google/gemma-2b-it", "type": "hf"},
          "Mistral 7B Instruct (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
-         "CodeLlama 7B Instruct (HF)": {"id": "codellama/CodeLlama-7b-Instruct-hf", "type": "hf"}, # Smaller CodeLlama
      })
-     if not UI_DEFAULT_MODEL_KEY: # If Gemini isn't configured, default to an HF model
          UI_DEFAULT_MODEL_KEY = "Google Gemma 2B (HF - Quick Test)"

- # Absolute fallback if no models could be configured
  if not AVAILABLE_MODELS_CONFIG:
      print("CRITICAL APP ERROR: No models could be configured. Check API keys in Space Secrets.")
-     AVAILABLE_MODELS_CONFIG["No Models Available (Check API Keys)"] = {"id": "dummy", "type": "none"}
      UI_DEFAULT_MODEL_KEY = "No Models Available (Check API Keys)"
  elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
-     UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0] # Pick first available if default somehow not set

  # --- Main Orchestration Logic for Gradio ---
  def run_algoforge_simulation_orchestrator(
-     problem_type_selected: str,
-     problem_description_text: str,
-     initial_hints_text: str,
      user_provided_tests_code: str,
-     num_initial_solutions_to_gen: int,
      selected_model_ui_key: str,
      genesis_temp: float, genesis_max_tokens: int,
      critique_temp: float, critique_max_tokens: int,
      evolution_temp: float, evolution_max_tokens: int,
-     progress=gr.Progress(track_tqdm=True) # Gradio progress tracker
  ):
      progress(0, desc="Initializing AlgoForge Prime™...")
      log_entries = [f"**AlgoForge Prime™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**"]
      start_time = time.time()

-     # Basic input validation
      if not problem_description_text.strip():
-         error_msg = "CRITICAL INPUT ERROR: Problem Description is mandatory. Please describe the problem."
          log_entries.append(error_msg)
-         return error_msg, "", "", "\n".join(log_entries), "" # Return 5 values for outputs

      current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
      if not current_model_config or current_model_config["type"] == "none":
-         error_msg = f"CRITICAL CONFIG ERROR: No valid LLM selected ('{selected_model_ui_key}'). This usually means API keys are missing or failed to initialize. Check Space Secrets and restart."
          log_entries.append(error_msg)
          return error_msg, "", "", "\n".join(log_entries), ""
@@ -73,73 +109,58 @@ def run_algoforge_simulation_orchestrator(
      log_entries.append(f"Problem Type: {problem_type_selected}")
      log_entries.append(f"User Unit Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}")

-     # Prepare LLM configurations for each stage
      llm_config_genesis = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": genesis_temp, "max_tokens": genesis_max_tokens}
      llm_config_critique = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": critique_temp, "max_tokens": critique_max_tokens}
      llm_config_evolution = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": evolution_temp, "max_tokens": evolution_max_tokens}

-     # --- STAGE 1: GENESIS ---
-     progress(0.1, desc="Stage 1: Genesis Engine - Generating Initial Solutions...")
      log_entries.append("\n**------ STAGE 1: GENESIS ENGINE ------**")
-
      initial_raw_solutions = generate_initial_solutions(
          problem_description_text, initial_hints_text, problem_type_selected,
          num_initial_solutions_to_gen, llm_config_genesis
      )
-     log_entries.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw solution candidate(s).")
-     for i, sol_text in enumerate(initial_raw_solutions):
          log_entries.append(f" Candidate {i+1} (Raw Snippet): {str(sol_text)[:120]}...")

-     # --- STAGE 2: CRITIQUE & AUTOMATED EVALUATION ---
-     progress(0.3, desc="Stage 2: Critique Crucible - Evaluating Candidates...")
-     log_entries.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
-
-     evaluated_candidates_list = [] # Stores dicts: {"id": ..., "solution_text": ..., "evaluation_result": EvaluationResult}
      for i, candidate_solution_text in enumerate(initial_raw_solutions):
-         current_progress = 0.3 + ( (i + 1) / num_initial_solutions_to_gen ) * 0.35 # Progress for evaluation stage
-         progress(current_progress, desc=f"Evaluating Candidate {i+1} of {num_initial_solutions_to_gen}...")
          log_entries.append(f"\n--- Evaluating Candidate {i+1} ---")
-
-         # The evaluation_engine handles if candidate_solution_text itself is an error string
-         evaluation_obj = evaluate_solution_candidate( # type: EvaluationResult
-             candidate_solution_text, problem_description_text, problem_type_selected,
              user_provided_tests_code, llm_config_critique
          )
-
          log_entries.append(f" Final Combined Score: {evaluation_obj.score}/10")
          log_entries.append(f" Automated Tests: {evaluation_obj.passed_tests}/{evaluation_obj.total_tests} passed.")
          if evaluation_obj.execution_summary: log_entries.append(f" Execution Summary: {evaluation_obj.execution_summary}")
          log_entries.append(f" LLM Critique (Snippet): {str(evaluation_obj.critique_text)[:150]}...")
-
-         evaluated_candidates_list.append({
-             "id": i + 1,
-             "solution_text": candidate_solution_text, # Store original text, even if it was an error from genesis
-             "evaluation_result": evaluation_obj
-         })

-     # Format display for initial solutions & evaluations
      initial_solutions_display_markdown = []
-     for data in evaluated_candidates_list:
          initial_solutions_display_markdown.append(
-             f"**Candidate {data['id']}:**\n"
-             f"```python\n{data['solution_text']}\n```\n\n" # Assuming python for display, adjust if problem_type varies widely
-             f"**Evaluation Verdict (Combined Score: {data['evaluation_result'].score}/10):**\n"
-             f"{data['evaluation_result'].critique_text}\n---"
          )
-
-     # --- STAGE 3: SELECTION OF CHAMPION ---
-     progress(0.7, desc="Stage 3: Selecting Champion Candidate...")
      log_entries.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
-
-     # Filter out candidates that were errors from genesis OR had very low evaluation scores (e.g., score of 0 from evaluation)
-     # We want to select a champion that is actually a piece of code/algorithm, not an error message.
      potentially_viable_candidates = [
-         cand for cand in evaluated_candidates_list
          if cand["evaluation_result"] and cand["evaluation_result"].score > 0 and \
-             cand["solution_text"] and not cand["solution_text"].startswith("ERROR")
      ]
-
      if not potentially_viable_candidates:
          final_error_msg = "No viable candidate solutions found after generation and evaluation. All attempts may have failed or scored too low."
          log_entries.append(f" CRITICAL: {final_error_msg}")
@@ -147,55 +168,41 @@ def run_algoforge_simulation_orchestrator(

      potentially_viable_candidates.sort(key=lambda x: x["evaluation_result"].score, reverse=True)
      champion_candidate_data = potentially_viable_candidates[0]
-
      log_entries.append(f"Champion Selected: Candidate {champion_candidate_data['id']} "
-                        f"(Solution Snippet: {champion_candidate_data['solution_text'][:60]}...) "
                         f"with evaluation score {champion_candidate_data['evaluation_result'].score}/10.")
-
      champion_display_markdown = (
          f"**Champion Candidate ID: {champion_candidate_data['id']} "
          f"(Original Combined Score: {champion_candidate_data['evaluation_result'].score}/10):**\n"
          f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
-         f"**Original Comprehensive Evaluation for this Champion:**\n"
-         f"{champion_candidate_data['evaluation_result'].critique_text}"
      )

-     # --- STAGE 4: EVOLUTIONARY FORGE ---
-     progress(0.75, desc="Stage 4: Evolutionary Forge - Refining Champion...")
      log_entries.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
-
      evolved_solution_code = evolve_solution(
-         champion_candidate_data["solution_text"],
-         champion_candidate_data["evaluation_result"].critique_text, # Pass the full critique
          champion_candidate_data["evaluation_result"].score,
-         problem_description_text,
-         problem_type_selected,
-         llm_config_evolution
      )
      log_entries.append(f"Raw Evolved Solution Text (Snippet): {str(evolved_solution_code)[:150]}...")
-
      evolved_solution_display_markdown = ""
-     ai_test_analysis_markdown = "" # For LLM explanation of unit test results of evolved code

-     if evolved_solution_code.startswith("ERROR"):
          evolved_solution_display_markdown = f"**Evolution Stage Failed:**\n{evolved_solution_code}"
      else:
          evolved_solution_display_markdown = f"**✨ AlgoForge Prime™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```"
-
-     # Optionally, re-evaluate the evolved solution with unit tests if provided and applicable
      if "python" in problem_type_selected.lower() and user_provided_tests_code.strip():
-         progress(0.9, desc="Post-Evolution: Re-running Automated Tests on Evolved Code...")
          log_entries.append("\n--- Post-Evolution Sanity Check (Automated Tests on Evolved Code) ---")
-
-         # Use a low temperature for this critique to focus on test results rather than creative critique
-         # The critique part here is mostly for consistency, primary goal is test execution.
          evolved_critique_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.2, "max_tokens": critique_max_tokens}
-
-         evolved_code_eval_result = evaluate_solution_candidate( # type: EvaluationResult
-             evolved_solution_code, problem_description_text, problem_type_selected,
              user_provided_tests_code, evolved_critique_config
          )
-
          evolved_solution_display_markdown += (
              f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n"
              f"{evolved_code_eval_result.execution_summary}\n"
@@ -204,23 +211,18 @@ def run_algoforge_simulation_orchestrator(
          log_entries.append(f" Evolved Code Test Results: {evolved_code_eval_result.passed_tests}/{evolved_code_eval_result.total_tests} passed. "
                             f"Summary: {evolved_code_eval_result.execution_summary}")

-         # Get LLM to explain the test results of the evolved code
-         if evolved_code_eval_result.total_tests > 0 : # Only if tests were run
              progress(0.95, desc="Post-Evolution: AI Analyzing Test Results...")
              log_entries.append("\n--- AI Analysis of Evolved Code's Test Results ---")
              analysis_user_prompt = format_code_test_analysis_user_prompt(
-                 evolved_solution_code,
-                 user_provided_tests_code,
-                 evolved_code_eval_result.execution_summary # Pass the summary string
              )
              analysis_system_prompt = get_system_prompt("code_execution_explainer")

-             # Use a config for analysis - can be same as critique or specialized
-             llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"],
-                                    "temp": 0.3, "max_tokens": critique_max_tokens + 100} # A bit more tokens for explanation
-
-             from core.llm_clients import call_huggingface_api, call_gemini_api # Re-import for clarity or use a dispatcher
-
              explanation_response_obj = None
              if llm_analysis_config["type"] == "hf":
                  explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
@@ -229,12 +231,11 @@ def run_algoforge_simulation_orchestrator(

              if explanation_response_obj and explanation_response_obj.success:
                  ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
-                 log_entries.append(f" AI Test Analysis (Snippet): {explanation_response_obj.text[:100]}...")
              elif explanation_response_obj:
                  ai_test_analysis_markdown = f"**AI Analysis of Test Performance Failed:**\n{explanation_response_obj.error}"
                  log_entries.append(f" AI Test Analysis Error: {explanation_response_obj.error}")

-
      total_time = time.time() - start_time
      log_entries.append(f"\n**AlgoForge Prime™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
      progress(1.0, desc="Cycle Complete!")
@@ -243,102 +244,81 @@ def run_algoforge_simulation_orchestrator(


  # --- Gradio UI Definition ---
- # (This section is largely similar to the previous app.py, with updated input/output connections)
-
  intro_markdown = """
- # ✨ AlgoForge Prime™ ✨: Modular Algorithmic Evolution (v2)
- This enhanced version uses a modular codebase and demonstrates a conceptual workflow for AI-assisted algorithm discovery,
  featuring (simulated) unit testing for Python code if provided.

- **API Keys Required in Space Secrets:**
  - `GOOGLE_API_KEY` (Primary): For Google Gemini API models. Ensure the "Generative Language API" (or similar) is enabled for your project.
  - `HF_TOKEN` (Secondary): For Hugging Face hosted models.
- If keys are missing or invalid, corresponding models will be unavailable.
  """

- # Determine API status for UI message
- ui_token_status_md = ""
  if not GEMINI_API_CONFIGURED and not HF_API_CONFIGURED:
      ui_token_status_md = "<p style='color:red;'>⚠️ **CRITICAL: NEITHER GOOGLE_API_KEY NOR HF_TOKEN are configured or working correctly.** The application will not be able to call any LLMs.</p>"
  else:
-     if GEMINI_API_CONFIGURED:
-         ui_token_status_md += "<p style='color:green;'>✅ Google Gemini API Key detected and configured.</p>"
-     else:
-         ui_token_status_md += "<p style='color:orange;'>⚠️ **GOOGLE_API_KEY missing or failed to configure.** Gemini API models will be disabled.</p>"
-
-     if HF_API_CONFIGURED:
-         ui_token_status_md += "<p style='color:green;'>✅ Hugging Face API Token detected and client initialized.</p>"
-     else:
-         ui_token_status_md += "<p style='color:orange;'>⚠️ **HF_TOKEN missing or client failed to initialize.** Hugging Face models will be disabled.</p>"


- with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="purple"), title="AlgoForge Prime™ Modular v2") as app_demo:
      gr.Markdown(intro_markdown)
      gr.HTML(ui_token_status_md)

-     if not AVAILABLE_MODELS_CONFIG or UI_DEFAULT_MODEL_KEY == "No Models Available (Check API Keys)":
-         gr.Markdown("<h2 style='color:red;'>No LLM models are available. Please check your API key configurations in this Space's Secrets and restart the Space. The application cannot function without at least one working API configuration.</h2>")
      else:
          with gr.Row():
-             # Input Column
-             with gr.Column(scale=2): # Input column slightly wider
                  gr.Markdown("## 💡 1. Define the Challenge")
                  problem_type_dropdown = gr.Dropdown(
                      choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea", "Conceptual System Design", "Pseudocode Refinement"],
                      label="Type of Problem / Algorithm", value="Python Algorithm with Tests",
-                     info="Select '...with Tests' to enable (simulated) unit testing if you provide tests below."
-                 )
-                 problem_description_textbox = gr.Textbox(
-                     lines=5, label="Problem Description / Desired Outcome",
-                     placeholder="Example for 'Python Algorithm with Tests':\n`def calculate_factorial(n: int) -> int:`\nCalculates factorial of n. Should handle n=0 (returns 1) and raise ValueError for n<0."
-                 )
-                 initial_hints_textbox = gr.Textbox(
-                     lines=3, label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
-                     placeholder="E.g., 'Prefer an iterative solution over recursive for factorial.' or 'Consider time complexity.'"
-                 )
-                 user_tests_textbox = gr.Textbox(
-                     lines=6, label="Python Unit Tests (Optional, one `assert` per line)",
-                     placeholder="assert calculate_factorial(0) == 1\nassert calculate_factorial(5) == 120\n# try: calculate_factorial(-1); assert False # Expected ValueError\n# except ValueError: assert True",
-                     info="For 'Python Algorithm with Tests'. Ensure function names match your problem description."
                  )

                  gr.Markdown("## ⚙️ 2. Configure The Forge")
                  model_selection_dropdown = gr.Dropdown(
                      choices=list(AVAILABLE_MODELS_CONFIG.keys()),
-                     value=UI_DEFAULT_MODEL_KEY if UI_DEFAULT_MODEL_KEY in AVAILABLE_MODELS_CONFIG else (list(AVAILABLE_MODELS_CONFIG.keys())[0] if AVAILABLE_MODELS_CONFIG else None),
                      label="Select LLM Core Model",
-                     info="Ensure the corresponding API key (Google or HF) is configured in secrets."
                  )
-                 num_initial_solutions_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="Number of Initial Solutions (Genesis Engine)")

                  with gr.Accordion("Advanced LLM Parameters (Expert Users)", open=False):
                      with gr.Row():
-                         genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.05, label="Genesis Temp", info="Higher = more creative, Lower = more deterministic.") # Gemini range often 0-1
-                         genesis_max_tokens_slider = gr.Slider(minimum=200, maximum=2048, value=768, step=64, label="Genesis Max Output Tokens")
                      with gr.Row():
                          critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
-                         critique_max_tokens_slider = gr.Slider(minimum=150, maximum=1024, value=512, step=64, label="Critique Max Output Tokens")
                      with gr.Row():
                          evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.75, step=0.05, label="Evolution Temp")
-                         evolution_max_tokens_slider = gr.Slider(minimum=200, maximum=2048, value=1024, step=64, label="Evolution Max Output Tokens")

-                 engage_button = gr.Button("🚀 ENGAGE ALGOFORGE PRIME™ 🚀", variant="primary", size="lg", elem_id="engage_button_elem")

-             # Output Column
-             with gr.Column(scale=3): # Output column wider
                  gr.Markdown("## 🔥 3. The Forge's Output")
-                 with gr.Tabs(elem_id="output_tabs_elem"):
-                     with gr.TabItem("📜 Initial Candidates & Evaluations", id="tab_initial_evals"):
-                         output_initial_solutions_markdown = gr.Markdown(label="Generated Solutions & Combined Evaluations")
-                     with gr.TabItem("🏆 Champion Candidate (Pre-Evolution)", id="tab_champion"):
-                         output_champion_markdown = gr.Markdown(label="Top Pick for Refinement")
-                     with gr.TabItem("🌟 Evolved Artifact & Test Analysis", id="tab_evolved"):
-                         output_evolved_markdown = gr.Markdown(label="Refined Solution from Evolutionary Forge")
-                         output_ai_test_analysis_markdown = gr.Markdown(label="AI Analysis of Evolved Code's Test Performance")
-                     with gr.TabItem("🛠️ Interaction Log (Developer View)", id="tab_log"):
-                         output_interaction_log_markdown = gr.Markdown(label="Detailed Log of LLM Prompts & Responses")

-         # Connect button to the orchestration function
          engage_button.click(
              fn=run_algoforge_simulation_orchestrator,
              inputs=[
@@ -354,29 +334,20 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="purple"
                  output_ai_test_analysis_markdown
              ]
          )
-
          gr.Markdown("---")
          gr.Markdown(
-             "**Disclaimer:** This is a conceptual, educational demonstration. "
-             "The (simulated) unit testing feature is for illustrative purposes. "
-             "**NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** "
-             "Implementing robust and secure code sandboxing is complex and absolutely critical for safety in real-world applications. "
-             "LLM outputs always require careful human review and verification."
          )

  # --- Entry Point for Running the Gradio App ---
  if __name__ == "__main__":
      print("="*80)
-     print("AlgoForge Prime™ (Modular Version v2 with Simulated Testing) - Launching...")
-
      print(f" Google Gemini API Configured: {GEMINI_API_CONFIGURED}")
      print(f" Hugging Face API Configured: {HF_API_CONFIGURED}")
-
      if not GEMINI_API_CONFIGURED and not HF_API_CONFIGURED:
-         print(" CRITICAL WARNING: No API keys seem to be configured. The application will likely be non-functional.")
-
      print(f" UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")
      print(f" Available models for UI: {list(AVAILABLE_MODELS_CONFIG.keys())}")
      print("="*80)
-
-     app_demo.launch(debug=True, server_name="0.0.0.0") # server_name="0.0.0.0" is often needed for Docker/Spaces
+++ app.py (new version; added lines are marked "+")
  import os
  import time # For progress updates

+ # --- Core Logic Imports ---
+ # Initialize clients first to ensure API keys are loaded before other modules use them.
  from core.llm_clients import initialize_all_clients, GEMINI_API_CONFIGURED, HF_API_CONFIGURED
+ initialize_all_clients() # Call initialization once when the app starts
+
  from core.generation_engine import generate_initial_solutions
+ from core.evaluation_engine import evaluate_solution_candidate, EvaluationResult # Class for typed results
  from core.evolution_engine import evolve_solution
  from prompts.system_prompts import get_system_prompt
  from prompts.prompt_templates import format_code_test_analysis_user_prompt

  # --- Application Configuration (Models, Defaults) ---
  AVAILABLE_MODELS_CONFIG = {}
+ UI_DEFAULT_MODEL_KEY = None # Will be set based on configured APIs
+
+ # --- Placeholder for the actual API model ID string ---
+ # You need to find the correct string from Google's documentation or AI Studio for this model.
+ # It might be something like "models/gemini-2.5-pro-preview-0506" or similar.
+ GEMINI_2_5_PRO_PREVIEW_MODEL_ID = "YOUR_GEMINI_2.5_PRO_PREVIEW_0506_MODEL_ID_STRING_HERE"
+ # Example: "gemini-experimental" or a more specific preview ID if available via API.
+ # For now, if you don't have the exact ID, you can use a known working one like "gemini-1.5-pro-latest"
+ # and then update this when you get the 2.5 Pro Preview ID.
+ # If GEMINI_2_5_PRO_PREVIEW_MODEL_ID remains the placeholder, that model won't work.
+ # Let's use a known one for now if the placeholder isn't replaced.
+ if GEMINI_2_5_PRO_PREVIEW_MODEL_ID == "YOUR_GEMINI_2.5_PRO_PREVIEW_0506_MODEL_ID_STRING_HERE":
+     print(f"WARNING: app.py - GEMINI_2_5_PRO_PREVIEW_MODEL_ID is a placeholder. Using 'gemini-1.5-pro-latest' as a stand-in for Gemini 2.5 Pro Preview.")
+     SAFE_GEMINI_PRO_ID = "gemini-1.5-pro-latest" # A known recent Pro model
+ else:
+     SAFE_GEMINI_PRO_ID = GEMINI_2_5_PRO_PREVIEW_MODEL_ID
+

+ # Populate with Gemini models first if API is configured
  if GEMINI_API_CONFIGURED:
      AVAILABLE_MODELS_CONFIG.update({
+         # Update this line with the correct display name and ID once known
+         "Google Gemini 2.5 Pro Preview (API)": {"id": SAFE_GEMINI_PRO_ID, "type": "google_gemini"},
+         "Google Gemini 1.5 Flash (API - Fast)": {"id": "gemini-1.5-flash-latest", "type": "google_gemini"},
+         "Google Gemini 1.0 Pro (API - Legacy)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
      })
+     # Prioritize the newest Pro model as default if its ID is not the placeholder
+     if SAFE_GEMINI_PRO_ID != "gemini-1.5-pro-latest" or GEMINI_2_5_PRO_PREVIEW_MODEL_ID != "YOUR_GEMINI_2.5_PRO_PREVIEW_0506_MODEL_ID_STRING_HERE":
+         UI_DEFAULT_MODEL_KEY = "Google Gemini 2.5 Pro Preview (API)"
+     else: # Fallback to Flash if 2.5 Pro ID is still placeholder
+         UI_DEFAULT_MODEL_KEY = "Google Gemini 1.5 Flash (API - Fast)"
+     print(f"INFO: app.py - Gemini models populated. Default set to: {UI_DEFAULT_MODEL_KEY}")
+ else:
+     print("WARNING: app.py - Gemini API not configured; Gemini models will be unavailable.")

+ # Populate with Hugging Face models if API is configured (as alternatives/fallbacks)
  if HF_API_CONFIGURED:
      AVAILABLE_MODELS_CONFIG.update({
          "Google Gemma 2B (HF - Quick Test)": {"id": "google/gemma-2b-it", "type": "hf"},
          "Mistral 7B Instruct (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
+         "CodeLlama 7B Instruct (HF)": {"id": "codellama/CodeLlama-7b-Instruct-hf", "type": "hf"},
      })
+     if not UI_DEFAULT_MODEL_KEY: # If Gemini wasn't configured, default to an HF model
          UI_DEFAULT_MODEL_KEY = "Google Gemma 2B (HF - Quick Test)"
+         print("INFO: app.py - HF models populated; default set to an HF model as Gemini was not available.")
+     else:
+         print("INFO: app.py - HF models also populated as alternatives.")
+ else:
+     print("WARNING: app.py - Hugging Face API not configured; HF models will be unavailable.")

+ # Absolute fallback if no models could be configured at all
  if not AVAILABLE_MODELS_CONFIG:
      print("CRITICAL APP ERROR: No models could be configured. Check API keys in Space Secrets.")
+     AVAILABLE_MODELS_CONFIG["No Models Available (Check API Keys)"] = {"id": "dummy_error", "type": "none"}
      UI_DEFAULT_MODEL_KEY = "No Models Available (Check API Keys)"
  elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
+     UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0]
+     print(f"WARNING: app.py - UI_DEFAULT_MODEL_KEY was not set by primary logic, falling back to first available: {UI_DEFAULT_MODEL_KEY}")
+

  # --- Main Orchestration Logic for Gradio ---
+ # This function remains the same as in the previous "full rewrite" that included all files.
  def run_algoforge_simulation_orchestrator(
+     problem_type_selected: str,
+     problem_description_text: str,
+     initial_hints_text: str,
      user_provided_tests_code: str,
+     num_initial_solutions_to_gen: int,
      selected_model_ui_key: str,
      genesis_temp: float, genesis_max_tokens: int,
      critique_temp: float, critique_max_tokens: int,
      evolution_temp: float, evolution_max_tokens: int,
+     progress=gr.Progress(track_tqdm=True)
  ):
      progress(0, desc="Initializing AlgoForge Prime™...")
      log_entries = [f"**AlgoForge Prime™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**"]
      start_time = time.time()

      if not problem_description_text.strip():
+         error_msg = "CRITICAL INPUT ERROR: Problem Description is mandatory."
          log_entries.append(error_msg)
+         return error_msg, "", "", "\n".join(log_entries), ""

      current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
      if not current_model_config or current_model_config["type"] == "none":
+         error_msg = f"CRITICAL CONFIG ERROR: No valid LLM selected ('{selected_model_ui_key}'). API keys might be missing or failed initialization. Please check Space Secrets & restart."
          log_entries.append(error_msg)
          return error_msg, "", "", "\n".join(log_entries), ""

      log_entries.append(f"Problem Type: {problem_type_selected}")
      log_entries.append(f"User Unit Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}")

      llm_config_genesis = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": genesis_temp, "max_tokens": genesis_max_tokens}
      llm_config_critique = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": critique_temp, "max_tokens": critique_max_tokens}
      llm_config_evolution = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": evolution_temp, "max_tokens": evolution_max_tokens}

+     # STAGE 1: GENESIS
+     progress(0.05, desc="Stage 1: Genesis Engine - Generating...") # Adjusted progress start
      log_entries.append("\n**------ STAGE 1: GENESIS ENGINE ------**")
      initial_raw_solutions = generate_initial_solutions(
          problem_description_text, initial_hints_text, problem_type_selected,
          num_initial_solutions_to_gen, llm_config_genesis
      )
+     log_entries.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw candidate(s).")
+     for i, sol_text in enumerate(initial_raw_solutions): # Log snippets
          log_entries.append(f" Candidate {i+1} (Raw Snippet): {str(sol_text)[:120]}...")

+     # STAGE 2: CRITIQUE & AUTOMATED EVALUATION
+     progress(0.25, desc="Stage 2: Critique Crucible - Evaluating...") # Adjusted progress
+     log_entries.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
+     evaluated_candidates_list = []
      for i, candidate_solution_text in enumerate(initial_raw_solutions):
+         current_progress = 0.25 + ((i + 1) / num_initial_solutions_to_gen) * 0.4 # Progress for evaluation
+         progress(current_progress, desc=f"Evaluating Candidate {i+1}...")
          log_entries.append(f"\n--- Evaluating Candidate {i+1} ---")
+         evaluation_obj = evaluate_solution_candidate(
+             candidate_solution_text, problem_description_text, problem_type_selected,
              user_provided_tests_code, llm_config_critique
          )
+         evaluated_candidates_list.append({
+             "id": i + 1, "solution_text": candidate_solution_text, "evaluation_result": evaluation_obj
+         })
          log_entries.append(f" Final Combined Score: {evaluation_obj.score}/10")
          log_entries.append(f" Automated Tests: {evaluation_obj.passed_tests}/{evaluation_obj.total_tests} passed.")
          if evaluation_obj.execution_summary: log_entries.append(f" Execution Summary: {evaluation_obj.execution_summary}")
          log_entries.append(f" LLM Critique (Snippet): {str(evaluation_obj.critique_text)[:150]}...")

+
      initial_solutions_display_markdown = []
+     for data in evaluated_candidates_list: # Format display for initial solutions
          initial_solutions_display_markdown.append(
+             f"**Candidate {data['id']}:**\n```python\n{data['solution_text']}\n```\n\n"
+             f"**Evaluation Verdict (Combined Score: {data['evaluation_result'].score}/10):**\n{data['evaluation_result'].critique_text}\n---"
          )
+
+     # STAGE 3: SELECTION OF CHAMPION
+     progress(0.7, desc="Stage 3: Selecting Champion...")
      log_entries.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
      potentially_viable_candidates = [
+         cand for cand in evaluated_candidates_list
          if cand["evaluation_result"] and cand["evaluation_result"].score > 0 and \
+             cand["solution_text"] and not str(cand["solution_text"]).startswith("ERROR") # Ensure solution_text is str
      ]
      if not potentially_viable_candidates:
          final_error_msg = "No viable candidate solutions found after generation and evaluation. All attempts may have failed or scored too low."
          log_entries.append(f" CRITICAL: {final_error_msg}")

      potentially_viable_candidates.sort(key=lambda x: x["evaluation_result"].score, reverse=True)
      champion_candidate_data = potentially_viable_candidates[0]
      log_entries.append(f"Champion Selected: Candidate {champion_candidate_data['id']} "
+                        f"(Solution Snippet: {str(champion_candidate_data['solution_text'])[:60]}...) " # str() for safety
                         f"with evaluation score {champion_candidate_data['evaluation_result'].score}/10.")
      champion_display_markdown = (
          f"**Champion Candidate ID: {champion_candidate_data['id']} "
          f"(Original Combined Score: {champion_candidate_data['evaluation_result'].score}/10):**\n"
          f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
+         f"**Original Comprehensive Evaluation for this Champion:**\n{champion_candidate_data['evaluation_result'].critique_text}"
      )

+     # STAGE 4: EVOLUTIONARY FORGE
+     progress(0.75, desc="Stage 4: Evolutionary Forge - Refining...")
      log_entries.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
      evolved_solution_code = evolve_solution(
+         str(champion_candidate_data["solution_text"]), # str() for safety
+         str(champion_candidate_data["evaluation_result"].critique_text),
          champion_candidate_data["evaluation_result"].score,
+         problem_description_text, problem_type_selected, llm_config_evolution
      )
      log_entries.append(f"Raw Evolved Solution Text (Snippet): {str(evolved_solution_code)[:150]}...")
      evolved_solution_display_markdown = ""
+     ai_test_analysis_markdown = ""

+     if str(evolved_solution_code).startswith("ERROR"): # str() for safety
          evolved_solution_display_markdown = f"**Evolution Stage Failed:**\n{evolved_solution_code}"
      else:
          evolved_solution_display_markdown = f"**✨ AlgoForge Prime™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```"

      if "python" in problem_type_selected.lower() and user_provided_tests_code.strip():
+         progress(0.9, desc="Post-Evolution: Testing Evolved Code...")
          log_entries.append("\n--- Post-Evolution Sanity Check (Automated Tests on Evolved Code) ---")
          evolved_critique_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.2, "max_tokens": critique_max_tokens}
+         evolved_code_eval_result = evaluate_solution_candidate(
+             str(evolved_solution_code), problem_description_text, problem_type_selected,
              user_provided_tests_code, evolved_critique_config
          )
          evolved_solution_display_markdown += (
              f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n"
              f"{evolved_code_eval_result.execution_summary}\n"

          log_entries.append(f" Evolved Code Test Results: {evolved_code_eval_result.passed_tests}/{evolved_code_eval_result.total_tests} passed. "
                             f"Summary: {evolved_code_eval_result.execution_summary}")

+         if evolved_code_eval_result.total_tests > 0 :
              progress(0.95, desc="Post-Evolution: AI Analyzing Test Results...")
              log_entries.append("\n--- AI Analysis of Evolved Code's Test Results ---")
              analysis_user_prompt = format_code_test_analysis_user_prompt(
+                 str(evolved_solution_code), user_provided_tests_code,
+                 str(evolved_code_eval_result.execution_summary)
              )
              analysis_system_prompt = get_system_prompt("code_execution_explainer")
+             llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"],
+                                    "temp": 0.3, "max_tokens": critique_max_tokens + 150} # Ensure enough tokens for analysis

+             from core.llm_clients import call_huggingface_api, call_gemini_api # Direct import for clarity
              explanation_response_obj = None
              if llm_analysis_config["type"] == "hf":
                  explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)

              if explanation_response_obj and explanation_response_obj.success:
                  ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
+                 log_entries.append(f" AI Test Analysis (Snippet): {str(explanation_response_obj.text)[:100]}...")
              elif explanation_response_obj:
                  ai_test_analysis_markdown = f"**AI Analysis of Test Performance Failed:**\n{explanation_response_obj.error}"
                  log_entries.append(f" AI Test Analysis Error: {explanation_response_obj.error}")

      total_time = time.time() - start_time
      log_entries.append(f"\n**AlgoForge Prime™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
      progress(1.0, desc="Cycle Complete!")


  # --- Gradio UI Definition ---
  intro_markdown = """
+ # ✨ AlgoForge Prime™ ✨: Modular Algorithmic Evolution (v2.5 Gemini Focus)
+ This version prioritizes the latest Google Gemini models and demonstrates a conceptual workflow for AI-assisted algorithm discovery,
  featuring (simulated) unit testing for Python code if provided.

+ **API Keys Required in Space Secrets (should be working):**
  - `GOOGLE_API_KEY` (Primary): For Google Gemini API models. Ensure the "Generative Language API" (or similar) is enabled for your project.
  - `HF_TOKEN` (Secondary): For Hugging Face hosted models.
  """

+ ui_token_status_md = ""
  if not GEMINI_API_CONFIGURED and not HF_API_CONFIGURED:
      ui_token_status_md = "<p style='color:red;'>⚠️ **CRITICAL: NEITHER GOOGLE_API_KEY NOR HF_TOKEN are configured or working correctly.** The application will not be able to call any LLMs.</p>"
  else:
+     if GEMINI_API_CONFIGURED: ui_token_status_md += "<p style='color:green;'>✅ Google Gemini API Key detected and configured.</p>"
+     else: ui_token_status_md += "<p style='color:orange;'>⚠️ **GOOGLE_API_KEY missing or failed to configure.** Gemini API models will be disabled.</p>"
+     if HF_API_CONFIGURED: ui_token_status_md += "<p style='color:green;'>✅ Hugging Face API Token detected and client initialized.</p>"
+     else: ui_token_status_md += "<p style='color:orange;'>⚠️ **HF_TOKEN missing or client failed to initialize.** Hugging Face models will be disabled.</p>"


+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), title="AlgoForge Prime™ v2.5") as app_demo: # Updated theme
      gr.Markdown(intro_markdown)
      gr.HTML(ui_token_status_md)

+     if not AVAILABLE_MODELS_CONFIG or UI_DEFAULT_MODEL_KEY == "No Models Available (Check API Keys)" or (UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG[UI_DEFAULT_MODEL_KEY]["type"] == "none"):
+         gr.Markdown("<h2 style='color:red;'>No LLM models are available. Please ensure API keys are correctly set in Space Secrets and that the Space has been restarted.</h2>")
      else:
          with gr.Row():
+             with gr.Column(scale=2):
                  gr.Markdown("## 💡 1. Define the Challenge")
                  problem_type_dropdown = gr.Dropdown(
                      choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea", "Conceptual System Design", "Pseudocode Refinement"],
                      label="Type of Problem / Algorithm", value="Python Algorithm with Tests",
+                     info="Select '...with Tests' to enable (simulated) unit testing."
                  )
+                 problem_description_textbox = gr.Textbox(lines=5, label="Problem Description / Desired Outcome", placeholder="Describe the algorithmic task clearly.")
+                 initial_hints_textbox = gr.Textbox(lines=3, label="Initial Thoughts / Constraints (Optional)", placeholder="Any specific approaches or limitations.")
+                 user_tests_textbox = gr.Textbox(lines=6, label="Python Unit Tests (Optional, one `assert` per line)", placeholder="assert function_name(input) == expected_output")

                  gr.Markdown("## ⚙️ 2. Configure The Forge")
                  model_selection_dropdown = gr.Dropdown(
                      choices=list(AVAILABLE_MODELS_CONFIG.keys()),
+                     value=UI_DEFAULT_MODEL_KEY,
                      label="Select LLM Core Model",
+                     info="Ensure the corresponding API key is working."
                  )
+                 num_initial_solutions_slider = gr.Slider(minimum=1, maximum=3, value=2, step=1, label="Number of Initial Solutions") # Max 3 for faster iterations

                  with gr.Accordion("Advanced LLM Parameters (Expert Users)", open=False):
                      with gr.Row():
+                         genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.05, label="Genesis Temp")
+                         genesis_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1024, step=128, label="Genesis Max Tokens") # Increased range
                      with gr.Row():
                          critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
+                         critique_max_tokens_slider = gr.Slider(minimum=150, maximum=2048, value=512, step=64, label="Critique Max Tokens")
                      with gr.Row():
                          evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.75, step=0.05, label="Evolution Temp")
+                         evolution_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=128, label="Evolution Max Tokens")
+

+                 engage_button = gr.Button("🚀 ENGAGE ALGOFORGE PRIME™ 🚀", variant="primary", size="lg")

+             with gr.Column(scale=3):
                  gr.Markdown("## 🔥 3. The Forge's Output")
+                 with gr.Tabs():
+                     with gr.TabItem("📜 Initial Candidates & Evaluations"):
+                         output_initial_solutions_markdown = gr.Markdown()
+                     with gr.TabItem("🏆 Champion Candidate"):
+                         output_champion_markdown = gr.Markdown()
+                     with gr.TabItem("🌟 Evolved Artifact & Test Analysis"):
+                         output_evolved_markdown = gr.Markdown()
+                         output_ai_test_analysis_markdown = gr.Markdown()
+                     with gr.TabItem("🛠️ Interaction Log"):
+                         output_interaction_log_markdown = gr.Markdown()

          engage_button.click(
              fn=run_algoforge_simulation_orchestrator,
              inputs=[

                  output_ai_test_analysis_markdown
              ]
          )
          gr.Markdown("---")
          gr.Markdown(
+             "**Disclaimer:** Conceptual demo. (Simulated) unit testing is illustrative. **NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** Real sandboxing is critical for safety."
          )

  # --- Entry Point for Running the Gradio App ---
  if __name__ == "__main__":
      print("="*80)
+     print("AlgoForge Prime™ (Modular Version v2.5 Gemini Focus) - Launching...")
      print(f" Google Gemini API Configured: {GEMINI_API_CONFIGURED}")
      print(f" Hugging Face API Configured: {HF_API_CONFIGURED}")
      if not GEMINI_API_CONFIGURED and not HF_API_CONFIGURED:
+         print(" CRITICAL WARNING: No API keys seem to be configured. App will be non-functional.")
      print(f" UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")
      print(f" Available models for UI: {list(AVAILABLE_MODELS_CONFIG.keys())}")
      print("="*80)
+     app_demo.launch(debug=True, server_name="0.0.0.0") # server_name="0.0.0.0" for Docker/Spaces
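A note on the new configuration block above: the commit leaves GEMINI_2_5_PRO_PREVIEW_MODEL_ID as a placeholder until the exact API model ID is known. One way to find a usable ID is to ask the API which models the configured key can actually call. The following is a minimal sketch using the google-generativeai SDK; it assumes that is the SDK core/llm_clients.py wraps and that GOOGLE_API_KEY is set in the environment, and it is illustrative only, not part of the commit:

    # Sketch: list the Gemini model IDs this API key can use, so the
    # GEMINI_2_5_PRO_PREVIEW_MODEL_ID placeholder can be replaced with a real value.
    import os
    import google.generativeai as genai

    genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

    for m in genai.list_models():
        # Only models that support generateContent are usable for text generation.
        if "generateContent" in m.supported_generation_methods:
            print(m.name)  # e.g. "models/gemini-1.5-flash-latest"

An ID printed here can then be pasted into AVAILABLE_MODELS_CONFIG; the existing entries omit the leading "models/" prefix, which the SDK also accepts.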