mgbam committed
Commit 9826cfc · verified · 1 Parent(s): ebf9635

Update app.py

Files changed (1)
  1. app.py +370 -258
app.py CHANGED
@@ -1,310 +1,422 @@
1
  # algoforge_prime/app.py
2
  import gradio as gr
3
  import os
4
- import time # For progress updates and timing
 
5
 
6
  # --- Core Logic Imports ---
7
- # Initialize clients first to ensure API keys are loaded before other modules use them.
8
- from core.llm_clients import initialize_all_clients, is_gemini_api_configured, is_hf_api_configured # Use getters
9
- initialize_all_clients() # CRITICAL: Call initialization first
10
 
11
- # Now get the status AFTER initialization
12
  GEMINI_API_READY = is_gemini_api_configured()
13
  HF_API_READY = is_hf_api_configured()
14
 
15
  from core.generation_engine import generate_initial_solutions
16
- from core.evaluation_engine import evaluate_solution_candidate, EvaluationResultOutput # Using the renamed class
17
  from core.evolution_engine import evolve_solution
18
  from prompts.system_prompts import get_system_prompt
19
  from prompts.prompt_templates import format_code_test_analysis_user_prompt
20
- from core.safe_executor import execute_python_code_with_tests, ExecutionResult # For re-evaluating evolved code
21
 
22
- # --- Application Configuration (Models, Defaults) ---
 
23
  AVAILABLE_MODELS_CONFIG = {}
24
- UI_DEFAULT_MODEL_KEY = None
25
-
26
- GEMINI_1_5_PRO_LATEST_ID = "gemini-1.5-pro-latest"
27
  GEMINI_1_5_FLASH_LATEST_ID = "gemini-1.5-flash-latest"
28
 
29
  if GEMINI_API_READY:
30
  AVAILABLE_MODELS_CONFIG.update({
31
- f"Google Gemini 1.5 Pro (API - Recommended)": {"id": GEMINI_1_5_PRO_LATEST_ID, "type": "google_gemini"},
32
- f"Google Gemini 1.5 Flash (API - Fast)": {"id": GEMINI_1_5_FLASH_LATEST_ID, "type": "google_gemini"},
33
- "Google Gemini 1.0 Pro (API - Legacy)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
34
  })
35
- UI_DEFAULT_MODEL_KEY = f"Google Gemini 1.5 Pro (API - Recommended)"
36
  if UI_DEFAULT_MODEL_KEY not in AVAILABLE_MODELS_CONFIG:
37
- UI_DEFAULT_MODEL_KEY = f"Google Gemini 1.5 Flash (API - Fast)"
38
- print(f"INFO: app.py - Gemini models populated. Default set to: {UI_DEFAULT_MODEL_KEY}")
39
- else:
40
- print("WARNING: app.py - Gemini API not configured; Gemini models will be unavailable.")
41
 
42
  if HF_API_READY:
43
  AVAILABLE_MODELS_CONFIG.update({
44
- "Google Gemma 2B (HF - Quick Test)": {"id": "google/gemma-2b-it", "type": "hf"},
45
- "Mistral 7B Instruct (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
46
- "CodeLlama 7B Instruct (HF)": {"id": "codellama/CodeLlama-7b-Instruct-hf", "type": "hf"},
47
  })
48
- if not UI_DEFAULT_MODEL_KEY:
49
- UI_DEFAULT_MODEL_KEY = "Google Gemma 2B (HF - Quick Test)"
50
- print("INFO: app.py - HF models populated; default set as Gemini was not available.")
51
- else:
52
- print("INFO: app.py - HF models also populated as alternatives.")
53
- else:
54
- print("WARNING: app.py - Hugging Face API not configured; HF models will be unavailable.")
55
 
56
  if not AVAILABLE_MODELS_CONFIG:
57
- print("CRITICAL APP ERROR: No models could be configured. Check API keys and restart Space.")
58
- AVAILABLE_MODELS_CONFIG["No Models Available (Check API Keys & Restart)"] = {"id": "dummy_error", "type": "none"}
59
- UI_DEFAULT_MODEL_KEY = "No Models Available (Check API Keys & Restart)"
60
  elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
61
  UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0]
62
- print(f"WARNING: app.py - UI_DEFAULT_MODEL_KEY was not set by primary logic, falling back to: {UI_DEFAULT_MODEL_KEY}")
63
-
64
-
65
- # --- Main Orchestration Logic for Gradio ---
66
- def run_algoforge_simulation_orchestrator(
67
- problem_type_selected: str,
68
- problem_description_text: str,
69
- initial_hints_text: str,
70
- user_provided_tests_code: str,
71
- num_initial_solutions_to_gen: int,
72
- selected_model_ui_key: str,
73
- genesis_temp: float, genesis_max_tokens: int,
74
- critique_temp: float, critique_max_tokens: int,
75
  evolution_temp: float, evolution_max_tokens: int,
76
- progress=gr.Progress(track_tqdm=True)
 
77
  ):
78
- # CORRECTED: start_time defined at the beginning of the function
79
- start_time = time.time()
80
 
81
- progress(0, desc="Initializing AlgoForge Prime™...")
82
- log_entries = [f"**AlgoForge Prime™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**"]
83
-
84
- if not problem_description_text.strip():
85
- error_msg = "CRITICAL INPUT ERROR: Problem Description is mandatory. Please describe the problem."
86
- log_entries.append(error_msg)
87
- return error_msg, "", "", "\n".join(log_entries), ""
88
-
89
- current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
90
- if not current_model_config or current_model_config["type"] == "none":
91
- error_msg = f"CRITICAL CONFIG ERROR: No valid LLM selected ('{selected_model_ui_key}'). API keys might be missing or failed initialization. Please check Space Secrets & restart."
92
- log_entries.append(error_msg)
93
- return error_msg, "", "", "\n".join(log_entries), ""
94
-
95
- log_entries.append(f"Selected Model: {selected_model_ui_key} (Type: {current_model_config['type']}, ID: {current_model_config['id']})")
96
- log_entries.append(f"Problem Type: {problem_type_selected}")
97
- log_entries.append(f"User Unit Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}")
98
-
99
- llm_config_genesis = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": genesis_temp, "max_tokens": genesis_max_tokens}
100
- llm_config_critique = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": critique_temp, "max_tokens": critique_max_tokens}
101
- llm_config_evolution = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": evolution_temp, "max_tokens": evolution_max_tokens}
102
-
103
- # --- STAGE 1: GENESIS ---
104
- progress(0.05, desc="Stage 1: Genesis Engine - Generating Solutions...")
105
- log_entries.append("\n**------ STAGE 1: GENESIS ENGINE ------**")
106
- initial_raw_solutions = generate_initial_solutions(
107
- problem_description_text, initial_hints_text, problem_type_selected,
108
- num_initial_solutions_to_gen, llm_config_genesis
109
- )
110
- log_entries.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw solution candidate(s).")
111
- for i, sol_text in enumerate(initial_raw_solutions):
112
- log_entries.append(f" Candidate {i+1} (Raw Snippet): {str(sol_text)[:120]}...")
113
-
114
- # --- STAGE 2: CRITIQUE & AUTOMATED EVALUATION ---
115
- progress(0.25, desc="Stage 2: Critique Crucible - Evaluating Candidates...")
116
- log_entries.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
117
- evaluated_candidates_list = []
118
- for i, candidate_solution_text in enumerate(initial_raw_solutions):
119
- current_progress = 0.25 + ( (i + 1) / num_initial_solutions_to_gen ) * 0.4
120
- progress(current_progress, desc=f"Evaluating Candidate {i+1} of {num_initial_solutions_to_gen}...")
121
- log_entries.append(f"\n--- Evaluating Candidate {i+1} ---")
122
- evaluation_output_obj = evaluate_solution_candidate(
123
- str(candidate_solution_text),
124
- problem_description_text, problem_type_selected,
125
- user_provided_tests_code, llm_config_critique
126
  )
127
- evaluated_candidates_list.append({
128
- "id": i + 1,
129
- "solution_text": str(candidate_solution_text),
130
- "evaluation_obj": evaluation_output_obj
131
- })
132
- log_entries.append(f" Combined Score: {evaluation_output_obj.combined_score}/10")
133
- if evaluation_output_obj.execution_details:
134
- log_entries.append(f" Test Results: {evaluation_output_obj.execution_details.passed_tests}/{evaluation_output_obj.execution_details.total_tests} passed.")
135
- if evaluation_output_obj.execution_details.error: log_entries.append(f" Execution Error: {evaluation_output_obj.execution_details.error}")
136
- log_entries.append(f" LLM Critique (Snippet): {str(evaluation_output_obj.llm_critique_text)[:150]}...")
137
-
138
- initial_solutions_display_markdown = []
139
- for data in evaluated_candidates_list:
140
- initial_solutions_display_markdown.append(
141
- f"**Candidate {data['id']}:**\n```python\n{data['solution_text']}\n```\n\n"
142
- f"**Evaluation Verdict (Combined Score: {data['evaluation_obj'].combined_score}/10):**\n{data['evaluation_obj'].get_display_critique()}\n---"
143
  )
144
-
145
- # --- STAGE 3: SELECTION OF CHAMPION ---
146
- progress(0.7, desc="Stage 3: Selecting Champion Candidate...")
147
- log_entries.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
148
- potentially_viable_candidates = [
149
- cand for cand in evaluated_candidates_list
150
- if cand["evaluation_obj"] and cand["evaluation_obj"].combined_score > 0 and \
151
- cand["solution_text"] and not str(cand["solution_text"]).startswith("ERROR")
152
- ]
153
- if not potentially_viable_candidates:
154
- final_error_msg = "No viable candidate solutions found after generation and evaluation. All attempts may have failed or scored too low."
155
- log_entries.append(f" CRITICAL: {final_error_msg}")
156
- return "\n\n".join(initial_solutions_display_markdown), final_error_msg, "", "\n".join(log_entries), ""
157
-
158
- potentially_viable_candidates.sort(key=lambda x: x["evaluation_obj"].combined_score, reverse=True)
159
- champion_candidate_data = potentially_viable_candidates[0]
160
- log_entries.append(f"Champion Selected: Candidate {champion_candidate_data['id']} "
161
- f"(Solution Snippet: {str(champion_candidate_data['solution_text'])[:60]}...) "
162
- f"with evaluation score {champion_candidate_data['evaluation_obj'].combined_score}/10.")
163
- champion_display_markdown = (
164
- f"**Champion Candidate ID: {champion_candidate_data['id']} "
165
- f"(Original Combined Score: {champion_candidate_data['evaluation_obj'].combined_score}/10):**\n"
166
- f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
167
- f"**Original Comprehensive Evaluation for this Champion:**\n{champion_candidate_data['evaluation_obj'].get_display_critique()}"
168
- )
169
 
170
- # --- STAGE 4: EVOLUTIONARY FORGE ---
171
- progress(0.75, desc="Stage 4: Evolutionary Forge - Refining Champion...")
172
- log_entries.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
173
- evolved_solution_code = evolve_solution(
174
- str(champion_candidate_data["solution_text"]),
175
- champion_candidate_data["evaluation_obj"],
176
- problem_description_text,
177
- problem_type_selected,
178
- llm_config_evolution
179
- )
180
- log_entries.append(f"Raw Evolved Solution Text (Snippet): {str(evolved_solution_code)[:150]}...")
181
- evolved_solution_display_markdown = ""
182
- ai_test_analysis_markdown = ""
183
-
184
- if str(evolved_solution_code).startswith("ERROR"):
185
- evolved_solution_display_markdown = f"**Evolution Stage Failed:**\n{evolved_solution_code}"
186
- else:
187
- evolved_solution_display_markdown = f"**✨ AlgoForge Omega™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```"
188
- if "python" in problem_type_selected.lower() and user_provided_tests_code.strip():
189
- progress(0.9, desc="Post-Evolution: Re-testing Evolved Code...")
190
- log_entries.append("\n--- Post-Evolution Test of Evolved Code ---")
191
- evolved_code_exec_result = execute_python_code_with_tests(
192
- str(evolved_solution_code), user_provided_tests_code, timeout_seconds=10
193
- )
194
- evolved_solution_display_markdown += (
195
- f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n"
196
- f" Tests Attempted: {evolved_code_exec_result.total_tests}\n"
197
- f" Tests Passed: {evolved_code_exec_result.passed_tests}\n"
198
- f" Execution Time: {evolved_code_exec_result.execution_time:.4f}s\n"
199
- )
200
- if evolved_code_exec_result.error:
201
- evolved_solution_display_markdown += f" Execution Error/Output: {evolved_code_exec_result.error}\n"
202
- elif evolved_code_exec_result.output:
203
- evolved_solution_display_markdown += f" Execution Output (stdout):\n```\n{evolved_code_exec_result.output[:300]}\n```\n"
204
- log_entries.append(f" Evolved Code Test Results: {evolved_code_exec_result}")
205
-
206
- if evolved_code_exec_result.total_tests > 0 :
207
- progress(0.95, desc="Post-Evolution: AI Analyzing Test Results...")
208
- log_entries.append("\n--- AI Analysis of Evolved Code's Test Results ---")
209
- analysis_exec_summary = evolved_code_exec_result.error if evolved_code_exec_result.error else (evolved_code_exec_result.output if evolved_code_exec_result.output else "Tests completed.")
210
- analysis_user_prompt = format_code_test_analysis_user_prompt(str(evolved_solution_code), user_provided_tests_code, f"Passed: {evolved_code_exec_result.passed_tests}/{evolved_code_exec_result.total_tests}. Detail: {analysis_exec_summary}")
211
- analysis_system_prompt = get_system_prompt("code_execution_explainer")
212
- llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"],
213
- "temp": 0.3, "max_tokens": critique_max_tokens + 150}
214
 
215
- from core.llm_clients import call_huggingface_api, call_gemini_api
216
- explanation_response_obj = None
217
- if llm_analysis_config["type"] == "hf": explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
218
- elif llm_analysis_config["type"] == "google_gemini": explanation_response_obj = call_gemini_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
219
-
220
- if explanation_response_obj and explanation_response_obj.success:
221
- ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
222
- log_entries.append(f" AI Test Analysis (Snippet): {str(explanation_response_obj.text)[:100]}...")
223
- elif explanation_response_obj:
224
- ai_test_analysis_markdown = f"**AI Analysis of Test Performance Failed:**\n{explanation_response_obj.error}"
225
- log_entries.append(f" AI Test Analysis Error: {explanation_response_obj.error}")
226
-
227
- # CORRECTED: total_time definition using the start_time from the function scope
228
- total_time = time.time() - start_time
229
- log_entries.append(f"\n**AlgoForge Omega™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
230
- progress(1.0, desc="Cycle Complete!")
231
-
232
- return "\n\n".join(initial_solutions_display_markdown), champion_display_markdown, evolved_solution_display_markdown, "\n".join(log_entries), ai_test_analysis_markdown
233
 
234
 
235
  # --- Gradio UI Definition ---
236
- intro_markdown = """
237
- # AlgoForge Omega™ ✨: Conceptual Demo with (Simulated) Execution
238
- This version demonstrates a conceptual workflow for AI-assisted algorithm discovery and refinement,
239
- featuring **(simulated) execution of generated Python code against user-provided unit tests**.
240
-
241
- **API Keys Required in Space Secrets:**
242
- - `GOOGLE_API_KEY` (Primary): For Google Gemini API models. Ensure the "Generative Language API" (or similar) is enabled for your project.
243
- - `HF_TOKEN` (Secondary): For Hugging Face hosted models.
244
  """
245
- ui_token_status_md = ""
246
- if not GEMINI_API_READY and not HF_API_READY:
247
- ui_token_status_md = "<p style='color:red;'>⚠️ **CRITICAL: NEITHER API IS CONFIGURED. APP NON-FUNCTIONAL.**</p>"
248
- else:
249
- if GEMINI_API_READY: ui_token_status_md += "<p style='color:green;'>✅ Google Gemini API Configured.</p>"
250
- else: ui_token_status_md += "<p style='color:orange;'>⚠️ **GOOGLE_API_KEY missing/failed.** Gemini models disabled.</p>"
251
- if HF_API_READY: ui_token_status_md += "<p style='color:green;'>✅ Hugging Face API Configured.</p>"
252
- else: ui_token_status_md += "<p style='color:orange;'>⚠️ **HF_TOKEN missing/failed.** HF models disabled.</p>"
253
-
254
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="pink"), title="AlgoForge Omega™ Demo") as app_demo:
255
- gr.Markdown(intro_markdown)
256
- gr.HTML(ui_token_status_md)
257
-
258
- usable_models_available = any(
259
- AVAILABLE_MODELS_CONFIG.get(key, {}).get("type") != "none"
260
- for key in AVAILABLE_MODELS_CONFIG
261
  )
262
- if not usable_models_available:
263
- gr.Markdown("<h2 style='color:red;'>No LLM models are available for use. Check API keys and restart.</h2>")
264
- else:
265
- with gr.Row():
266
- with gr.Column(scale=2):
267
- gr.Markdown("## 💡 1. Define the Challenge")
268
- problem_type_dropdown = gr.Dropdown(choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea"], label="Problem Type", value="Python Algorithm with Tests")
269
- problem_description_textbox = gr.Textbox(lines=5, label="Problem Description")
270
- initial_hints_textbox = gr.Textbox(lines=3, label="Initial Hints (Optional)")
271
- user_tests_textbox = gr.Textbox(lines=6, label="Python Unit Tests (Optional, one `assert` per line)", placeholder="assert my_func(1) == 1")
272
- gr.Markdown("## ⚙️ 2. Configure The Forge")
273
- model_selection_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS_CONFIG.keys()), value=UI_DEFAULT_MODEL_KEY, label="LLM Core Model")
274
- num_initial_solutions_slider = gr.Slider(1, 3, value=2, step=1, label="# Initial Solutions")
275
- with gr.Accordion("Advanced LLM Parameters", open=False):
276
- genesis_temp_slider = gr.Slider(0.0, 1.0, value=0.7, step=0.05, label="Genesis Temp")
277
- genesis_max_tokens_slider = gr.Slider(256, 4096, value=1024, step=128, label="Genesis Max Tokens")
278
- critique_temp_slider = gr.Slider(0.0, 1.0, value=0.4, step=0.05, label="Critique Temp")
279
- critique_max_tokens_slider = gr.Slider(150, 2048, value=512, step=64, label="Critique Max Tokens")
280
- evolution_temp_slider = gr.Slider(0.0, 1.0, value=0.75, step=0.05, label="Evolution Temp")
281
- evolution_max_tokens_slider = gr.Slider(256, 4096, value=1536, step=128, label="Evolution Max Tokens")
282
- engage_button = gr.Button("🚀 ENGAGE ALGOFORGE OMEGA™ 🚀", variant="primary")
283
-
284
- with gr.Column(scale=3):
285
- gr.Markdown("## 🔥 3. The Forge's Output")
286
- with gr.Tabs():
287
- with gr.TabItem("📜 Candidates & Evaluations"): output_initial_solutions_markdown = gr.Markdown()
288
- with gr.TabItem("🏆 Champion"): output_champion_markdown = gr.Markdown()
289
- with gr.TabItem("🌟 Evolved & Tested"):
290
- output_evolved_markdown = gr.Markdown()
291
- output_ai_test_analysis_markdown = gr.Markdown()
292
- with gr.TabItem("🛠️ Log"): output_interaction_log_markdown = gr.Markdown()
293
 
294
  engage_button.click(
295
- fn=run_algoforge_simulation_orchestrator,
296
- inputs=[ problem_type_dropdown, problem_description_textbox, initial_hints_textbox, user_tests_textbox, num_initial_solutions_slider, model_selection_dropdown, genesis_temp_slider, genesis_max_tokens_slider, critique_temp_slider, critique_max_tokens_slider, evolution_temp_slider, evolution_max_tokens_slider ],
297
- outputs=[ output_initial_solutions_markdown, output_champion_markdown, output_evolved_markdown, output_interaction_log_markdown, output_ai_test_analysis_markdown ]
298
  )
 
299
  gr.Markdown("---")
300
- gr.Markdown("**Disclaimer:** Conceptual Omega Demo. (Simulated) unit testing. **NEVER run untrusted LLM code without robust sandboxing.**")
301
 
302
  # --- Entry Point for Running the Gradio App ---
303
  if __name__ == "__main__":
304
  print("="*80)
305
- print("AlgoForge Omega™ Conceptual Demo - Launching...")
306
- print(f" Gemini API Ready: {GEMINI_API_READY}")
307
- print(f" HF API Ready: {HF_API_READY}")
308
  if not GEMINI_API_READY and not HF_API_READY:
309
  print(" CRITICAL WARNING: No API keys seem to be configured correctly. The application will likely be non-functional.")
310
  print(f" UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")
 
1
  # algoforge_prime/app.py
2
  import gradio as gr
3
  import os
4
+ import time
5
+ import json # For potentially displaying structured data or passing complex states
+ import traceback # Needed by the generic exception handler in the UI wrapper below
6
 
7
  # --- Core Logic Imports ---
8
+ from core.llm_clients import initialize_all_clients, is_gemini_api_configured, is_hf_api_configured
9
+ initialize_all_clients()
 
10
 
 
11
  GEMINI_API_READY = is_gemini_api_configured()
12
  HF_API_READY = is_hf_api_configured()
13
 
14
  from core.generation_engine import generate_initial_solutions
15
+ from core.evaluation_engine import evaluate_solution_candidate, EvaluationResultOutput
16
  from core.evolution_engine import evolve_solution
17
  from prompts.system_prompts import get_system_prompt
18
  from prompts.prompt_templates import format_code_test_analysis_user_prompt
19
+ from core.safe_executor import execute_python_code_with_tests, ExecutionResult # For re-evaluating
20
 
21
+ # --- Application Configuration ---
22
+ # (This section should ideally move to a config file or env vars for production)
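+ # (Hypothetical sketch of that idea, for illustration only: the mapping below could instead be
+ # loaded via json.load(open(os.getenv("ALGOFORGE_MODELS_JSON", "models.json"))); the env var
+ # name is an assumption, not something this app defines.)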
23
  AVAILABLE_MODELS_CONFIG = {}
24
+ UI_DEFAULT_MODEL_KEY = None
25
+ GEMINI_1_5_PRO_LATEST_ID = "gemini-1.5-pro-latest" # Ensure this is the correct ID usable via API
 
26
  GEMINI_1_5_FLASH_LATEST_ID = "gemini-1.5-flash-latest"
27
 
28
  if GEMINI_API_READY:
29
  AVAILABLE_MODELS_CONFIG.update({
30
+ f"Google Gemini 1.5 Pro (API)": {"id": GEMINI_1_5_PRO_LATEST_ID, "type": "google_gemini"},
31
+ f"Google Gemini 1.5 Flash (API)": {"id": GEMINI_1_5_FLASH_LATEST_ID, "type": "google_gemini"},
32
+ "Legacy Gemini 1.0 Pro (API)": {"id": "gemini-1.0-pro-latest", "type": "google_gemini"},
33
  })
34
+ UI_DEFAULT_MODEL_KEY = f"Google Gemini 1.5 Pro (API)"
35
  if UI_DEFAULT_MODEL_KEY not in AVAILABLE_MODELS_CONFIG:
36
+ UI_DEFAULT_MODEL_KEY = f"Google Gemini 1.5 Flash (API)"
37
+ else: print("WARNING: app.py - Gemini API not configured.")
38
 
39
  if HF_API_READY:
40
  AVAILABLE_MODELS_CONFIG.update({
41
+ "Gemma 2B (HF Test)": {"id": "google/gemma-2b-it", "type": "hf"},
42
+ "Mistral 7B (HF)": {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf"},
 
43
  })
44
+ if not UI_DEFAULT_MODEL_KEY: UI_DEFAULT_MODEL_KEY = "Gemma 2B (HF Test)"
45
+ else: print("WARNING: app.py - HF API not configured.")
46
 
47
  if not AVAILABLE_MODELS_CONFIG:
48
+ AVAILABLE_MODELS_CONFIG["No Models Available (Setup API Keys!)"] = {"id": "dummy_error", "type": "none"}
49
+ UI_DEFAULT_MODEL_KEY = "No Models Available (Setup API Keys!)"
 
50
  elif not UI_DEFAULT_MODEL_KEY and AVAILABLE_MODELS_CONFIG:
51
  UI_DEFAULT_MODEL_KEY = list(AVAILABLE_MODELS_CONFIG.keys())[0]
52
+
53
+ # --- UI Customization (Conceptual - real CSS would be in a file) ---
54
+ # For a "WOW" UI, you'd link a custom CSS file.
55
+ # Here's a conceptual placeholder for some styles we might apply.
56
+ APP_THEME = gr.themes.Soft(
57
+ primary_hue=gr.themes.colors.blue,
58
+ secondary_hue=gr.themes.colors.sky,
59
+ neutral_hue=gr.themes.colors.slate,
60
+ font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
61
+ ).set(
62
+ # Example: input_background_fill="rgba(240, 240, 240, 0.5)" # Slightly transparent inputs
63
+ # button_primary_background_fill="linear-gradient(to bottom right, hsl(210, 80%, 50%), hsl(210, 100%, 30%))"
64
+ )
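+ # Note: with every override above commented out, .set() receives no arguments and leaves the
+ # Soft theme unchanged; the commented lines merely illustrate attributes that could be overridden.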
65
+
66
+ # --- Main Orchestration Logic (More detailed progress and error handling for UI) ---
67
+ def run_algoforge_orchestrator_ui_wrapper(
68
+ problem_type_selected: str, problem_description_text: str, initial_hints_text: str,
69
+ user_provided_tests_code: str, num_initial_solutions_to_gen: int, selected_model_ui_key: str,
70
+ genesis_temp: float, genesis_max_tokens: int, critique_temp: float, critique_max_tokens: int,
71
  evolution_temp: float, evolution_max_tokens: int,
72
+ # Gradio's Request object can give session info if needed for advanced state
73
+ # request: gr.Request
74
  ):
75
+ # This wrapper allows for more fine-grained UI updates via yielding
76
+ # and handles the overall try-except for better error display.
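+ # Gradio treats a generator callback as a streaming handler: each `yield` below pushes an
+ # incremental update to the browser before execution continues.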
77
 
78
+ log_accumulator = [f"**AlgoForge Omega™ Cycle Starting at {time.strftime('%Y-%m-%d %H:%M:%S')}**\n"]
79
+ # Initial state for UI outputs
80
+ yield {
81
+ output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🚀 Initializing AlgoForge Omega™...</p>", visible=True),
82
+ output_initial_solutions_accordion: gr.Accordion(label=" Generating Initial Candidates...", open=False, visible=True),
83
+ output_initial_solutions_markdown: gr.Markdown(value="Working...", visible=True),
84
+ output_champion_accordion: gr.Accordion(label="⏳ Awaiting Champion Selection...", open=False, visible=False),
85
+ output_champion_markdown: gr.Markdown(value="", visible=False),
86
+ output_evolved_accordion: gr.Accordion(label="⏳ Awaiting Evolution...", open=False, visible=False),
87
+ output_evolved_markdown: gr.Markdown(value="", visible=False),
88
+ output_ai_test_analysis_markdown: gr.Markdown(value="", visible=False),
89
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator), visible=True),
90
+ engage_button: gr.Button(interactive=False) # Disable button during run
91
+ }
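+ # Each yielded dict is a partial UI update: its keys are Gradio components, and every component
+ # keyed here must also appear in the click handler's `outputs` list defined further below.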
92
+
93
+ try:
94
+ start_time = time.time()
95
+
96
+ if not problem_description_text.strip():
97
+ raise ValueError("Problem Description is mandatory.")
98
+
99
+ current_model_config = AVAILABLE_MODELS_CONFIG.get(selected_model_ui_key)
100
+ if not current_model_config or current_model_config["type"] == "none":
101
+ raise ValueError(f"No valid LLM selected ('{selected_model_ui_key}'). Check API key configurations.")
102
+
103
+ log_accumulator.append(f"Selected Model: {selected_model_ui_key} (Type: {current_model_config['type']}, ID: {current_model_config['id']})")
104
+ log_accumulator.append(f"Problem Type: {problem_type_selected}")
105
+ log_accumulator.append(f"User Tests Provided: {'Yes' if user_provided_tests_code.strip() else 'No'}\n")
106
+ yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
107
+
108
+
109
+ llm_config_genesis = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": genesis_temp, "max_tokens": genesis_max_tokens}
110
+ llm_config_critique = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": critique_temp, "max_tokens": critique_max_tokens}
111
+ llm_config_evolution = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": evolution_temp, "max_tokens": evolution_max_tokens}
112
+
113
+ # --- STAGE 1: GENESIS ---
114
+ yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧬 Stage 1: Genesis Engine - Generating Solutions...</p>") }
115
+ log_accumulator.append("**------ STAGE 1: GENESIS ENGINE ------**")
116
+ initial_raw_solutions = generate_initial_solutions(problem_description_text, initial_hints_text, problem_type_selected, num_initial_solutions_to_gen, llm_config_genesis)
117
+ log_accumulator.append(f"Genesis Engine produced {len(initial_raw_solutions)} raw candidate(s).")
118
+ for i, sol_text in enumerate(initial_raw_solutions): log_accumulator.append(f" Candidate {i+1} (Raw Snippet): {str(sol_text)[:100]}...")
119
+ yield { output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
120
+
121
+
122
+ # --- STAGE 2: CRITIQUE & AUTOMATED EVALUATION ---
123
+ yield {
124
+ output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🔬 Stage 2: Critique Crucible - Evaluating Candidates...</p>"),
125
+ output_initial_solutions_accordion: gr.Accordion(label="Initial Candidates & Evaluations (Processing...)", open=True)
126
+ }
127
+ log_accumulator.append("\n**------ STAGE 2: CRITIQUE CRUCIBLE & AUTOMATED EVALUATION ------**")
128
+ evaluated_candidates_list = []
129
+ initial_solutions_md_accumulator = ["**Initial Candidates & Detailed Evaluations:**\n"]
130
+
131
+ for i, candidate_solution_text in enumerate(initial_raw_solutions):
132
+ log_accumulator.append(f"\n--- Evaluating Candidate {i+1} ---")
133
+ yield { output_status_bar: gr.HTML(value=f"<p style='color: dodgerblue;'>🔬 Evaluating Candidate {i+1} of {num_initial_solutions_to_gen}...</p>") }
134
+
135
+ evaluation_output_obj = evaluate_solution_candidate(str(candidate_solution_text), problem_description_text, problem_type_selected, user_provided_tests_code, llm_config_critique)
136
+ evaluated_candidates_list.append({"id": i + 1, "solution_text": str(candidate_solution_text), "evaluation_obj": evaluation_output_obj})
137
+
138
+ log_accumulator.append(f" Combined Score: {evaluation_output_obj.combined_score}/10")
139
+ # ... (more detailed logging from evaluation_obj as before)
140
+
141
+ # Update UI with this candidate's evaluation progressively
142
+ current_eval_md = (
143
+ f"**Candidate {i+1} (Score: {evaluation_output_obj.combined_score}/10):**\n"
144
+ f"```python\n{str(candidate_solution_text)}\n```\n\n"
145
+ f"**Evaluation Verdict:**\n{evaluation_output_obj.get_display_critique()}\n---"
146
+ )
147
+ initial_solutions_md_accumulator.append(current_eval_md)
148
+ yield {
149
+ output_initial_solutions_markdown: gr.Markdown(value="\n".join(initial_solutions_md_accumulator)),
150
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
151
+ }
152
+
153
+ # --- STAGE 3: SELECTION OF CHAMPION ---
154
+ yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🏆 Stage 3: Selecting Champion Candidate...</p>") }
155
+ log_accumulator.append("\n**------ STAGE 3: CHAMPION SELECTION ------**")
156
+ potentially_viable_candidates = [c for c in evaluated_candidates_list if c["evaluation_obj"] and c["evaluation_obj"].combined_score > 0 and not str(c["solution_text"]).startswith("ERROR")]
157
+
158
+ if not potentially_viable_candidates:
159
+ raise ValueError("No viable candidate solutions found after evaluation. All attempts may have failed or scored too low.")
160
+
161
+ champion_candidate_data = sorted(potentially_viable_candidates, key=lambda x: x["evaluation_obj"].combined_score, reverse=True)[0]
162
+ log_accumulator.append(f"Champion Selected: Candidate {champion_candidate_data['id']} with score {champion_candidate_data['evaluation_obj'].combined_score}/10.")
163
+ champion_display_markdown = (
164
+ f"**Champion Candidate ID: {champion_candidate_data['id']} "
165
+ f"(Original Score: {champion_candidate_data['evaluation_obj'].combined_score}/10):**\n"
166
+ f"```python\n{champion_candidate_data['solution_text']}\n```\n\n"
167
+ f"**Original Comprehensive Evaluation:**\n{champion_candidate_data['evaluation_obj'].get_display_critique()}"
168
  )
169
+ yield {
170
+ output_champion_accordion: gr.Accordion(label=f"🏆 Champion: Candidate {champion_candidate_data['id']} (Score: {champion_candidate_data['evaluation_obj'].combined_score}/10)", open=True, visible=True),
171
+ output_champion_markdown: gr.Markdown(value=champion_display_markdown, visible=True),
172
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator))
173
+ }
174
+
175
+ # --- STAGE 4: EVOLUTIONARY FORGE ---
176
+ yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🛠️ Stage 4: Evolutionary Forge - Refining Champion...</p>") }
177
+ log_accumulator.append("\n**------ STAGE 4: EVOLUTIONARY FORGE ------**")
178
+ evolved_solution_code = evolve_solution(
179
+ str(champion_candidate_data["solution_text"]), champion_candidate_data["evaluation_obj"],
180
+ problem_description_text, problem_type_selected, llm_config_evolution
181
  )
182
+ log_accumulator.append(f"Raw Evolved Solution (Snippet): {str(evolved_solution_code)[:100]}...")
183
+
184
+ evolved_solution_display_markdown = ""
185
+ ai_test_analysis_markdown = ""
186
 
187
+ if str(evolved_solution_code).startswith("ERROR"):
188
+ evolved_solution_display_markdown = f"<p style='color: red;'>**Evolution Stage Failed:**<br>{evolved_solution_code}</p>"
189
+ else:
190
+ evolved_solution_display_markdown = f"**✨ AlgoForge Omega™ Evolved Artifact ✨:**\n```python\n{evolved_solution_code}\n```"
191
+ if "python" in problem_type_selected.lower() and user_provided_tests_code.strip():
192
+ yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧪 Post-Evolution: Re-testing Evolved Code...</p>") }
193
+ log_accumulator.append("\n--- Post-Evolution Test of Evolved Code ---")
194
+ from core.safe_executor import execute_python_code_with_tests # Redundant with the module-level import above; kept as a defensive local import
195
+ evolved_code_exec_result = execute_python_code_with_tests(str(evolved_solution_code), user_provided_tests_code, timeout_seconds=10)
196
 
197
+ evolved_solution_display_markdown += (
198
+ f"\n\n**Post-Evolution Automated Test Results (Simulated):**\n"
199
+ f" Status: {'SUCCESS' if evolved_code_exec_result.success else 'FAILED/ERRORS'}\n"
200
+ f" Tests Attempted: {evolved_code_exec_result.total_tests}\n"
201
+ f" Tests Passed: {evolved_code_exec_result.passed_tests}\n"
202
+ f" Execution Time: {evolved_code_exec_result.execution_time:.4f}s\n"
203
+ )
204
+ if evolved_code_exec_result.compilation_error: evolved_solution_display_markdown += f" Compilation Error: {evolved_code_exec_result.compilation_error}\n"
205
+ elif evolved_code_exec_result.timeout_error: evolved_solution_display_markdown += f" Timeout Error.\n"
206
+ elif evolved_code_exec_result.error: evolved_solution_display_markdown += f" Execution Error/Output: {evolved_code_exec_result.overall_error_summary}\n"
207
+ elif evolved_code_exec_result.stdout: evolved_solution_display_markdown += f" Execution Stdout:\n```\n{evolved_code_exec_result.stdout[:300].strip()}\n```\n"
208
+ log_accumulator.append(f" Evolved Code Test Results: {evolved_code_exec_result}")
209
+
210
+ if evolved_code_exec_result.total_tests > 0 :
211
+ yield { output_status_bar: gr.HTML(value="<p style='color: dodgerblue;'>🧠 Post-Evolution: AI Analyzing Test Results...</p>") }
212
+ log_accumulator.append("\n--- AI Analysis of Evolved Code's Test Results ---")
213
+ exec_summary_for_analysis = str(evolved_code_exec_result.overall_error_summary or "Tests completed.")
214
+ analysis_user_prompt = format_code_test_analysis_user_prompt(str(evolved_solution_code), user_provided_tests_code, f"Passed: {evolved_code_exec_result.passed_tests}/{evolved_code_exec_result.total_tests}. Detail: {exec_summary_for_analysis}")
215
+ analysis_system_prompt = get_system_prompt("code_execution_explainer")
216
+ llm_analysis_config = {"type": current_model_config["type"], "model_id": current_model_config["id"], "temp": 0.3, "max_tokens": critique_max_tokens + 200}
217
+
218
+ from core.llm_clients import call_huggingface_api, call_gemini_api
219
+ explanation_response_obj = None
220
+ if llm_analysis_config["type"] == "hf": explanation_response_obj = call_huggingface_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
221
+ elif llm_analysis_config["type"] == "google_gemini": explanation_response_obj = call_gemini_api(analysis_user_prompt, llm_analysis_config["model_id"], llm_analysis_config["temp"], llm_analysis_config["max_tokens"], analysis_system_prompt)
222
+
223
+ if explanation_response_obj and explanation_response_obj.success:
224
+ ai_test_analysis_markdown = f"**AI Analysis of Evolved Code's Test Performance:**\n{explanation_response_obj.text}"
225
+ elif explanation_response_obj:
226
+ ai_test_analysis_markdown = f"<p style='color: orange;'>**AI Analysis of Test Performance Failed:**<br>{explanation_response_obj.error}</p>"
227
+ log_accumulator.append(f" AI Test Analysis result logged.")
228
+
229
+ total_time = time.time() - start_time
230
+ log_accumulator.append(f"\n**AlgoForge Omega™ Cycle Complete. Total time: {total_time:.2f} seconds.**")
231
+ yield {
232
+ output_status_bar: gr.HTML(value=f"<p style='color: green;'>✅ Cycle Complete! ({total_time:.2f}s)</p>"),
233
+ output_evolved_accordion: gr.Accordion(label="🌟 Evolved Artifact & Test Analysis", open=True, visible=True),
234
+ output_evolved_markdown: gr.Markdown(value=evolved_solution_display_markdown, visible=True),
235
+ output_ai_test_analysis_markdown: gr.Markdown(value=ai_test_analysis_markdown, visible=True if ai_test_analysis_markdown else False),
236
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
237
+ engage_button: gr.Button(interactive=True) # Re-enable button
238
+ }
239
+
240
+ except ValueError as ve: # Catch our specific input/config errors
241
+ log_accumulator.append(f"\n**INPUT/CONFIG ERROR:** {ve}")
242
+ yield {
243
+ output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ CONFIGURATION ERROR: {ve}</p>", visible=True),
244
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
245
+ engage_button: gr.Button(interactive=True)
246
+ }
247
+ except Exception as e:
248
+ log_accumulator.append(f"\n**UNEXPECTED RUNTIME ERROR:** {type(e).__name__} - {e}\n{traceback.format_exc()}")
249
+ # For other outputs, we might want to clear them or show a general error message
250
+ yield {
251
+ output_status_bar: gr.HTML(value=f"<p style='color: red;'>❌ UNEXPECTED ERROR: {e}. Check logs.</p>", visible=True),
252
+ output_initial_solutions_markdown: gr.Markdown(value="An unexpected error occurred. Please check the interaction log."),
253
+ output_champion_markdown: gr.Markdown(value="Error state."),
254
+ output_evolved_markdown: gr.Markdown(value="Error state."),
255
+ output_ai_test_analysis_markdown: gr.Markdown(value="Error state."),
256
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
257
+ engage_button: gr.Button(interactive=True)
258
+ }
259
 
260
 
261
  # --- Gradio UI Definition ---
262
+ # (This section is the full UI layout with improvements)
263
+ css = """
264
+ body { font-family: 'Inter', sans-serif; }
265
+ .gradio-container { max-width: 1280px !important; margin: auto !important; }
266
+ .gr-button-primary {
267
+ background: linear-gradient(135deg, #007bff 0%, #0056b3 100%) !important;
268
+ color: white !important;
269
+ border: none !important;
270
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
271
+ transition: all 0.2s ease-in-out !important;
272
+ }
273
+ .gr-button-primary:hover {
274
+ transform: translateY(-2px) !important;
275
+ box-shadow: 0 6px 12px rgba(0, 0, 0, 0.15) !important;
276
+ }
277
+ .status-bar p {
278
+ padding: 8px 12px;
279
+ border-radius: 6px;
280
+ font-weight: 500;
281
+ text-align: center;
282
+ margin-bottom: 10px; /* Add some space below status bar */
283
+ }
284
+ .accordion-section .gr-markdown { padding-top: 5px; padding-bottom: 5px; }
285
+ .output-tabs .gr-tabitem {min-height: 400px;} /* Ensure tabs have some min height */
286
  """
287
+
288
+ with gr.Blocks(theme=APP_THEME, css=css, title="✨ AlgoForge Omega™ ✨") as app_demo:
289
+ gr.Markdown("# AlgoForge Omega™ ✨\n### Conceptual AI-Powered Algorithm & Application Foundry")
290
+ gr.Markdown(
291
+ "Define a challenge, configure the AI forge, and witness the (conceptual) evolution of solutions, "
292
+ "now with (simulated) unit testing and more detailed feedback loops!"
293
  )
294
+
295
+ with gr.Row(equal_height=False):
296
+ # --- INPUT COLUMN ---
297
+ with gr.Column(scale=2, min_width=400):
298
+ gr.Markdown("## 💡 1. Define the Challenge")
299
+ with gr.Group():
300
+ problem_type_dropdown = gr.Dropdown(
301
+ choices=["Python Algorithm with Tests", "Python Algorithm (Critique Only)", "General Algorithm Idea", "Conceptual System Design", "Pseudocode Refinement"],
302
+ label="Problem Type", value="Python Algorithm with Tests",
303
+ info="Select '...with Tests' to enable (simulated) unit testing if you provide tests below."
304
+ )
305
+ problem_description_textbox = gr.Textbox(
306
+ lines=7, label="Problem Description / Desired Outcome",
307
+ placeholder="Example for 'Python Algorithm with Tests':\n`def calculate_factorial(n: int) -> int:`\nCalculates factorial of n. Should handle n=0 (returns 1) and raise ValueError for n<0."
308
+ )
309
+ initial_hints_textbox = gr.Textbox(
310
+ lines=4, label="Initial Thoughts / Constraints / Seed Ideas (Optional)",
311
+ placeholder="E.g., 'Prefer an iterative solution over recursive for factorial.' or 'Consider time complexity and edge cases like empty inputs.'"
312
+ )
313
+ user_tests_textbox = gr.Textbox(
314
+ lines=7, label="Python Unit Tests (Optional, one `assert` per line)",
315
+ placeholder="assert calculate_factorial(0) == 1\nassert calculate_factorial(5) == 120\n# For expected errors (advanced, simulated):\n# try:\n# calculate_factorial(-1)\n# except ValueError:\n# assert True\n# else:\n# assert False, \"ValueError not raised\"",
316
+ info="For 'Python Algorithm with Tests'. Ensure function names match your problem description. Basic try-except for error testing is crudely simulated."
317
+ )
318
+
319
+ gr.Markdown("## ⚙️ 2. Configure The Forge")
320
+ with gr.Group():
321
+ api_status_html = gr.HTML() # For dynamic API status
322
+
323
+ # Logic to set API status text (must be done after initialize_all_clients)
324
+ status_messages = []
325
+ if not GEMINI_API_READY and not HF_API_READY:
326
+ status_messages.append("<p style='color:red; font-weight:bold;'>⚠️ CRITICAL: NO APIs CONFIGURED. App non-functional.</p>")
327
+ else:
328
+ if GEMINI_API_READY: status_messages.append("<p style='color:green;'>✅ Google Gemini API Ready.</p>")
329
+ else: status_messages.append("<p style='color:orange;'>⚠️ Google Gemini API NOT Ready (Check GOOGLE_API_KEY).</p>")
330
+ if HF_API_READY: status_messages.append("<p style='color:green;'>✅ Hugging Face API Ready.</p>")
331
+ else: status_messages.append("<p style='color:orange;'>⚠️ Hugging Face API NOT Ready (Check HF_TOKEN).</p>")
332
+ api_status_html.value = "".join(status_messages)
333
+
334
+
335
+ model_selection_dropdown = gr.Dropdown(
336
+ choices=list(AVAILABLE_MODELS_CONFIG.keys()),
337
+ value=UI_DEFAULT_MODEL_KEY if UI_DEFAULT_MODEL_KEY in AVAILABLE_MODELS_CONFIG else (list(AVAILABLE_MODELS_CONFIG.keys())[0] if AVAILABLE_MODELS_CONFIG else None),
338
+ label="LLM Core Model",
339
+ info="Ensure the corresponding API key is correctly set in Space Secrets."
340
+ )
341
+ num_initial_solutions_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="# Initial Solutions (Genesis Engine)", info="More solutions take longer but provide more diversity.")
342
+
343
+ with gr.Accordion("Advanced LLM Parameters (Tune with Caution!)", open=False):
344
+ with gr.Row():
345
+ genesis_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.7, step=0.05, label="Genesis Temp")
346
+ genesis_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1024, step=128, label="Genesis Max Tokens")
347
+ with gr.Row():
348
+ critique_temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Critique Temp")
349
+ critique_max_tokens_slider = gr.Slider(minimum=150, maximum=2048, value=768, step=64, label="Critique Max Tokens")
350
+ with gr.Row():
351
+ evolution_temp_slider = gr.Slider(minimum=0.0, maximum=1.2, value=0.75, step=0.05, label="Evolution Temp")
352
+ evolution_max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=128, label="Evolution Max Tokens")
353
+
354
+ engage_button = gr.Button("🚀 ENGAGE ALGOFORGE OMEGA™ 🚀", variant="primary", size="lg", elem_id="engage_button_elem")
355
+
356
+ # --- OUTPUT COLUMN ---
357
+ with gr.Column(scale=3, min_width=600):
358
+ gr.Markdown("## 🔥 3. The Forge's Output")
359
+ output_status_bar = gr.HTML(value="<p>Idle. Define a challenge and engage!</p>", elem_classes=["status-bar"], visible=True)
360
+
361
+ with gr.Tabs(elem_id="output_tabs_elem", elem_classes=["output-tabs"]):
362
+ with gr.TabItem("📜 Initial Candidates & Evaluations", id="tab_initial_evals"):
363
+ output_initial_solutions_accordion = gr.Accordion(label="Initial Candidates & Evaluations", open=True, visible=False, elem_classes=["accordion-section"])
364
+ with output_initial_solutions_accordion:
365
+ output_initial_solutions_markdown = gr.Markdown(visible=True)
366
+
367
+ with gr.TabItem("🏆 Champion Candidate", id="tab_champion"):
368
+ output_champion_accordion = gr.Accordion(label="Champion Candidate (Pre-Evolution)", open=True, visible=False, elem_classes=["accordion-section"])
369
+ with output_champion_accordion:
370
+ output_champion_markdown = gr.Markdown(visible=True)
371
+
372
+ with gr.TabItem("🌟 Evolved & Tested", id="tab_evolved"):
373
+ output_evolved_accordion = gr.Accordion(label="Evolved Artifact & Test Analysis", open=True, visible=False, elem_classes=["accordion-section"])
374
+ with output_evolved_accordion:
375
+ output_evolved_markdown = gr.Markdown(visible=True)
376
+ output_ai_test_analysis_markdown = gr.Markdown(visible=True, label="AI Analysis of Evolved Code's Tests")
377
+
378
+ with gr.TabItem("🛠️ Interaction Log", id="tab_log"):
379
+ with gr.Accordion(label="Developer Interaction Log", open=True, elem_classes=["accordion-section"]): # Always open log
380
+ output_interaction_log_markdown = gr.Markdown(value="Log will appear here...", visible=True)
381
 
382
+ # Connect button to the orchestration function wrapper
383
+ # The wrapper handles UI updates via yield
384
  engage_button.click(
385
+ fn=run_algoforge_orchestrator_ui_wrapper, # Call the wrapper
386
+ inputs=[
387
+ problem_type_dropdown, problem_description_textbox, initial_hints_textbox, user_tests_textbox,
388
+ num_initial_solutions_slider, model_selection_dropdown,
389
+ genesis_temp_slider, genesis_max_tokens_slider,
390
+ critique_temp_slider, critique_max_tokens_slider,
391
+ evolution_temp_slider, evolution_max_tokens_slider
392
+ ],
393
+ outputs=[ # These are the components updated by the `yield` statements
394
+ output_status_bar,
395
+ output_initial_solutions_accordion, output_initial_solutions_markdown,
396
+ output_champion_accordion, output_champion_markdown,
397
+ output_evolved_accordion, output_evolved_markdown, output_ai_test_analysis_markdown,
398
+ output_interaction_log_markdown,
399
+ engage_button # To disable/re-enable it
400
+ ]
401
  )
402
+
403
  gr.Markdown("---")
404
+ gr.Markdown(
405
+ "**Disclaimer:** This is a conceptual, educational demonstration. "
406
+ "The (simulated) unit testing feature is for illustrative purposes. "
407
+ "**NEVER run LLM-generated code from an untrusted source in an unrestricted environment.** "
408
+ "Implementing robust and secure code sandboxing is complex and absolutely critical for safety in real-world applications. "
409
+ "LLM outputs always require careful human review and verification."
410
+ )
411
+ gr.HTML("<p style='text-align:center; font-size:0.9em; color:grey;'>AlgoForge Omega™ - Powered by Gradio, Gemini & Hugging Face Models</p>")
412
+
413
 
414
  # --- Entry Point for Running the Gradio App ---
415
  if __name__ == "__main__":
416
  print("="*80)
417
+ print("AlgoForge Omega™ Conceptual Demo (WOW UI Attempt) - Launching...")
418
+ print(f" Google Gemini API Configured (from app.py check): {GEMINI_API_READY}")
419
+ print(f" Hugging Face API Configured (from app.py check): {HF_API_READY}")
420
  if not GEMINI_API_READY and not HF_API_READY:
421
  print(" CRITICAL WARNING: No API keys seem to be configured correctly. The application will likely be non-functional.")
422
  print(f" UI Default Model Key: {UI_DEFAULT_MODEL_KEY}")