mgbam committed on
Commit
a0a78d2
·
verified ·
1 Parent(s): a98e0b1

Update core/evolution_engine.py

Browse files
Files changed (1) hide show
  1. core/evolution_engine.py +5 -4
core/evolution_engine.py CHANGED
@@ -1,10 +1,11 @@
1
  # algoforge_prime/core/evaluation_engine.py
2
  import random
3
- from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
4
- from ..prompts.system_prompts import get_system_prompt
5
- from ..prompts.prompt_templates import format_critique_user_prompt
6
  # Import our (simulated) safe executor
7
- from .safe_executor import execute_python_code_with_tests, ExecutionResult # Assuming it's in the same 'core' package
 
 
8
 
9
  class EvaluationResultOutput: # Renamed to avoid conflict with safe_executor.ExecutionResult
10
  def __init__(self, combined_score=0, llm_critique_text="", execution_details: ExecutionResult = None, raw_llm_response=None):
 
1
  # algoforge_prime/core/evaluation_engine.py
2
  import random
3
+
4
+ from prompts.prompt_templates import format_critique_user_prompt
 
5
  # Import our (simulated) safe executor
6
+ from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse # Absolute
7
+ from prompts.system_prompts import get_system_prompt #
8
+ from safe_executor import execute_python_code_with_tests, ExecutionResult # Assuming it's in the same 'core' package
9
 
10
  class EvaluationResultOutput: # Renamed to avoid conflict with safe_executor.ExecutionResult
11
  def __init__(self, combined_score=0, llm_critique_text="", execution_details: ExecutionResult = None, raw_llm_response=None):