mgbam committed on
Commit
3fd2bb1
·
verified ·
1 Parent(s): b967045

Update core/generation_engine.py

Browse files
Files changed (1) hide show
  1. core/generation_engine.py +22 -12
core/generation_engine.py CHANGED
@@ -1,24 +1,33 @@
1
  # algoforge_prime/core/generation_engine.py
2
  from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
3
- from ..prompts.system_prompts import get_system_prompt
4
  from ..prompts.prompt_templates import format_genesis_user_prompt
5
 
6
  def generate_initial_solutions(
7
  problem_description,
8
  initial_hints,
9
- problem_type,
10
  num_solutions_to_generate,
11
  llm_client_config # Dict: {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
12
  ):
13
- solutions = []
14
- system_p_genesis = get_system_prompt("genesis", problem_type)
 
 
 
 
 
 
 
 
 
15
 
16
  for i in range(num_solutions_to_generate):
17
  user_p_genesis = format_genesis_user_prompt(
18
  problem_description, initial_hints, i + 1, num_solutions_to_generate
19
  )
20
 
21
- llm_response_obj = None
22
  if llm_client_config["type"] == "hf":
23
  llm_response_obj = call_huggingface_api(
24
  user_p_genesis, llm_client_config["model_id"],
@@ -31,12 +40,13 @@ def generate_initial_solutions(
31
  temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
32
  system_prompt_text=system_p_genesis
33
  )
 
 
 
34
 
35
- if llm_response_obj and llm_response_obj.success:
36
- solutions.append(llm_response_obj.text)
37
- elif llm_response_obj: # Error occurred
38
- solutions.append(f"ERROR (Genesis Attempt {i+1}): {llm_response_obj.error}")
39
- else: # Should not happen if LLMResponse always returned
40
- solutions.append(f"ERROR (Genesis Attempt {i+1}): Unknown error during LLM call.")
41
 
42
- return solutions
 
1
  # algoforge_prime/core/generation_engine.py
2
  from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
3
+ from ..prompts.system_prompts import get_system_prompt # Relative import from parent
4
  from ..prompts.prompt_templates import format_genesis_user_prompt
5
 
6
  def generate_initial_solutions(
7
  problem_description,
8
  initial_hints,
9
+ problem_type, # e.g., "Python Algorithm with Tests"
10
  num_solutions_to_generate,
11
  llm_client_config # Dict: {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
12
  ):
13
+ """
14
+ Generates a list of initial solution strings using the configured LLM.
15
+ Returns a list of strings, where each string is either a solution or an error message.
16
+ """
17
+ solutions_or_errors = []
18
+ # Select system prompt based on problem type, more specific for Python
19
+ system_p_key = "genesis_general"
20
+ if "python" in problem_type.lower():
21
+ system_p_key = "genesis_python"
22
+ system_p_genesis = get_system_prompt(system_p_key)
23
+
24
 
25
  for i in range(num_solutions_to_generate):
26
  user_p_genesis = format_genesis_user_prompt(
27
  problem_description, initial_hints, i + 1, num_solutions_to_generate
28
  )
29
 
30
+ llm_response_obj = None # type: LLMResponse
31
  if llm_client_config["type"] == "hf":
32
  llm_response_obj = call_huggingface_api(
33
  user_p_genesis, llm_client_config["model_id"],
 
40
  temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
41
  system_prompt_text=system_p_genesis
42
  )
43
+ else:
44
+ solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1}): Unknown LLM client type '{llm_client_config['type']}'")
45
+ continue
46
 
47
+ if llm_response_obj.success:
48
+ solutions_or_errors.append(llm_response_obj.text)
49
+ else:
50
+ solutions_or_errors.append(f"ERROR (Genesis Attempt {i+1} with {llm_response_obj.model_id_used}): {llm_response_obj.error}")
 
 
51
 
52
+ return solutions_or_errors