mgbam committed
Commit 4e61147 · verified · 1 parent: 32333bf

Update core/generation_engine.py

Files changed (1):
  1. core/generation_engine.py +7 -14
core/generation_engine.py CHANGED
@@ -1,8 +1,7 @@
 # algoforge_prime/core/generation_engine.py
-from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
-from prompts.system_prompts import get_system_prompt # Absolute import from project root
-from prompts.prompt_templates import format_genesis_user_prompt
-
+from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse # Changed to absolute
+from prompts.system_prompts import get_system_prompt # Changed to absolute
+from prompts.prompt_templates import format_genesis_user_prompt # Changed to absolute
 
 def generate_initial_solutions(
     problem_description,
@@ -11,32 +10,26 @@ def generate_initial_solutions(
     num_solutions_to_generate,
     llm_client_config # Dict: {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
 ):
-    """
-    Generates a list of initial solution strings using the configured LLM.
-    Returns a list of strings, where each string is either a solution or an error message.
-    """
     solutions_or_errors = []
-    # Select system prompt based on problem type, more specific for Python
     system_p_key = "genesis_general"
     if "python" in problem_type.lower():
         system_p_key = "genesis_python"
-    system_p_genesis = get_system_prompt(system_p_key)
-
+    system_p_genesis = get_system_prompt(system_p_key) # Uses the imported function
 
     for i in range(num_solutions_to_generate):
-        user_p_genesis = format_genesis_user_prompt(
+        user_p_genesis = format_genesis_user_prompt( # Uses the imported function
             problem_description, initial_hints, i + 1, num_solutions_to_generate
         )
 
         llm_response_obj = None # type: LLMResponse
         if llm_client_config["type"] == "hf":
-            llm_response_obj = call_huggingface_api(
+            llm_response_obj = call_huggingface_api( # Uses the imported function
                 user_p_genesis, llm_client_config["model_id"],
                 temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
                 system_prompt_text=system_p_genesis
             )
         elif llm_client_config["type"] == "google_gemini":
-            llm_response_obj = call_gemini_api(
+            llm_response_obj = call_gemini_api( # Uses the imported function
                 user_p_genesis, llm_client_config["model_id"],
                 temperature=llm_client_config["temp"], max_new_tokens=llm_client_config["max_tokens"],
                 system_prompt_text=system_p_genesis
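
For orientation, a minimal sketch of how the updated function might be called. The config keys (type, model_id, temp, max_tokens) come from the inline comment in the diff; the parameter names problem_type and initial_hints are inferred from the function body (the signature lines declaring them are elided from the diff), and the model id and prompt values below are placeholders, not part of this commit.

# Hypothetical call site -- a sketch only; the real caller is not shown in this commit.
from core.generation_engine import generate_initial_solutions

llm_client_config = {
    "type": "google_gemini",         # or "hf" to route to call_huggingface_api
    "model_id": "gemini-1.5-flash",  # placeholder model id, for illustration
    "temp": 0.7,
    "max_tokens": 1024,
}

# A problem_type containing "python" selects the "genesis_python" system prompt;
# anything else falls back to "genesis_general".
results = generate_initial_solutions(
    problem_description="Return the nth Fibonacci number.",
    problem_type="python",
    initial_hints="Prefer an iterative solution.",
    num_solutions_to_generate=3,
    llm_client_config=llm_client_config,
)

Dispatching on llm_client_config["type"] keeps the engine agnostic about which backend produced the LLMResponse, so supporting another provider only touches the if/elif chain here and the client functions in core.llm_clients.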