# algoforge_prime/core/generation_engine.py
from .llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
from ..prompts.system_prompts import get_system_prompt
from ..prompts.prompt_templates import format_genesis_user_prompt


def generate_initial_solutions(
    problem_description,
    initial_hints,
    problem_type,
    num_solutions_to_generate,
    llm_client_config,  # Dict: {"type": ..., "model_id": ..., "temp": ..., "max_tokens": ...}
):
    """Generate candidate "genesis" solutions for a problem via the configured LLM.

    Returns a list of strings: each entry is either the model's solution text
    or an "ERROR (...)" marker describing why that attempt failed.
    """
    solutions = []
    system_p_genesis = get_system_prompt("genesis", problem_type)

    for i in range(num_solutions_to_generate):
        # Each attempt gets its own user prompt so the template can vary
        # wording by attempt number (i + 1 of num_solutions_to_generate).
        user_p_genesis = format_genesis_user_prompt(
            problem_description, initial_hints, i + 1, num_solutions_to_generate
        )

        # Dispatch to the configured provider; both clients return an LLMResponse.
        llm_response_obj = None
        if llm_client_config["type"] == "hf":
            llm_response_obj = call_huggingface_api(
                user_p_genesis, llm_client_config["model_id"],
                temperature=llm_client_config["temp"],
                max_new_tokens=llm_client_config["max_tokens"],
                system_prompt_text=system_p_genesis,
            )
        elif llm_client_config["type"] == "google_gemini":
            llm_response_obj = call_gemini_api(
                user_p_genesis, llm_client_config["model_id"],
                temperature=llm_client_config["temp"],
                max_new_tokens=llm_client_config["max_tokens"],
                system_prompt_text=system_p_genesis,
            )

        if llm_response_obj and llm_response_obj.success:
            solutions.append(llm_response_obj.text)
        elif llm_response_obj:  # client returned an LLMResponse carrying an error
            solutions.append(f"ERROR (Genesis Attempt {i+1}): {llm_response_obj.error}")
        else:  # unrecognized client type, or a client failed to return an LLMResponse
            solutions.append(f"ERROR (Genesis Attempt {i+1}): Unknown error during LLM call.")

    return solutions
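

# --- Illustrative usage sketch (not part of the module's runtime path) ---
# A minimal example of how this function might be driven. The config keys
# ("type", "model_id", "temp", "max_tokens") and the "hf" / "google_gemini"
# type values come from the dispatch above; the specific model_id, hints,
# and problem_type shown here are hypothetical placeholders.
if __name__ == "__main__":
    example_config = {
        "type": "hf",                       # or "google_gemini"
        "model_id": "some-org/some-model",  # hypothetical placeholder
        "temp": 0.7,
        "max_tokens": 512,
    }
    candidates = generate_initial_solutions(
        problem_description="Sort a list of integers without using built-in sort.",
        initial_hints="Consider a divide-and-conquer approach.",
        problem_type="code",  # assumed to be a key understood by get_system_prompt
        num_solutions_to_generate=2,
        llm_client_config=example_config,
    )
    # Each entry is either solution text or an "ERROR (Genesis Attempt N): ..." marker.
    for idx, sol in enumerate(candidates, start=1):
        print(f"--- Candidate {idx} ---\n{sol}\n")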