from enum import Enum
from typing import Optional
from logger import Logger
TEST_MODE = False
class LLMInterface(Enum):
    OPENAI = "OpenAI"
    HUGGINGFACE = "HuggingFace"
    # Add your own if you like (then adjust the LLMFactory); a dispatch sketch follows below.
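
# Illustrative sketch only: shows how a factory could branch on LLMInterface once a
# new member is added. The real LLMFactory lives elsewhere in this project; the
# function name and the placeholder tuples below are assumptions, not actual clients.
def _example_llm_dispatch(preset: "AgentPreset"):
    if preset.get_interface() is LLMInterface.OPENAI:
        # e.g. build an OpenAI-compatible client here (LM Studio exposes one)
        return ("openai", preset.get_model_name())
    if preset.get_interface() is LLMInterface.HUGGINGFACE:
        # e.g. build a HuggingFace client here
        return ("huggingface", preset.get_model_name())
    raise ValueError(f"Unsupported interface: {preset.get_interface()}")
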
class AgentPreset:
    def __init__(self, interface: LLMInterface, model_name: str, temperature: Optional[float] = None,
                 max_tokens: Optional[int] = None, repeat_penalty: Optional[float] = None):
        """
        Initialize an AgentPreset with LLM configuration parameters.

        Args:
            interface: The model interface to use (e.g., OPENAI, HUGGINGFACE)
            model_name: Name of the model to use
            temperature: Controls randomness in responses (typically 0.0-1.0; higher is more random)
            max_tokens: Maximum number of tokens to generate in a response
            repeat_penalty: Penalty applied to token repetition
        """
        self.interface = interface
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.repeat_penalty = repeat_penalty
    def get_interface(self) -> LLMInterface:
        """
        Get the model interface.

        Returns:
            LLMInterface: The interface used for this agent.
        """
        return self.interface

    def get_model_name(self) -> str:
        """
        Get the model name.

        Returns:
            str: The name of the model.
        """
        return self.model_name

    def get_temperature(self) -> float | None:
        """
        Get the temperature setting.

        Returns:
            float | None: The temperature value controlling randomness, or None if unset.
        """
        return self.temperature

    def get_max_tokens(self) -> int | None:
        """
        Get the maximum tokens setting.

        Returns:
            int | None: The maximum number of tokens to generate, or None if unset.
        """
        return self.max_tokens

    def get_repeat_penalty(self) -> float | None:
        """
        Get the repeat penalty setting.

        Returns:
            float | None: The penalty value for token repetition, or None if unset.
        """
        return self.repeat_penalty
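
# Usage example (illustrative): this mirrors how the Args class below builds its
# presets; the model name and token limit come from that class, not a separate config.
#
#     preset = AgentPreset(LLMInterface.OPENAI, "qwen/qwen3-30b-a3b", max_tokens=1500)
#     preset.get_max_tokens()   # -> 1500
#     preset.get_temperature()  # -> None (the backend default is used)
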
class Args:
    LOGGER = Logger.set_logger()

    primary_llm_interface = LLMInterface.OPENAI
    # secondary_llm_interface = LLMInterface.HUGGINGFACE
    vlm_interface = LLMInterface.OPENAI

    primary_model = "groot" if TEST_MODE else "qwen/qwen3-30b-a3b"
    secondary_model = "groot" if TEST_MODE else "qwen2.5-7b-instruct-1m"
    vision_model = "groot" if TEST_MODE else "qwen/qwen2.5-vl-7b"

    api_base = "http://127.0.0.1:1234/v1"  # LM Studio local endpoint
    api_key = "api_key"
    token = ""  # Not needed when using the OpenAILike API

    # Agent presets
    PRIMARY_AGENT_PRESET = AgentPreset(
        primary_llm_interface, primary_model,
        temperature=None, max_tokens=1500, repeat_penalty=None
    )
    SECONDARY_AGENT_PRESET = AgentPreset(
        primary_llm_interface, secondary_model,
        temperature=None, max_tokens=1500, repeat_penalty=None
    )
    VISION_AGENT_PRESET = AgentPreset(
        vlm_interface, vision_model,
        temperature=None, max_tokens=1500, repeat_penalty=None
    )
class AppParams:
    # --- Constants ---
    DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
    MOCK_SUBMISSION = True
    QUESTIONS_LIMIT = 3  # Use 0 for no limit!
class AlfredParams:
    # Maximum number of interactions between the Manager and the Solver
    MAX_INTERACTIONS = 6
    # Maximum number of interactions between the Solver and its assistants
    MAX_SOLVING_EFFORT = 6
    # Audit every few messages to check whether the manager agent has gotten stuck
    AUDIT_INTERVAL = 3
class MiscParams:
    NO_THINK = True
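
# Illustrative smoke check (assumption: this module may be run directly; if it is only
# ever imported, the block simply never executes). It prints the effective configuration
# so a misconfigured preset or endpoint is easy to spot.
if __name__ == "__main__":
    for name, preset in (
        ("primary", Args.PRIMARY_AGENT_PRESET),
        ("secondary", Args.SECONDARY_AGENT_PRESET),
        ("vision", Args.VISION_AGENT_PRESET),
    ):
        print(f"{name}: {preset.get_interface().value} / {preset.get_model_name()} "
              f"(max_tokens={preset.get_max_tokens()})")
    print(f"API base: {Args.api_base}")
    print(f"Mock submission: {AppParams.MOCK_SUBMISSION}, questions limit: {AppParams.QUESTIONS_LIMIT}")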