24Arys11's picture
bugfixing; fixed toolbox; isolated [Base|AI|Human]Message crap logic to the agent interface; implemented tests
e4f6727
raw
history blame
3.84 kB
import logging
import os
import re
from typing import List, Optional

from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, AIMessage

from args import Args, AgentPreset
from llm_factory import LLMFactory
class IAgent():
    """Base agent: wraps an LLM (built from an AgentPreset) behind a
    file-loaded system prompt and a simple string-in / string-out query API."""

    def __init__(self, sys_prompt_filename, agent_preset: AgentPreset, tools: Optional[List] = None, parallel_tool_calls=False):
        """
        Build an agent from a system-prompt file and an LLM preset.

        Args:
            sys_prompt_filename (str): File name inside ./system_prompts containing the system prompt.
            agent_preset (AgentPreset): Model/interface configuration passed to LLMFactory.
            tools (Optional[List]): Tools to bind to the model. Defaults to None
                (no tools) — avoids the mutable-default-argument pitfall of `[]`.
            parallel_tool_calls (bool): Whether the model may issue tool calls in parallel.
        """
        self.name = self._format_name(sys_prompt_filename)
        self.interface = agent_preset.get_interface()
        # Model name "groot" marks a mock agent that returns a canned reply (see query()).
        self.mock = (agent_preset.get_model_name() == "groot")
        # Load the system prompt from a file.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", sys_prompt_filename)
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()
        # Define LLM and bind tools when any were provided.
        llm = LLMFactory.create(agent_preset)
        if tools:
            self.model = llm.bind_tools(tools, parallel_tool_calls=parallel_tool_calls)
        else:
            self.model = llm

    @staticmethod
    def _format_name(sys_prompt_filename: str) -> str:
        """
        Derive the agent name from its system-prompt file name.

        Strips the file extension, then any leading non-letter characters
        (e.g. "01_planner.txt" -> "planner").

        Args:
            sys_prompt_filename (str): The system prompt file name.

        Returns:
            str: The cleaned agent name.
        """
        # Remove file extension
        name_without_ext = os.path.splitext(sys_prompt_filename)[0]
        # Remove numbers and special characters from the beginning
        cleaned_name = re.sub(r'^[^a-zA-Z]+', '', name_without_ext)
        return cleaned_name

    @staticmethod
    def _bake_roles(messages: List[str]) -> List[AnyMessage]:
        """
        Assigns roles to messages in reverse order: last message is HumanMessage,
        previous is AIMessage, and so on, alternating backwards.

        Args:
            messages (List[str]): List of message strings.

        Returns:
            List[AnyMessage]: List of messages wrapped with appropriate role classes.

        Raises:
            ValueError: If messages is empty.
        """
        if not messages:
            raise ValueError("The list of messages cannot be empty !")
        messages_with_roles = []
        total_messages = len(messages)
        for idx, msg in enumerate(messages):
            # Assign roles in reverse: last is Human, previous is AI, etc.
            reverse_idx = total_messages - idx - 1
            if reverse_idx % 2 == 0:
                messages_with_roles.append(HumanMessage(content=msg))
            else:
                messages_with_roles.append(AIMessage(content=msg))
        return messages_with_roles

    def get_system_prompt(self) -> str:
        """
        Retrieves the system prompt.

        Returns:
            str: The system prompt string.
        """
        return self.system_prompt

    def query(self, messages: List[str]) -> str:
        """
        Queries the agent with a conversation history and returns the response.

        Messages are assigned alternating roles (see _bake_roles) and prefixed
        with the system prompt before being sent to the model. Mock agents
        ("groot") short-circuit with a canned reply.

        Args:
            messages (List[str]): Conversation messages; the last one is treated
                as the current human turn.

        Returns:
            str: The response from the agent as a string.

        Raises:
            RuntimeError: If Args.LOGGER has not been configured.
            ValueError: If messages is empty (raised by _bake_roles).
        """
        if Args.LOGGER is None:
            raise RuntimeError("LOGGER must be defined before querying the agent.")
        separator = "=============================="
        Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{messages}\n")
        if self.mock:
            # Canned reply for the "groot" mock model — no LLM call is made.
            response = "I am GROOT !"
            Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
            return response
        system_prompt = self.get_system_prompt()
        messages_with_roles = self._bake_roles(messages)
        conversation = [SystemMessage(content=system_prompt)] + messages_with_roles
        response = str(self.model.invoke(conversation).content)
        Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
        return response