# Specialist and orchestrator agent classes for the multi-agent workflow.
import asyncio
import os

from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Context

from args import Args
from llm_factory import LLMFactory
from toolbox import Toolbox
class Summarizer:
    """Agent that summarizes text, backed by a tool-less AgentWorkflow."""

    def __init__(self, temperature, max_tokens):
        """
        Build the summarizer agent.

        Args:
            temperature: Sampling temperature forwarded to the LLM factory.
            max_tokens: Maximum completion tokens forwarded to the LLM factory.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "04_summarizer.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()

        # Define the LLM and agent.
        # NOTE(review): AgentWorkflow.setup_agent is not an obviously documented
        # constructor — confirm against the pinned llama_index version.
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.setup_agent(llm=llm)
        self.ctx = Context(self.agent)

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    async def query(self, question: str) -> str:
        """
        Asynchronously query the agent and return its response.

        Args:
            question (str): The question to send to the agent.

        Returns:
            str: The agent's response, stringified.
        """
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clear the agent's conversation history by creating a fresh Context.
        Useful when starting a new, unrelated conversation.
        """
        self.ctx = Context(self.agent)
class Researcher:
    """Agent that answers questions using DuckDuckGo web-search tools."""

    def __init__(self, temperature, max_tokens):
        """
        Build the researcher agent with web-search tools attached.

        Args:
            temperature: Sampling temperature forwarded to the LLM factory.
            max_tokens: Maximum completion tokens forwarded to the LLM factory.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "05_researcher.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()

        # Define the LLM and agent.
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            Toolbox.web_search.duck_duck_go_tools,
            llm=llm,
        )
        self.ctx = Context(self.agent)

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    async def query(self, question: str) -> str:
        """
        Asynchronously query the agent and return its response.

        Args:
            question (str): The question to send to the agent.

        Returns:
            str: The agent's response, stringified.
        """
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clear the agent's conversation history by creating a fresh Context.
        Useful when starting a new, unrelated conversation.
        """
        self.ctx = Context(self.agent)
class EncryptionExpert:
    """Agent equipped with base64, Caesar-cipher, and string-reversal tools."""

    def __init__(self, temperature, max_tokens):
        """
        Build the encryption-expert agent and its helper agents.

        Args:
            temperature: Sampling temperature forwarded to every LLM created.
            max_tokens: Maximum completion tokens forwarded to every LLM created.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "06_encryption_expert.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()

        # Define the LLM and agent.
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                Toolbox.encryption.base64_encode,
                Toolbox.encryption.base64_decode,
                Toolbox.encryption.caesar_cipher_encode,
                Toolbox.encryption.caesar_cipher_decode,
                Toolbox.encryption.reverse_string,
                # TODO: Add more encryption tools
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)

        # Helper agents.
        # NOTE(review): these are created and context-cleared but never exposed
        # to the workflow as tools — confirm whether they should be wired in.
        self.math_expert = MathExpert(temperature, max_tokens)
        self.reasoner = Reasoner(temperature, max_tokens)

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    async def query(self, question: str) -> str:
        """
        Asynchronously query the agent and return its response.

        Args:
            question (str): The question to send to the agent.

        Returns:
            str: The agent's response, stringified.
        """
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clear the agent's conversation history by creating a fresh Context.
        Also resets the context of the helper agents.
        """
        self.ctx = Context(self.agent)
        # Clear context for helper agents.
        self.math_expert.clear_context()
        self.reasoner.clear_context()
class MathExpert:
    """Agent equipped with symbolic-calculation and unit-conversion tools."""

    def __init__(self, temperature, max_tokens):
        """
        Build the math-expert agent and its helper reasoner agent.

        Args:
            temperature: Sampling temperature forwarded to every LLM created.
            max_tokens: Maximum completion tokens forwarded to every LLM created.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "07_math_expert.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()

        # Define the LLM and agent.
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                Toolbox.math.symbolic_calc,
                Toolbox.math.unit_converter,
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)

        # Helper agent.
        # NOTE(review): created and context-cleared but never exposed to the
        # workflow as a tool — confirm whether it should be wired in.
        self.reasoner = Reasoner(temperature, max_tokens)

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    async def query(self, question: str) -> str:
        """
        Asynchronously query the agent and return its response.

        Args:
            question (str): The question to send to the agent.

        Returns:
            str: The agent's response, stringified.
        """
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clear the agent's conversation history by creating a fresh Context.
        Also resets the context of the helper reasoner agent.
        """
        self.ctx = Context(self.agent)
        self.reasoner.clear_context()
class Reasoner:
    """Agent for step-by-step reasoning, backed by a tool-less AgentWorkflow."""

    def __init__(self, temperature, max_tokens):
        """
        Build the reasoner agent.

        Args:
            temperature: Sampling temperature forwarded to the LLM factory.
            max_tokens: Maximum completion tokens forwarded to the LLM factory.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "08_reasoner.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()

        # Define the LLM and agent.
        # NOTE(review): AgentWorkflow.setup_agent is not an obviously documented
        # constructor — confirm against the pinned llama_index version.
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.setup_agent(llm=llm)
        self.ctx = Context(self.agent)

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    async def query(self, question: str) -> str:
        """
        Asynchronously query the agent and return its response.

        Args:
            question (str): The question to send to the agent.

        Returns:
            str: The agent's response, stringified.
        """
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clear the agent's conversation history by creating a fresh Context.
        Useful when starting a new, unrelated conversation.
        """
        self.ctx = Context(self.agent)
class ImageHandler:
    """Placeholder agent for image questions; only loads its system prompt."""

    def __init__(self, temperature, max_tokens):
        """
        Load the system prompt. No LLM or agent is created yet.

        Args:
            temperature: Accepted for interface parity; unused until implemented.
            max_tokens: Accepted for interface parity; unused until implemented.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "09_image_handler.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()
        # TODO: build an LLM/agent/context once image handling is implemented.

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    def clear_context(self):
        """
        Reset the agent context if one exists. Currently a no-op because
        __init__ never creates an agent or context.
        """
        if hasattr(self, 'ctx') and hasattr(self, 'agent'):
            self.ctx = Context(self.agent)
class VideoHandler:
    """Placeholder agent for video questions; only loads its system prompt."""

    def __init__(self, temperature, max_tokens):
        """
        Load the system prompt. No LLM or agent is created yet.

        Args:
            temperature: Accepted for interface parity; unused until implemented.
            max_tokens: Accepted for interface parity; unused until implemented.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "10_video_handler.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()
        # TODO: build an LLM/agent/context once video handling is implemented.

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    def clear_context(self):
        """
        Reset the agent context if one exists. Currently a no-op because
        __init__ never creates an agent or context.
        """
        if hasattr(self, 'ctx') and hasattr(self, 'agent'):
            self.ctx = Context(self.agent)
class Solver:
    """Top-level orchestrator agent that delegates to specialist agents via tools."""

    def __init__(self, temperature, max_tokens):
        """
        Build the solver workflow and all specialist tool agents.

        Args:
            temperature: Sampling temperature forwarded to every LLM created.
            max_tokens: Maximum completion tokens forwarded to every LLM created.
        """
        # Load the system prompt from a file.
        # NOTE(review): path resolves against the current working directory,
        # not this file's location — confirm the process starts at repo root.
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "03_solver.txt")
        # Explicit UTF-8 so prompts with non-ASCII characters load on any platform.
        with open(system_prompt_path, "r", encoding="utf-8") as file:
            self.system_prompt = file.read().strip()

        # Define the LLM and the delegating agent. The tools are bound methods,
        # so the specialist agents they forward to only need to exist by the
        # time a tool is actually invoked — they are created just below.
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                self.call_summarizer,
                self.call_researcher,
                self.call_encryption_expert,
                self.call_math_expert,
                self.call_reasoner,
                self.call_image_handler,
                self.call_video_handler,
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)

        # Initialize the specialist agents the tools delegate to.
        self.summarizer = Summarizer(temperature, max_tokens)
        self.researcher = Researcher(temperature, max_tokens)
        self.encryption_expert = EncryptionExpert(temperature, max_tokens)
        self.math_expert = MathExpert(temperature, max_tokens)
        self.reasoner = Reasoner(temperature, max_tokens)
        self.image_handler = ImageHandler(temperature, max_tokens)
        self.video_handler = VideoHandler(temperature, max_tokens)

    def get_system_prompt(self) -> str:
        """Return the system prompt loaded at construction time."""
        return self.system_prompt

    async def query(self, question: str) -> str:
        """
        Asynchronously query the agent and return its response.

        Args:
            question (str): The question to send to the agent.

        Returns:
            str: The agent's response, stringified.
        """
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clear the solver's conversation history by creating a fresh Context,
        and reset the context of every specialist agent as well.
        """
        self.ctx = Context(self.agent)
        # Clear context for all specialist agents.
        self.summarizer.clear_context()
        self.researcher.clear_context()
        self.encryption_expert.clear_context()
        self.math_expert.clear_context()
        self.reasoner.clear_context()
        self.image_handler.clear_context()
        self.video_handler.clear_context()

    async def call_summarizer(self, question: str) -> str:
        """Delegate *question* to the Summarizer agent."""
        return await self.summarizer.query(question)

    async def call_researcher(self, question: str) -> str:
        """Delegate *question* to the Researcher agent."""
        return await self.researcher.query(question)

    async def call_encryption_expert(self, question: str) -> str:
        """Delegate *question* to the EncryptionExpert agent."""
        return await self.encryption_expert.query(question)

    async def call_math_expert(self, question: str) -> str:
        """Delegate *question* to the MathExpert agent."""
        return await self.math_expert.query(question)

    async def call_reasoner(self, question: str) -> str:
        """Delegate *question* to the Reasoner agent."""
        return await self.reasoner.query(question)

    async def call_image_handler(self, question: str) -> str:
        """Delegate to the ImageHandler, or report that it is unimplemented."""
        # ImageHandler has no query method yet; guard until it is implemented.
        if hasattr(self.image_handler, 'query'):
            return await self.image_handler.query(question)
        return "Image handling is not implemented yet."

    async def call_video_handler(self, question: str) -> str:
        """Delegate to the VideoHandler, or report that it is unimplemented."""
        # VideoHandler has no query method yet; guard until it is implemented.
        if hasattr(self.video_handler, 'query'):
            return await self.video_handler.query(question)
        return "Video handling is not implemented yet."
# if __name__ == "__main__": | |
# pass | |