import asyncio
import os

from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Context

from args import Args
from llm_factory import LLMFactory
from toolbox import Toolbox


class Solver:
    def __init__(self, temperature, max_tokens):
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "06_math_expert.txt")
        self.system_prompt = ""
        with open(system_prompt_path, "r") as file:
            self.system_prompt = file.read().strip()
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                Toolbox.math.symbolic_calc,
                Toolbox.math.unit_converter,
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)

    def get_system_prompt(self):
        return self.system_prompt

    async def query(self, question: str) -> str:
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clears the current context of the agent, resetting any conversation history.
        This is useful when starting a new conversation or when the context needs to be refreshed.
        """
        self.ctx = Context(self.agent)
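
# A minimal usage sketch for Solver (values are illustrative; assumes the
# system_prompts/ directory and the LLM configured in Args are available):
#
#   solver = Solver(temperature=0.2, max_tokens=512)
#   print(asyncio.run(solver.query("What is the integral of x**2 from 0 to 1?")))
#   solver.clear_context()  # wipe the conversation history before a new task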


class Summarizer:
    def __init__(self, temperature, max_tokens):
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "01_assistant.txt")
        self.system_prompt = ""
        with open(system_prompt_path, "r") as file:
            self.system_prompt = file.read().strip()
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        # setup_agent is not a public constructor on AgentWorkflow; build the
        # workflow the same way as the other agents (this assumes an empty
        # tool list is accepted by from_tools_or_functions).
        self.agent = AgentWorkflow.from_tools_or_functions([], llm=llm)
        self.ctx = Context(self.agent)

    async def query(self, question: str) -> str:
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)
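
# Sketch of Summarizer usage (illustrative values, and subject to the
# tool-less construction assumption noted above):
#
#   summarizer = Summarizer(temperature=0.7, max_tokens=512)
#   print(asyncio.run(summarizer.query("Summarize the following text: ...")))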


class MathExpert:
    def __init__(self, temperature, max_tokens):
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "06_math_expert.txt")
        self.system_prompt = ""
        with open(system_prompt_path, "r") as file:
            self.system_prompt = file.read().strip()
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                Toolbox.math.symbolic_calc,
                Toolbox.math.unit_converter,
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)

    def get_system_prompt(self):
        return self.system_prompt

    async def query(self, question: str) -> str:
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)

    def clear_context(self):
        """
        Clears the current context of the agent, resetting any conversation history.
        This is useful when starting a new conversation or when the context needs to be refreshed.
        """
        self.ctx = Context(self.agent)


class Researcher:
    def __init__(self, temperature, max_tokens):
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "04_researcher.txt")
        self.system_prompt = ""
        with open(system_prompt_path, "r") as file:
            self.system_prompt = file.read().strip()
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            Toolbox.web_search.duck_duck_go_tools,
            llm=llm,
        )
        self.ctx = Context(self.agent)

    def get_system_prompt(self):
        return self.system_prompt

    async def query(self, question: str) -> str:
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)
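
# Sketch of Researcher usage (assumes the DuckDuckGo tools in Toolbox are
# configured and network access is available; values are illustrative):
#
#   researcher = Researcher(temperature=0.3, max_tokens=1024)
#   print(asyncio.run(researcher.query("What is the tallest building in the world?")))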


class EncryptionExpert:
    def __init__(self, temperature, max_tokens):
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "05_encryption_expert.txt")
        self.system_prompt = ""
        with open(system_prompt_path, "r") as file:
            self.system_prompt = file.read().strip()
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                Toolbox.encryption.base64_encode,
                Toolbox.encryption.base64_decode,
                Toolbox.encryption.caesar_cipher_encode,
                Toolbox.encryption.caesar_cipher_decode,
                Toolbox.encryption.reverse_string,
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)

    def get_system_prompt(self):
        return self.system_prompt

    async def query(self, question: str) -> str:
        response = await self.agent.run(question, ctx=self.ctx)
        return str(response)
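
# Sketch of EncryptionExpert usage: the agent is expected to pick the right
# tool (base64, Caesar cipher, or string reversal) from the wording of the
# question (illustrative values):
#
#   expert = EncryptionExpert(temperature=0.7, max_tokens=2000)
#   print(asyncio.run(expert.query("Decode this base64 string: aGVsbG8gd29ybGQ=")))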


class ImageHandler:
    pass


class VideoHandler:
    pass


class RecursiveSolverAgent:
    pass


class Solver_2:
    def __init__(self, temperature, max_tokens):
        print("Agent initialized.")
        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "01_assistant.txt")
        self.system_prompt = ""
        with open(system_prompt_path, "r") as file:
            self.system_prompt = file.read().strip()
        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
        self.agent = AgentWorkflow.from_tools_or_functions(
            [
                FunctionTool.from_defaults(self.delegate_to_math_expert),
                FunctionTool.from_defaults(self.set_final_answer),
            ],
            llm=llm,
        )
        self.ctx = Context(self.agent)
        self.final_answer = ""

    async def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        self.final_answer = ""
        response = await self.query(question)
        print(f"Agent processed the response: {response}")
        if self.final_answer == "":
            await self.query("I noticed the final_answer is an empty string. Have you forgotten to set the final_answer?")
        return self.final_answer

    def get_system_prompt(self):
        return self.system_prompt

    async def query(self, question: str) -> str:
        response = await self.agent.run(question, ctx=self.ctx)
        response = str(response)
        # Fallback: record the raw response as the final answer in case the
        # LLM never called the set_final_answer tool itself.
        self.set_final_answer(response)
        return response

    def set_final_answer(self, final_answer: str) -> str:
        """
        Sets the final answer for the current query.

        Args:
            final_answer (str): The final answer to be set for the agent.

        Returns:
            str: The final answer that was set.
        """
        print("-> set_final_answer !")
        self.final_answer = final_answer
        return final_answer

    async def delegate_to_math_expert(self, question: str) -> str:
        """Delegates a math question to a dedicated MathExpert agent."""
        print("-> delegated to math agent !")
        math_agent = MathExpert(temperature=0.7, max_tokens=100)
        # MathExpert.query is a coroutine; await it so the tool returns a
        # string rather than an un-run coroutine object.
        return await math_agent.query(question)
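
# Sketch of the Solver_2 delegation flow: __call__ resets final_answer, runs
# the agent (which may invoke delegate_to_math_expert and set_final_answer as
# tools), and re-prompts once if no final answer was recorded:
#
#   solver = Solver_2(temperature=0.7, max_tokens=1000)
#   print(asyncio.run(solver("What is 12 * 7?")))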


if __name__ == "__main__":
    encryption_agent = EncryptionExpert(temperature=0.7, max_tokens=2000)
    # encryption_query = "Decipher this: 'Bmfy bfx ymj wjxzqy gjybjjs z-hqzo fsi zsnajwxnyfyjf-hwfntaf ns fuwnq 2025 ?'"
    encryption_query = '.rewsna eht sa "tfel" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI'
    # print(encryption_agent.get_system_prompt())
    # encoding = Toolbox.encryption.caesar_cipher_encode(encryption_query, 5)
    # print(encoding)
    # print(Toolbox.encryption.caesar_cipher_decode(encoding, 5))
    print(asyncio.run(encryption_agent.query(encryption_query)))