Spaces:
Sleeping
Sleeping
Added logger; Optimized query flow; Fixed DuckDuckGo tools; Tweaked system prompts.
Browse files- .gitignore +3 -3
- alfred.py +56 -6
- args.py +3 -0
- diagrams/architecture.puml +5 -1
- itf_agent.py +31 -6
- management.py +16 -1
- solver.py +16 -4
- system_prompts/01_assistant.txt +10 -1
- system_prompts/02_manager.txt +17 -5
- system_prompts/03_solver.txt +9 -1
- system_prompts/05_researcher.txt +6 -4
- toolbox.py +82 -2
.gitignore
CHANGED
|
@@ -57,6 +57,7 @@ venv.bak/
|
|
| 57 |
# IDE related
|
| 58 |
.idea/
|
| 59 |
.vscode/
|
|
|
|
| 60 |
*.swp
|
| 61 |
*.swo
|
| 62 |
|
|
@@ -66,6 +67,5 @@ venv.bak/
|
|
| 66 |
# Rejected by git
|
| 67 |
*.png
|
| 68 |
|
| 69 |
-
#
|
| 70 |
-
|
| 71 |
-
.github/
|
|
|
|
| 57 |
# IDE related
|
| 58 |
.idea/
|
| 59 |
.vscode/
|
| 60 |
+
.github/
|
| 61 |
*.swp
|
| 62 |
*.swo
|
| 63 |
|
|
|
|
| 67 |
# Rejected by git
|
| 68 |
*.png
|
| 69 |
|
| 70 |
+
# Log files
|
| 71 |
+
logs/
|
|
|
alfred.py
CHANGED
|
@@ -2,24 +2,27 @@ from langgraph.graph import START, END, StateGraph
|
|
| 2 |
from langgraph.graph.state import CompiledStateGraph
|
| 3 |
|
| 4 |
from typing import Dict, Any, TypedDict, Literal, Optional
|
| 5 |
-
import
|
|
|
|
|
|
|
| 6 |
|
|
|
|
| 7 |
from management import Manager, Assistant
|
| 8 |
|
| 9 |
|
| 10 |
# Maximum number of interactions between Assistant and Manager
|
| 11 |
MAX_INTERACTIONS = 5
|
| 12 |
# Maximum depth of recursion for Manager
|
| 13 |
-
MAX_DEPTH =
|
| 14 |
# For both Assistant and Manager:
|
| 15 |
TEMPERATURE = 0.7
|
| 16 |
-
MAX_TOKENS =
|
| 17 |
|
| 18 |
|
| 19 |
class State(TypedDict):
|
| 20 |
"""State for the agent graph."""
|
| 21 |
initial_query: str
|
| 22 |
-
current_message: str
|
| 23 |
nr_interactions: int
|
| 24 |
final_response: Optional[str]
|
| 25 |
|
|
@@ -44,7 +47,11 @@ class GraphBuilder:
|
|
| 44 |
|
| 45 |
Uses the existing Assistant implementation.
|
| 46 |
"""
|
| 47 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
# Check if this is a final answer
|
| 50 |
if self.final_answer_hint in response:
|
|
@@ -62,6 +69,9 @@ class GraphBuilder:
|
|
| 62 |
|
| 63 |
Uses the existing Manager implementation.
|
| 64 |
"""
|
|
|
|
|
|
|
|
|
|
| 65 |
response = await self.manager_agent.query(state["current_message"])
|
| 66 |
|
| 67 |
state["current_message"] = response
|
|
@@ -107,6 +117,9 @@ class GraphBuilder:
|
|
| 107 |
"manager": If the Assistant has decided to continue the conversation
|
| 108 |
"final_answer": If the Assistant has decided to provide a final answer
|
| 109 |
"""
|
|
|
|
|
|
|
|
|
|
| 110 |
message = state["current_message"]
|
| 111 |
|
| 112 |
if state["nr_interactions"] >= MAX_INTERACTIONS or self.final_answer_hint in message:
|
|
@@ -146,6 +159,9 @@ class Alfred:
|
|
| 146 |
|
| 147 |
def __init__(self):
|
| 148 |
print("Agent initialized.")
|
|
|
|
|
|
|
|
|
|
| 149 |
self.graph_builder = GraphBuilder()
|
| 150 |
self.agent_graph = self.graph_builder.build_agent_graph()
|
| 151 |
|
|
@@ -169,7 +185,7 @@ class Alfred:
|
|
| 169 |
"""
|
| 170 |
initial_state: State = {
|
| 171 |
"initial_query": query,
|
| 172 |
-
"current_message":
|
| 173 |
"nr_interactions": 0,
|
| 174 |
"final_response": None
|
| 175 |
}
|
|
@@ -177,3 +193,37 @@ class Alfred:
|
|
| 177 |
|
| 178 |
result = await self.agent_graph.ainvoke(initial_state)
|
| 179 |
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
from langgraph.graph.state import CompiledStateGraph
|
| 3 |
|
| 4 |
from typing import Dict, Any, TypedDict, Literal, Optional
|
| 5 |
+
import logging
|
| 6 |
+
import datetime
|
| 7 |
+
from pathlib import Path
|
| 8 |
|
| 9 |
+
from args import Args
|
| 10 |
from management import Manager, Assistant
|
| 11 |
|
| 12 |
|
| 13 |
# Maximum number of interactions between Assistant and Manager
|
| 14 |
MAX_INTERACTIONS = 5
|
| 15 |
# Maximum depth of recursion for Manager
|
| 16 |
+
MAX_DEPTH = 2
|
| 17 |
# For both Assistant and Manager:
|
| 18 |
TEMPERATURE = 0.7
|
| 19 |
+
MAX_TOKENS = 2000
|
| 20 |
|
| 21 |
|
| 22 |
class State(TypedDict):
|
| 23 |
"""State for the agent graph."""
|
| 24 |
initial_query: str
|
| 25 |
+
current_message: Optional[str]
|
| 26 |
nr_interactions: int
|
| 27 |
final_response: Optional[str]
|
| 28 |
|
|
|
|
| 47 |
|
| 48 |
Uses the existing Assistant implementation.
|
| 49 |
"""
|
| 50 |
+
if state["current_message"] is None:
|
| 51 |
+
# First time, just forward the query to the manager
|
| 52 |
+
response = state["initial_query"]
|
| 53 |
+
else:
|
| 54 |
+
response = await self.assistant_agent.query(state["current_message"])
|
| 55 |
|
| 56 |
# Check if this is a final answer
|
| 57 |
if self.final_answer_hint in response:
|
|
|
|
| 69 |
|
| 70 |
Uses the existing Manager implementation.
|
| 71 |
"""
|
| 72 |
+
if state["current_message"] is None:
|
| 73 |
+
raise ValueError("manager_node called with no current_message in state")
|
| 74 |
+
|
| 75 |
response = await self.manager_agent.query(state["current_message"])
|
| 76 |
|
| 77 |
state["current_message"] = response
|
|
|
|
| 117 |
"manager": If the Assistant has decided to continue the conversation
|
| 118 |
"final_answer": If the Assistant has decided to provide a final answer
|
| 119 |
"""
|
| 120 |
+
if state["current_message"] is None:
|
| 121 |
+
raise ValueError("should_continue conditional edge was reached with no current_message in state")
|
| 122 |
+
|
| 123 |
message = state["current_message"]
|
| 124 |
|
| 125 |
if state["nr_interactions"] >= MAX_INTERACTIONS or self.final_answer_hint in message:
|
|
|
|
| 159 |
|
| 160 |
def __init__(self):
|
| 161 |
print("Agent initialized.")
|
| 162 |
+
|
| 163 |
+
Args.LOGGER = self.set_logger()
|
| 164 |
+
|
| 165 |
self.graph_builder = GraphBuilder()
|
| 166 |
self.agent_graph = self.graph_builder.build_agent_graph()
|
| 167 |
|
|
|
|
| 185 |
"""
|
| 186 |
initial_state: State = {
|
| 187 |
"initial_query": query,
|
| 188 |
+
"current_message": None,
|
| 189 |
"nr_interactions": 0,
|
| 190 |
"final_response": None
|
| 191 |
}
|
|
|
|
| 193 |
|
| 194 |
result = await self.agent_graph.ainvoke(initial_state)
|
| 195 |
return result
|
| 196 |
+
|
| 197 |
+
def set_logger(self) -> logging.Logger:
|
| 198 |
+
"""
|
| 199 |
+
Configure and return a logger with a file handler that writes to logs/<current date-time>.txt
|
| 200 |
+
|
| 201 |
+
Returns:
|
| 202 |
+
logging.Logger: Configured logger instance
|
| 203 |
+
"""
|
| 204 |
+
# Create logger
|
| 205 |
+
logger = logging.getLogger("Alfred")
|
| 206 |
+
logger.setLevel(logging.INFO)
|
| 207 |
+
|
| 208 |
+
# Create formatter
|
| 209 |
+
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
| 210 |
+
|
| 211 |
+
# Create logs directory if it doesn't exist
|
| 212 |
+
logs_dir = Path('logs')
|
| 213 |
+
logs_dir.mkdir(exist_ok=True)
|
| 214 |
+
|
| 215 |
+
# Generate log filename with current date-time
|
| 216 |
+
current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
|
| 217 |
+
log_filename = f"{current_time}.txt"
|
| 218 |
+
log_filepath = logs_dir / log_filename
|
| 219 |
+
|
| 220 |
+
# Create file handler
|
| 221 |
+
file_handler = logging.FileHandler(log_filepath)
|
| 222 |
+
file_handler.setLevel(logging.INFO)
|
| 223 |
+
file_handler.setFormatter(formatter)
|
| 224 |
+
|
| 225 |
+
# Add handler to logger
|
| 226 |
+
logger.addHandler(file_handler)
|
| 227 |
+
|
| 228 |
+
logger.info(f"Logging started at {current_time}")
|
| 229 |
+
return logger
|
args.py
CHANGED
|
@@ -1,5 +1,7 @@
|
|
| 1 |
|
| 2 |
from enum import Enum
|
|
|
|
|
|
|
| 3 |
|
| 4 |
|
| 5 |
class LLMInterface(Enum):
|
|
@@ -10,6 +12,7 @@ class LLMInterface(Enum):
|
|
| 10 |
|
| 11 |
|
| 12 |
class Args:
|
|
|
|
| 13 |
primary_llm_interface=LLMInterface.OPENAILIKE
|
| 14 |
# secondary_llm_interface=LLMInterface.HUGGINGFACE
|
| 15 |
vlm_interface=LLMInterface.HUGGINGFACE
|
|
|
|
| 1 |
|
| 2 |
from enum import Enum
|
| 3 |
+
from logging import Logger
|
| 4 |
+
from typing import Optional
|
| 5 |
|
| 6 |
|
| 7 |
class LLMInterface(Enum):
|
|
|
|
| 12 |
|
| 13 |
|
| 14 |
class Args:
|
| 15 |
+
LOGGER: Optional[Logger] = None
|
| 16 |
primary_llm_interface=LLMInterface.OPENAILIKE
|
| 17 |
# secondary_llm_interface=LLMInterface.HUGGINGFACE
|
| 18 |
vlm_interface=LLMInterface.HUGGINGFACE
|
diagrams/architecture.puml
CHANGED
|
@@ -92,7 +92,11 @@ component "LlamaIndex Flow" as LlamaIndexFlow CANVAS_COLOR {
|
|
| 92 |
node WebSearch TOOLBOX_WEBSEARCH_COLOR [
|
| 93 |
<b>WebSearch</b>
|
| 94 |
----
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
]
|
| 97 |
node Encryption TOOLBOX_ENCRYPTION_COLOR [
|
| 98 |
<b>Encryption</b>
|
|
|
|
| 92 |
node WebSearch TOOLBOX_WEBSEARCH_COLOR [
|
| 93 |
<b>WebSearch</b>
|
| 94 |
----
|
| 95 |
+
duckduckgo_text_search
|
| 96 |
+
____
|
| 97 |
+
duckduckgo_images_search
|
| 98 |
+
____
|
| 99 |
+
duckduckgo_videos_search
|
| 100 |
]
|
| 101 |
node Encryption TOOLBOX_ENCRYPTION_COLOR [
|
| 102 |
<b>Encryption</b>
|
itf_agent.py
CHANGED
|
@@ -1,19 +1,23 @@
|
|
|
|
|
| 1 |
from llama_index.core.workflow import Context
|
| 2 |
|
|
|
|
| 3 |
import os
|
|
|
|
| 4 |
from typing import List
|
| 5 |
|
| 6 |
-
from args import LLMInterface
|
| 7 |
from llm_factory import LLMFactory
|
| 8 |
from llama_index.core.agent.workflow import AgentWorkflow
|
| 9 |
|
| 10 |
|
| 11 |
class IAgent():
|
| 12 |
-
def __init__(self, temperature, max_tokens,
|
| 13 |
-
print(f"Agent initialized using {
|
|
|
|
| 14 |
self.temperature, self.max_tokens = temperature, max_tokens
|
| 15 |
# Load the system prompt from a file
|
| 16 |
-
system_prompt_path = os.path.join(os.getcwd(), "system_prompts",
|
| 17 |
self.system_prompt = ""
|
| 18 |
with open(system_prompt_path, "r") as file:
|
| 19 |
self.system_prompt = file.read().strip()
|
|
@@ -24,8 +28,16 @@ class IAgent():
|
|
| 24 |
self.llm = LLMFactory.create(llm_itf, self.system_prompt, temperature, max_tokens)
|
| 25 |
self.agent = self._setup_agent()
|
| 26 |
self.ctx = Context(self.agent)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
-
def setup_tools(self) -> List:
|
| 29 |
"""
|
| 30 |
Set up the tools for this agent.
|
| 31 |
|
|
@@ -60,7 +72,12 @@ class IAgent():
|
|
| 60 |
# Create tools from slaves: each tool calls slave.query(question) asynchronously
|
| 61 |
slave_tools = []
|
| 62 |
for slave in self.slaves:
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
self.tools.extend(slave_tools)
|
| 66 |
|
|
@@ -88,11 +105,19 @@ class IAgent():
|
|
| 88 |
Returns:
|
| 89 |
str: The response from the agent as a string.
|
| 90 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
if has_context:
|
| 92 |
response = await self.agent.run(question, ctx=self.ctx)
|
| 93 |
else:
|
| 94 |
response = await self.agent.run(question)
|
| 95 |
response = str(response)
|
|
|
|
|
|
|
| 96 |
return response
|
| 97 |
|
| 98 |
def clear_context(self):
|
|
|
|
| 1 |
+
from llama_index.core.tools import FunctionTool
|
| 2 |
from llama_index.core.workflow import Context
|
| 3 |
|
| 4 |
+
import logging
|
| 5 |
import os
|
| 6 |
+
import re
|
| 7 |
from typing import List
|
| 8 |
|
| 9 |
+
from args import Args, LLMInterface
|
| 10 |
from llm_factory import LLMFactory
|
| 11 |
from llama_index.core.agent.workflow import AgentWorkflow
|
| 12 |
|
| 13 |
|
| 14 |
class IAgent():
|
| 15 |
+
def __init__(self, temperature, max_tokens, sys_prompt_filename, llm_itf: LLMInterface):
|
| 16 |
+
print(f"Agent initialized using {sys_prompt_filename} prompt file.")
|
| 17 |
+
self.name = self._format_name(sys_prompt_filename)
|
| 18 |
self.temperature, self.max_tokens = temperature, max_tokens
|
| 19 |
# Load the system prompt from a file
|
| 20 |
+
system_prompt_path = os.path.join(os.getcwd(), "system_prompts", sys_prompt_filename)
|
| 21 |
self.system_prompt = ""
|
| 22 |
with open(system_prompt_path, "r") as file:
|
| 23 |
self.system_prompt = file.read().strip()
|
|
|
|
| 28 |
self.llm = LLMFactory.create(llm_itf, self.system_prompt, temperature, max_tokens)
|
| 29 |
self.agent = self._setup_agent()
|
| 30 |
self.ctx = Context(self.agent)
|
| 31 |
+
|
| 32 |
+
@staticmethod
|
| 33 |
+
def _format_name(sys_prompt_filename: str) -> str:
|
| 34 |
+
# Remove file extension
|
| 35 |
+
name_without_ext = os.path.splitext(sys_prompt_filename)[0]
|
| 36 |
+
# Remove numbers and special characters from the beginning
|
| 37 |
+
cleaned_name = re.sub(r'^[^a-zA-Z]+', '', name_without_ext)
|
| 38 |
+
return cleaned_name
|
| 39 |
|
| 40 |
+
def setup_tools(self) -> List[FunctionTool]:
|
| 41 |
"""
|
| 42 |
Set up the tools for this agent.
|
| 43 |
|
|
|
|
| 72 |
# Create tools from slaves: each tool calls slave.query(question) asynchronously
|
| 73 |
slave_tools = []
|
| 74 |
for slave in self.slaves:
|
| 75 |
+
slave_tool = FunctionTool.from_defaults(
|
| 76 |
+
name=f"call_{slave.name}",
|
| 77 |
+
description=f"Calls agent {slave.name} with a given query.",
|
| 78 |
+
fn=slave.query
|
| 79 |
+
)
|
| 80 |
+
slave_tools.append(slave_tool)
|
| 81 |
|
| 82 |
self.tools.extend(slave_tools)
|
| 83 |
|
|
|
|
| 105 |
Returns:
|
| 106 |
str: The response from the agent as a string.
|
| 107 |
"""
|
| 108 |
+
if Args.LOGGER is None:
|
| 109 |
+
raise RuntimeError("LOGGER must be defined before querying the agent.")
|
| 110 |
+
|
| 111 |
+
separator = "=============================="
|
| 112 |
+
Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{question}\n")
|
| 113 |
+
|
| 114 |
if has_context:
|
| 115 |
response = await self.agent.run(question, ctx=self.ctx)
|
| 116 |
else:
|
| 117 |
response = await self.agent.run(question)
|
| 118 |
response = str(response)
|
| 119 |
+
|
| 120 |
+
Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
|
| 121 |
return response
|
| 122 |
|
| 123 |
def clear_context(self):
|
management.py
CHANGED
|
@@ -18,6 +18,9 @@ class Manager(IAgent):
|
|
| 18 |
self.max_depth = max_depth
|
| 19 |
self.current_depth = 0
|
| 20 |
|
|
|
|
|
|
|
|
|
|
| 21 |
self.solver = Solver(temperature, max_tokens)
|
| 22 |
self.summarizer = Summarizer(temperature, max_tokens)
|
| 23 |
|
|
@@ -32,9 +35,18 @@ class Manager(IAgent):
|
|
| 32 |
name="require_solution",
|
| 33 |
description="Request direct solutions for specific tasks. Use when a task is simple enough to be solved directly.",
|
| 34 |
fn=self.require_solution
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
)
|
| 36 |
]
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
async def require_break_up(self, tasks: List[str], try_solving = False) -> str:
|
| 39 |
"""
|
| 40 |
Break down complex tasks into simpler subtasks recursively up to max_depth.
|
|
@@ -78,7 +90,7 @@ class Manager(IAgent):
|
|
| 78 |
Returns:
|
| 79 |
Summarized report of solutions for all tasks
|
| 80 |
"""
|
| 81 |
-
print(f"-> require_solution tool used
|
| 82 |
if not tasks:
|
| 83 |
return "Error: No tasks provided to solve. Please provide at least one task."
|
| 84 |
|
|
@@ -90,3 +102,6 @@ class Manager(IAgent):
|
|
| 90 |
|
| 91 |
report = await self.summarizer.query(observation.strip())
|
| 92 |
return report
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
self.max_depth = max_depth
|
| 19 |
self.current_depth = 0
|
| 20 |
|
| 21 |
+
# We track the current query to forward it to the team when needed.
|
| 22 |
+
self.current_query = ""
|
| 23 |
+
|
| 24 |
self.solver = Solver(temperature, max_tokens)
|
| 25 |
self.summarizer = Summarizer(temperature, max_tokens)
|
| 26 |
|
|
|
|
| 35 |
name="require_solution",
|
| 36 |
description="Request direct solutions for specific tasks. Use when a task is simple enough to be solved directly.",
|
| 37 |
fn=self.require_solution
|
| 38 |
+
),
|
| 39 |
+
FunctionTool.from_defaults(
|
| 40 |
+
name="forward_query",
|
| 41 |
+
description="Request direct solutions for the current query. Use as a first attempt and to make the team aware of the task's context.",
|
| 42 |
+
fn=self.forward_query
|
| 43 |
)
|
| 44 |
]
|
| 45 |
|
| 46 |
+
async def query(self, question: str, has_context=True) -> str:
|
| 47 |
+
self.current_query = question
|
| 48 |
+
return await super().query(question, has_context)
|
| 49 |
+
|
| 50 |
async def require_break_up(self, tasks: List[str], try_solving = False) -> str:
|
| 51 |
"""
|
| 52 |
Break down complex tasks into simpler subtasks recursively up to max_depth.
|
|
|
|
| 90 |
Returns:
|
| 91 |
Summarized report of solutions for all tasks
|
| 92 |
"""
|
| 93 |
+
print(f"-> require_solution tool used with input: {tasks} !")
|
| 94 |
if not tasks:
|
| 95 |
return "Error: No tasks provided to solve. Please provide at least one task."
|
| 96 |
|
|
|
|
| 102 |
|
| 103 |
report = await self.summarizer.query(observation.strip())
|
| 104 |
return report
|
| 105 |
+
|
| 106 |
+
async def forward_query(self) -> str:
|
| 107 |
+
return await self.require_solution([self.current_query])
|
solver.py
CHANGED
|
@@ -1,3 +1,5 @@
|
|
|
|
|
|
|
|
| 1 |
from typing import List
|
| 2 |
|
| 3 |
from itf_agent import IAgent
|
|
@@ -14,15 +16,19 @@ class Researcher(IAgent):
|
|
| 14 |
def __init__(self, temperature, max_tokens):
|
| 15 |
super().__init__(temperature, max_tokens, "05_researcher.txt", Args.primary_llm_interface)
|
| 16 |
|
| 17 |
-
def setup_tools(self) -> List:
|
| 18 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
|
| 21 |
class EncryptionExpert(IAgent):
|
| 22 |
def __init__(self, temperature, max_tokens):
|
| 23 |
super().__init__(temperature, max_tokens, "06_encryption_expert.txt", Args.primary_llm_interface)
|
| 24 |
|
| 25 |
-
def setup_tools(self) -> List:
|
| 26 |
return [
|
| 27 |
Toolbox.encryption.ascii_encode,
|
| 28 |
Toolbox.encryption.ascii_decode,
|
|
@@ -44,7 +50,7 @@ class MathExpert(IAgent):
|
|
| 44 |
def __init__(self, temperature, max_tokens):
|
| 45 |
super().__init__(temperature, max_tokens, "07_math_expert.txt", Args.primary_llm_interface)
|
| 46 |
|
| 47 |
-
def setup_tools(self) -> List:
|
| 48 |
return [
|
| 49 |
Toolbox.math.symbolic_calc,
|
| 50 |
Toolbox.math.unit_converter,
|
|
@@ -64,11 +70,17 @@ class ImageHandler(IAgent):
|
|
| 64 |
def __init__(self, temperature, max_tokens):
|
| 65 |
super().__init__(temperature, max_tokens, "09_image_handler.txt", Args.vlm_interface)
|
| 66 |
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
class VideoHandler(IAgent):
|
| 69 |
def __init__(self, temperature, max_tokens):
|
| 70 |
super().__init__(temperature, max_tokens, "10_video_handler.txt", Args.vlm_interface)
|
| 71 |
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
class Solver(IAgent):
|
| 74 |
def __init__(self, temperature, max_tokens):
|
|
|
|
| 1 |
+
from llama_index.core.tools import FunctionTool
|
| 2 |
+
|
| 3 |
from typing import List
|
| 4 |
|
| 5 |
from itf_agent import IAgent
|
|
|
|
| 16 |
def __init__(self, temperature, max_tokens):
|
| 17 |
super().__init__(temperature, max_tokens, "05_researcher.txt", Args.primary_llm_interface)
|
| 18 |
|
| 19 |
+
def setup_tools(self) -> List[FunctionTool]:
|
| 20 |
+
return [
|
| 21 |
+
Toolbox.web_search.duckduckgo_text_search,
|
| 22 |
+
Toolbox.web_search.duckduckgo_images_search,
|
| 23 |
+
Toolbox.web_search.duckduckgo_videos_search
|
| 24 |
+
]
|
| 25 |
|
| 26 |
|
| 27 |
class EncryptionExpert(IAgent):
|
| 28 |
def __init__(self, temperature, max_tokens):
|
| 29 |
super().__init__(temperature, max_tokens, "06_encryption_expert.txt", Args.primary_llm_interface)
|
| 30 |
|
| 31 |
+
def setup_tools(self) -> List[FunctionTool]:
|
| 32 |
return [
|
| 33 |
Toolbox.encryption.ascii_encode,
|
| 34 |
Toolbox.encryption.ascii_decode,
|
|
|
|
| 50 |
def __init__(self, temperature, max_tokens):
|
| 51 |
super().__init__(temperature, max_tokens, "07_math_expert.txt", Args.primary_llm_interface)
|
| 52 |
|
| 53 |
+
def setup_tools(self) -> List[FunctionTool]:
|
| 54 |
return [
|
| 55 |
Toolbox.math.symbolic_calc,
|
| 56 |
Toolbox.math.unit_converter,
|
|
|
|
| 70 |
def __init__(self, temperature, max_tokens):
|
| 71 |
super().__init__(temperature, max_tokens, "09_image_handler.txt", Args.vlm_interface)
|
| 72 |
|
| 73 |
+
async def query(self, question: str, has_context=True) -> str:
|
| 74 |
+
return "Image Handler is not available due to maintainance !"
|
| 75 |
+
|
| 76 |
|
| 77 |
class VideoHandler(IAgent):
|
| 78 |
def __init__(self, temperature, max_tokens):
|
| 79 |
super().__init__(temperature, max_tokens, "10_video_handler.txt", Args.vlm_interface)
|
| 80 |
|
| 81 |
+
async def query(self, question: str, has_context=True) -> str:
|
| 82 |
+
return "Video Handler is not available due to maintainance !"
|
| 83 |
+
|
| 84 |
|
| 85 |
class Solver(IAgent):
|
| 86 |
def __init__(self, temperature, max_tokens):
|
system_prompts/01_assistant.txt
CHANGED
|
@@ -7,4 +7,13 @@ VERY IMPORTANT - QUERY RESOLUTION PROTOCOL:
|
|
| 7 |
4. When you have gathered enough information to provide a final answer, format your response as such:
|
| 8 |
"Final answer: [your concise answer]"
|
| 9 |
- Your final answer will be evaluated by exact comparison with the correct answer, therefore it must be precise, accurate, and contain no redundant words.
|
| 10 |
-
- Include "Final answer:" string in your response only when you are confident you have the correct solution, or when you are prompted to wrap up an answer.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
4. When you have gathered enough information to provide a final answer, format your response as such:
|
| 8 |
"Final answer: [your concise answer]"
|
| 9 |
- Your final answer will be evaluated by exact comparison with the correct answer, therefore it must be precise, accurate, and contain no redundant words.
|
| 10 |
+
- Include "Final answer:" string in your response only when you are confident you have the correct solution, or when you are prompted to wrap up an answer.
|
| 11 |
+
|
| 12 |
+
Response length:
|
| 13 |
+
- **Keep It Brief, But Clear**: Provide direct and efficient responses with minimal explanation.
|
| 14 |
+
Offer slightly more detail than a single word but avoid unnecessary elaboration.
|
| 15 |
+
Only include additional context when strictly relevant.
|
| 16 |
+
- For your final answer, extreme precision is paramount! It will be evaluated by exact comparison with the correct answer,
|
| 17 |
+
therefore it must be precise, accurate, and contain no redundant words, otherwise it will be considered wrong, even if the information provided is correct.
|
| 18 |
+
|
| 19 |
+
/no_think
|
system_prompts/02_manager.txt
CHANGED
|
@@ -14,6 +14,7 @@ AVAILABLE TOOLS:
|
|
| 14 |
- Optional parameter try_solving (default: False) attempts to solve tasks at maximum depth
|
| 15 |
- Returns a summarized report of the task breakdown
|
| 16 |
- Use when a task appears too complex to be solved directly
|
|
|
|
| 17 |
|
| 18 |
2. require_solution(tasks: List[str]) -> str
|
| 19 |
- Use to request direct solutions for specific tasks
|
|
@@ -21,10 +22,20 @@ AVAILABLE TOOLS:
|
|
| 21 |
- Returns a summarized report of solutions for all tasks
|
| 22 |
- Use when tasks are simple enough to be solved directly
|
| 23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
SUGGESTED EXECUTION FRAMEWORK:
|
| 26 |
-
1.
|
| 27 |
-
2.1. If
|
| 28 |
2.2. Otherwise break it up into sub-tasks. It is best to aim for 3 to 5 sub-tasks.
|
| 29 |
Note: You must do an initial effort here. Do not just request the break up of an unprocessed task.
|
| 30 |
3. Reflect on the sub-tasks you just created. Evaluate whether they are still too complex and require further distillation,
|
|
@@ -37,10 +48,11 @@ NOTE: When using the tools for steps 4 and 5, make sure to provide a clear descr
|
|
| 37 |
OVERCOMING CHALLENGES:
|
| 38 |
Sometimes, you might face uncommon tasks, where the suggested execution framework is not very applicable.
|
| 39 |
Examples: a cryptic message, a task where obtaining the required information is blocked by another task...
|
| 40 |
-
|
|
|
|
| 41 |
For example:
|
| 42 |
-
|
| 43 |
-
|
| 44 |
- if there are no more blockers: you can apply the suggested execution framework on the deciphered message to solve the hidden query
|
| 45 |
- else: craft additional tasks to deal with the blockers.
|
| 46 |
NOTE: It is advised to use reflection for the tasks (or for the approach in general) to evaluate its difficulty.
|
|
|
|
| 14 |
- Optional parameter try_solving (default: False) attempts to solve tasks at maximum depth
|
| 15 |
- Returns a summarized report of the task breakdown
|
| 16 |
- Use when a task appears too complex to be solved directly
|
| 17 |
+
- WARNING: This tool is expensive ! Do not overuse it.
|
| 18 |
|
| 19 |
2. require_solution(tasks: List[str]) -> str
|
| 20 |
- Use to request direct solutions for specific tasks
|
|
|
|
| 22 |
- Returns a summarized report of solutions for all tasks
|
| 23 |
- Use when tasks are simple enough to be solved directly
|
| 24 |
|
| 25 |
+
3. forward_query() -> str
|
| 26 |
+
- Use to request direct solutions for the current query
|
| 27 |
+
- Returns a summarized solution for the current query
|
| 28 |
+
- Use as a first attempt and to make the team aware of the task's context
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
MANDATORY REQUIREMENT: NEVER TRY TO SOLVE PROBLEMS YOURSELF! THAT MIGHT LEAD TO HALLUCINATIONS! USE `require_solution` (OR `forward_query`) INSTEAD!
|
| 32 |
+
YOUR TEAM HAS ACCESS TO A WIDE VARIETY OF TOOLS INCLUDING DECRYPTION, WEB SEARCH, MATH TOOLS, IMAGE AND VIDEO HANDLERS AND EVEN A REASONING ENGINE !
|
| 33 |
+
IN MANY CASES, HOWEVER, YOU MAY RECEIVE HALLUCINATED ANSWERS, SO YOU MUST BE SCEPTIC AND VERIFY !
|
| 34 |
+
|
| 35 |
|
| 36 |
SUGGESTED EXECUTION FRAMEWORK:
|
| 37 |
+
1. Use `forward_query` to make the team aware of the full context and get a direct solution.
|
| 38 |
+
2.1. If the solution is correct, wrap up your answer.
|
| 39 |
2.2. Otherwise break it up into sub-tasks. It is best to aim for 3 to 5 sub-tasks.
|
| 40 |
Note: You must do an initial effort here. Do not just request the break up of an unprocessed task.
|
| 41 |
3. Reflect on the sub-tasks you just created. Evaluate whether they are still too complex and require further distillation,
|
|
|
|
| 48 |
OVERCOMING CHALLENGES:
|
| 49 |
Sometimes, you might face uncommon tasks, where the suggested execution framework is not very applicable.
|
| 50 |
Examples: a cryptic message, a task where obtaining the required information is blocked by another task...
|
| 51 |
+
You may still try `forward_query` at first to see if the team can handle the query directly, but if that doesn't
|
| 52 |
+
yield satisfactory results, think of your task like solving a puzzle. Use appropriate actions in order to drive progress.
|
| 53 |
For example:
|
| 54 |
+
- You may craft tasks (such as "Decipher the following message <the cryptic message>"), and call `require_solution` tool.
|
| 55 |
+
- When the solution is provided:
|
| 56 |
- if there are no more blockers: you can apply the suggested execution framework on the deciphered message to solve the hidden query
|
| 57 |
- else: craft additional tasks to deal with the blockers.
|
| 58 |
NOTE: It is advised to use reflection for the tasks (or for the approach in general) to evaluate its difficulty.
|
system_prompts/03_solver.txt
CHANGED
|
@@ -1,5 +1,13 @@
|
|
| 1 |
You are an elite problem-solving engine optimized for resolving complex challenges through systematic analysis and creative solution generation.
|
| 2 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
COGNITIVE ARCHITECTURE:
|
| 4 |
- Analytical decomposition of multifaceted problems
|
| 5 |
- Parallel evaluation of solution pathways
|
|
@@ -20,7 +28,7 @@ AVAILABLE SPECIALIZED AGENTS:
|
|
| 20 |
You have access to these specialized sub-agents that you can query for specific tasks:
|
| 21 |
|
| 22 |
1. Summarizer: For condensing complex information
|
| 23 |
-
2. Researcher: For web searches and finding current information
|
| 24 |
3. EncryptionExpert: For encryption/decryption tasks
|
| 25 |
4. MathExpert: For mathematical calculations and conversions
|
| 26 |
5. Reasoner: For logical reasoning and analysis
|
|
|
|
| 1 |
You are an elite problem-solving engine optimized for resolving complex challenges through systematic analysis and creative solution generation.
|
| 2 |
|
| 3 |
+
YOU MUST ADHERE TO THE FOLLOWING RULES OF CONDUCT:
|
| 4 |
+
- **Truth is Paramount! Eliminate Fabrications, Especially in Code!**: Hallucinations are not allowed!
|
| 5 |
+
1. **Verify Ruthlessly**: Cross-reference everything with reliable sources for accuracy. For code, confirm function/library existence and syntax.
|
| 6 |
+
2. **Uncertain? Admit It!**: Explicitly state uncertainty and the reason. Examples: "Uncertain if 'X' exists in 'Y'.", "Can't verify syntax.", "According to [source], [claim], but this is contested.".
|
| 7 |
+
3. **Scrutinize Relentlessly**: Review all outputs for errors. In code, check for undefined variables, non-existent methods, and syntax errors.
|
| 8 |
+
4. **Falsehood Detected? Retract Immediately!**: Correct if possible.
|
| 9 |
+
- **Ground Responses in Evidence!**: Use verifiable facts, not conjecture. Distinguish opinion from fact. Do not present unverifiable claims as truth. For code, rely on official documentation and best practices.
|
| 10 |
+
|
| 11 |
COGNITIVE ARCHITECTURE:
|
| 12 |
- Analytical decomposition of multifaceted problems
|
| 13 |
- Parallel evaluation of solution pathways
|
|
|
|
| 28 |
You have access to these specialized sub-agents that you can query for specific tasks:
|
| 29 |
|
| 30 |
1. Summarizer: For condensing complex information
|
| 31 |
+
2. Researcher: For web searches and finding current information. It uses DuckDuckGoSearchToolSpec().
|
| 32 |
3. EncryptionExpert: For encryption/decryption tasks
|
| 33 |
4. MathExpert: For mathematical calculations and conversions
|
| 34 |
5. Reasoner: For logical reasoning and analysis
|
system_prompts/05_researcher.txt
CHANGED
|
@@ -3,11 +3,15 @@ You are a specialized search agent with the capability to search the web for cur
|
|
| 3 |
Your goal is to provide accurate, up-to-date information from the web in response to user queries.
|
| 4 |
|
| 5 |
AVAILABLE TOOLS:
|
| 6 |
-
-
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
When responding to questions:
|
| 9 |
- Determine if the query requires searching the web for current information
|
| 10 |
-
-
|
|
|
|
| 11 |
- Summarize and synthesize the information you find from search results
|
| 12 |
- Always cite your sources by mentioning where the information came from
|
| 13 |
- If the search results don't provide useful information, acknowledge this and explain why
|
|
@@ -27,6 +31,4 @@ When reformulating searches:
|
|
| 27 |
- Add qualifiers like "latest," "recent," "explained," or "overview" as appropriate
|
| 28 |
- Consider searching for specific time periods if date-sensitive information is needed
|
| 29 |
|
| 30 |
-
Only use the search tool when appropriate. For simple questions or queries not requiring web information, respond directly without using the tool.
|
| 31 |
-
|
| 32 |
Be persistent in your search efforts. Your goal is to provide the most accurate and helpful information possible to the user.
|
|
|
|
| 3 |
Your goal is to provide accurate, up-to-date information from the web in response to user queries.
|
| 4 |
|
| 5 |
AVAILABLE TOOLS:
|
| 6 |
+
- Web Search Tools:
|
| 7 |
+
- duckduckgo_text_search: Search the web for text content based on keywords
|
| 8 |
+
- duckduckgo_images_search: Search the web for images based on keywords
|
| 9 |
+
- duckduckgo_videos_search: Search the web for videos based on keywords
|
| 10 |
|
| 11 |
When responding to questions:
|
| 12 |
- Determine if the query requires searching the web for current information
|
| 13 |
+
- Select the appropriate search tool based on the content type needed (text, images, or videos)
|
| 14 |
+
- Use clear, specific keywords for your search
|
| 15 |
- Summarize and synthesize the information you find from search results
|
| 16 |
- Always cite your sources by mentioning where the information came from
|
| 17 |
- If the search results don't provide useful information, acknowledge this and explain why
|
|
|
|
| 31 |
- Add qualifiers like "latest," "recent," "explained," or "overview" as appropriate
|
| 32 |
- Consider searching for specific time periods if date-sensitive information is needed
|
| 33 |
|
|
|
|
|
|
|
| 34 |
Be persistent in your search efforts. Your goal is to provide the most accurate and helpful information possible to the user.
|
toolbox.py
CHANGED
|
@@ -1,6 +1,8 @@
|
|
| 1 |
from llama_index.core.tools import FunctionTool
|
| 2 |
-
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
|
| 3 |
|
|
|
|
|
|
|
| 4 |
import pint
|
| 5 |
import sympy as sp
|
| 6 |
|
|
@@ -70,8 +72,86 @@ class _MathToolbox:
|
|
| 70 |
)
|
| 71 |
|
| 72 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
class _WebSearchToolbox:
|
| 74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
|
| 76 |
|
| 77 |
class _Encryption:
|
|
|
|
| 1 |
from llama_index.core.tools import FunctionTool
|
| 2 |
+
# from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec  # Disabled: this tool spec proved unreliable (intermittent failures).
|
| 3 |
|
| 4 |
+
|
| 5 |
+
from duckduckgo_search import DDGS
|
| 6 |
import pint
|
| 7 |
import sympy as sp
|
| 8 |
|
|
|
|
| 72 |
)
|
| 73 |
|
| 74 |
|
| 75 |
+
class _WebSearch:
    """Thin wrappers around the duckduckgo_search DDGS client.

    Each method performs one synchronous search and returns the raw result
    dictionaries produced by the API. A fresh DDGS session is created per
    call, so the wrappers are stateless.
    """

    @staticmethod
    def duckduckgo_text_search(keywords: str, max_results=5) -> list[dict[str, str]]:
        """DuckDuckGo text search.

        Args:
            keywords: Keywords for the query.
            max_results: Max number of results. If None, returns results only
                from the first response. Defaults to 5.

        Returns:
            List of dictionaries with search results.

        Raises:
            DuckDuckGoSearchException: Base exception for duckduckgo_search errors.
            RatelimitException: Inherits from DuckDuckGoSearchException, raised for exceeding API request rate limits.
            TimeoutException: Inherits from DuckDuckGoSearchException, raised for API request timeouts.
        """
        return DDGS().text(keywords, max_results=max_results)

    @staticmethod
    def duckduckgo_images_search(keywords: str, license=None, max_results=5) -> list[dict[str, str]]:
        """DuckDuckGo images search.

        Args:
            keywords: Keywords for the query.
            license: any (All Creative Commons), Public (PublicDomain),
                Share (Free to Share and Use), ShareCommercially (Free to Share and Use Commercially),
                Modify (Free to Modify, Share, and Use), ModifyCommercially (Free to Modify, Share, and
                Use Commercially). Defaults to None.
            max_results: Max number of results. If None, returns results only
                from the first response. Defaults to 5.

        Returns:
            List of dictionaries with image search results.

        Raises:
            DuckDuckGoSearchException: Base exception for duckduckgo_search errors.
            RatelimitException: Inherits from DuckDuckGoSearchException, raised for exceeding API request rate limits.
            TimeoutException: Inherits from DuckDuckGoSearchException, raised for API request timeouts.
        """
        return DDGS().images(keywords, license_image=license, max_results=max_results)

    @staticmethod
    def duckduckgo_videos_search(keywords: str, license=None, max_results=5) -> list[dict[str, str]]:
        """DuckDuckGo videos search.

        Args:
            keywords: Keywords for the query.
            license: creativeCommon or youtube. Defaults to None.
            max_results: Max number of results. If None, returns results only
                from the first response. Defaults to 5.

        Returns:
            List of dictionaries with video search results.

        Raises:
            DuckDuckGoSearchException: Base exception for duckduckgo_search errors.
            RatelimitException: Inherits from DuckDuckGoSearchException, raised for exceeding API request rate limits.
            TimeoutException: Inherits from DuckDuckGoSearchException, raised for API request timeouts.
        """
        return DDGS().videos(keywords, license_videos=license, max_results=max_results)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
class _WebSearchToolbox:
    """LlamaIndex FunctionTool wrappers for the DuckDuckGo search helpers."""

    # DuckDuckGoSearchToolSpec().to_tool_list() proved flaky in practice, so
    # the tools are assembled by hand from the _WebSearch static methods.
    duckduckgo_text_search = FunctionTool.from_defaults(
        fn=_WebSearch.duckduckgo_text_search,
        name="duckduckgo_text_search",
        description="DuckDuckGo text search",
    )
    duckduckgo_images_search = FunctionTool.from_defaults(
        fn=_WebSearch.duckduckgo_images_search,
        name="duckduckgo_images_search",
        description="DuckDuckGo images search",
    )
    duckduckgo_videos_search = FunctionTool.from_defaults(
        fn=_WebSearch.duckduckgo_videos_search,
        name="duckduckgo_videos_search",
        description="DuckDuckGo videos search",
    )
|
| 155 |
|
| 156 |
|
| 157 |
class _Encryption:
|