Spaces:
Sleeping
Sleeping
embracing langgraph; addapted LLMFactory, Args.LLMInterface, IAgent, Toolbox and agents.py; simplified graph: removed math and encryption agents while promoting the reasoner
Browse files- agents.py +53 -31
- args.py +6 -5
- design.puml +0 -24
- design.yaml +1 -21
- graph.py +7 -48
- graph_builder.py +1 -21
- itf_agent.py +19 -83
- llm_factory.py +103 -47
- requirements.txt +4 -0
- system_prompts/11_output_guard.txt +9 -0
- test.py +2 -118
- toolbox.py +3 -80
agents.py
CHANGED
|
@@ -1,82 +1,104 @@
|
|
| 1 |
from args import Args
|
| 2 |
from itf_agent import IAgent
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
|
| 5 |
class Manager(IAgent):
|
| 6 |
"""
|
| 7 |
Orchestrates the workflow by delegating tasks to specialized nodes and integrating their outputs
|
| 8 |
"""
|
| 9 |
-
def __init__(self
|
| 10 |
-
super().__init__(
|
| 11 |
|
| 12 |
|
| 13 |
class Auditor(IAgent):
|
| 14 |
"""
|
| 15 |
Reviews manager's outputs for accuracy, safety, and quality
|
| 16 |
"""
|
| 17 |
-
def __init__(self
|
| 18 |
-
super().__init__(
|
| 19 |
|
| 20 |
|
| 21 |
class Summarizer(IAgent):
|
| 22 |
"""
|
| 23 |
Generates concise summaries of conversations or passages.
|
| 24 |
"""
|
| 25 |
-
def __init__(self
|
| 26 |
-
super().__init__(
|
| 27 |
|
| 28 |
|
| 29 |
class Solver(IAgent):
|
| 30 |
"""
|
| 31 |
Central problem-solving node that coordinates with specialized experts based on task requirements
|
| 32 |
"""
|
| 33 |
-
def __init__(self
|
| 34 |
-
super().__init__(
|
| 35 |
|
| 36 |
|
| 37 |
class Researcher(IAgent):
|
| 38 |
"""
|
| 39 |
Retrieves and synthesizes information from various sources to answer knowledge-based questions
|
| 40 |
"""
|
| 41 |
-
def __init__(self
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
def __init__(self, temperature, max_tokens):
|
| 50 |
-
super().__init__(temperature, max_tokens, "06_encryption_expert.txt", Args.primary_llm_interface)
|
| 51 |
|
| 52 |
|
| 53 |
-
class
|
| 54 |
"""
|
| 55 |
-
Performs
|
| 56 |
"""
|
| 57 |
-
def __init__(self
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
"""
|
| 63 |
Performs logical reasoning, inference, and step-by-step problem-solving
|
| 64 |
"""
|
| 65 |
-
def __init__(self
|
| 66 |
-
super().__init__(
|
| 67 |
|
| 68 |
|
| 69 |
class ImageHandler(IAgent):
|
| 70 |
"""
|
| 71 |
Processes, analyzes, and generates information related to images
|
| 72 |
"""
|
| 73 |
-
def __init__(self
|
| 74 |
-
super().__init__(
|
| 75 |
|
| 76 |
|
| 77 |
class VideoHandler(IAgent):
|
| 78 |
"""
|
| 79 |
Processes, analyzes, and generates information related to videos
|
| 80 |
"""
|
| 81 |
-
def __init__(self
|
| 82 |
-
super().__init__(
|
|
|
|
| 1 |
from args import Args
|
| 2 |
from itf_agent import IAgent
|
| 3 |
+
from llm_factory import AgentPreset
|
| 4 |
+
from toolbox import Toolbox
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
PRIMARY_AGENT_PRESET = AgentPreset(Args.primary_llm_interface, Args.primary_model,
|
| 8 |
+
temperature = None, max_tokens = 2048, repeat_penalty = None)
|
| 9 |
+
SECONDARY_AGENT_PRESET = AgentPreset(Args.primary_llm_interface, Args.secondary_model,
|
| 10 |
+
temperature = None, max_tokens = 2048, repeat_penalty = None)
|
| 11 |
+
VISION_AGENT_PRESET = AgentPreset(Args.vlm_interface, Args.vision_model,
|
| 12 |
+
temperature = None, max_tokens = 2048, repeat_penalty = None)
|
| 13 |
|
| 14 |
|
| 15 |
class Manager(IAgent):
|
| 16 |
"""
|
| 17 |
Orchestrates the workflow by delegating tasks to specialized nodes and integrating their outputs
|
| 18 |
"""
|
| 19 |
+
def __init__(self):
|
| 20 |
+
super().__init__("01_manager.txt", PRIMARY_AGENT_PRESET)
|
| 21 |
|
| 22 |
|
| 23 |
class Auditor(IAgent):
|
| 24 |
"""
|
| 25 |
Reviews manager's outputs for accuracy, safety, and quality
|
| 26 |
"""
|
| 27 |
+
def __init__(self):
|
| 28 |
+
super().__init__("02_auditor.txt", PRIMARY_AGENT_PRESET)
|
| 29 |
|
| 30 |
|
| 31 |
class Summarizer(IAgent):
|
| 32 |
"""
|
| 33 |
Generates concise summaries of conversations or passages.
|
| 34 |
"""
|
| 35 |
+
def __init__(self):
|
| 36 |
+
super().__init__("04_summarizer.txt", PRIMARY_AGENT_PRESET)
|
| 37 |
|
| 38 |
|
| 39 |
class Solver(IAgent):
|
| 40 |
"""
|
| 41 |
Central problem-solving node that coordinates with specialized experts based on task requirements
|
| 42 |
"""
|
| 43 |
+
def __init__(self):
|
| 44 |
+
super().__init__("03_solver.txt", PRIMARY_AGENT_PRESET)
|
| 45 |
|
| 46 |
|
| 47 |
class Researcher(IAgent):
|
| 48 |
"""
|
| 49 |
Retrieves and synthesizes information from various sources to answer knowledge-based questions
|
| 50 |
"""
|
| 51 |
+
def __init__(self):
|
| 52 |
+
toolbox = Toolbox.web_search
|
| 53 |
+
tools = [
|
| 54 |
+
toolbox.duckduckgo_text_search,
|
| 55 |
+
toolbox.duckduckgo_images_search,
|
| 56 |
+
toolbox.duckduckgo_videos_search
|
| 57 |
+
]
|
| 58 |
+
super().__init__("05_researcher.txt", PRIMARY_AGENT_PRESET, tools)
|
|
|
|
|
|
|
| 59 |
|
| 60 |
|
| 61 |
+
class Reasoner(IAgent):
|
| 62 |
"""
|
| 63 |
+
Performs logical reasoning, inference, and step-by-step problem-solving
|
| 64 |
"""
|
| 65 |
+
def __init__(self):
|
| 66 |
+
math_toolbox = Toolbox.math
|
| 67 |
+
encryption_toolbox = Toolbox.encryption
|
| 68 |
+
tools = [
|
| 69 |
+
math_toolbox.symbolic_calc,
|
| 70 |
+
math_toolbox.unit_converter,
|
| 71 |
+
encryption_toolbox.ascii_decode,
|
| 72 |
+
encryption_toolbox.ascii_encode,
|
| 73 |
+
encryption_toolbox.base64_decode,
|
| 74 |
+
encryption_toolbox.base64_encode,
|
| 75 |
+
encryption_toolbox.caesar_cipher_decode,
|
| 76 |
+
encryption_toolbox.caesar_cipher_encode,
|
| 77 |
+
encryption_toolbox.caesar_cipher_brute_force,
|
| 78 |
+
encryption_toolbox.reverse_string
|
| 79 |
+
]
|
| 80 |
+
super().__init__("08_reasoner.txt", PRIMARY_AGENT_PRESET, tools)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class OutputGuard(IAgent):
|
| 84 |
"""
|
| 85 |
Performs logical reasoning, inference, and step-by-step problem-solving
|
| 86 |
"""
|
| 87 |
+
def __init__(self):
|
| 88 |
+
super().__init__("11_output_guard.txt", SECONDARY_AGENT_PRESET)
|
| 89 |
|
| 90 |
|
| 91 |
class ImageHandler(IAgent):
|
| 92 |
"""
|
| 93 |
Processes, analyzes, and generates information related to images
|
| 94 |
"""
|
| 95 |
+
def __init__(self):
|
| 96 |
+
super().__init__("09_image_handler.txt", VISION_AGENT_PRESET)
|
| 97 |
|
| 98 |
|
| 99 |
class VideoHandler(IAgent):
|
| 100 |
"""
|
| 101 |
Processes, analyzes, and generates information related to videos
|
| 102 |
"""
|
| 103 |
+
def __init__(self):
|
| 104 |
+
super().__init__("10_video_handler.txt", VISION_AGENT_PRESET)
|
args.py
CHANGED
|
@@ -5,18 +5,19 @@ from logger import Logger
|
|
| 5 |
|
| 6 |
|
| 7 |
class LLMInterface(Enum):
|
| 8 |
-
HUGGINGFACE = "HuggingFace"
|
| 9 |
-
OPENAILIKE = "OpenAILike"
|
| 10 |
OPENAI = "OpenAI"
|
|
|
|
| 11 |
# Add your own if you like (then adjust the LLMFactory)
|
| 12 |
|
| 13 |
|
| 14 |
class Args:
|
| 15 |
LOGGER = Logger.set_logger()
|
| 16 |
-
primary_llm_interface=LLMInterface.
|
| 17 |
# secondary_llm_interface=LLMInterface.HUGGINGFACE
|
| 18 |
vlm_interface=LLMInterface.HUGGINGFACE
|
| 19 |
-
|
|
|
|
|
|
|
| 20 |
api_base="http://127.0.0.1:1234/v1" # LM Studio local endpoint
|
| 21 |
-
api_key=
|
| 22 |
token = "" # Not needed when using OpenAILike API
|
|
|
|
| 5 |
|
| 6 |
|
| 7 |
class LLMInterface(Enum):
|
|
|
|
|
|
|
| 8 |
OPENAI = "OpenAI"
|
| 9 |
+
HUGGINGFACE = "HuggingFace"
|
| 10 |
# Add your own if you like (then adjust the LLMFactory)
|
| 11 |
|
| 12 |
|
| 13 |
class Args:
|
| 14 |
LOGGER = Logger.set_logger()
|
| 15 |
+
primary_llm_interface=LLMInterface.OPENAI
|
| 16 |
# secondary_llm_interface=LLMInterface.HUGGINGFACE
|
| 17 |
vlm_interface=LLMInterface.HUGGINGFACE
|
| 18 |
+
primary_model="qwen2.5-qwq-35b-eureka-cubed-abliterated-uncensored"
|
| 19 |
+
secondary_model="qwen2.5-7b-instruct-1m"
|
| 20 |
+
vision_model="gemma-3-27b-it"
|
| 21 |
api_base="http://127.0.0.1:1234/v1" # LM Studio local endpoint
|
| 22 |
+
api_key=None
|
| 23 |
token = "" # Not needed when using OpenAILike API
|
design.puml
CHANGED
|
@@ -28,22 +28,6 @@ node researcher NOT_IMPLEMENTED_NODE_COLOR[
|
|
| 28 |
researcher
|
| 29 |
]
|
| 30 |
|
| 31 |
-
node encryption_expert NOT_IMPLEMENTED_NODE_COLOR[
|
| 32 |
-
encryption_expert
|
| 33 |
-
]
|
| 34 |
-
|
| 35 |
-
node encryption_advisor NOT_IMPLEMENTED_NODE_COLOR[
|
| 36 |
-
encryption_advisor
|
| 37 |
-
]
|
| 38 |
-
|
| 39 |
-
node math_expert NOT_IMPLEMENTED_NODE_COLOR[
|
| 40 |
-
math_expert
|
| 41 |
-
]
|
| 42 |
-
|
| 43 |
-
node math_advisor NOT_IMPLEMENTED_NODE_COLOR[
|
| 44 |
-
math_advisor
|
| 45 |
-
]
|
| 46 |
-
|
| 47 |
node reasoner NOT_IMPLEMENTED_NODE_COLOR[
|
| 48 |
reasoner
|
| 49 |
]
|
|
@@ -68,18 +52,10 @@ final_answer --> END
|
|
| 68 |
auditor --> manager
|
| 69 |
solver --> manager
|
| 70 |
solver --> researcher
|
| 71 |
-
solver --> encryption_expert
|
| 72 |
-
solver --> math_expert
|
| 73 |
solver --> reasoner
|
| 74 |
solver --> image_handler
|
| 75 |
solver --> video_handler
|
| 76 |
researcher --> solver
|
| 77 |
-
encryption_expert --> solver
|
| 78 |
-
encryption_expert --> encryption_advisor
|
| 79 |
-
encryption_advisor --> encryption_expert
|
| 80 |
-
math_expert --> solver
|
| 81 |
-
math_expert --> math_advisor
|
| 82 |
-
math_advisor --> math_expert
|
| 83 |
reasoner --> solver
|
| 84 |
image_handler --> solver
|
| 85 |
video_handler --> solver
|
|
|
|
| 28 |
researcher
|
| 29 |
]
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
node reasoner NOT_IMPLEMENTED_NODE_COLOR[
|
| 32 |
reasoner
|
| 33 |
]
|
|
|
|
| 52 |
auditor --> manager
|
| 53 |
solver --> manager
|
| 54 |
solver --> researcher
|
|
|
|
|
|
|
| 55 |
solver --> reasoner
|
| 56 |
solver --> image_handler
|
| 57 |
solver --> video_handler
|
| 58 |
researcher --> solver
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
reasoner --> solver
|
| 60 |
image_handler --> solver
|
| 61 |
video_handler --> solver
|
design.yaml
CHANGED
|
@@ -21,7 +21,7 @@ nodes:
|
|
| 21 |
status: NOT_IMPLEMENTED
|
| 22 |
|
| 23 |
- name: solver
|
| 24 |
-
connections: [manager, researcher,
|
| 25 |
description: Central problem-solving node that coordinates with specialized experts based on task requirements
|
| 26 |
status: NOT_IMPLEMENTED
|
| 27 |
|
|
@@ -30,26 +30,6 @@ nodes:
|
|
| 30 |
description: Retrieves and synthesizes information from various sources to answer knowledge-based questions
|
| 31 |
status: NOT_IMPLEMENTED
|
| 32 |
|
| 33 |
-
- name: encryption_expert
|
| 34 |
-
connections: [solver, encryption_advisor]
|
| 35 |
-
description: Handles encryption/decryption tasks and encoding/decoding operations
|
| 36 |
-
status: NOT_IMPLEMENTED
|
| 37 |
-
|
| 38 |
-
- name: encryption_advisor
|
| 39 |
-
connections: [encryption_expert]
|
| 40 |
-
description: Provides specialized guidance on complex encryption problems to the encryption expert
|
| 41 |
-
status: NOT_IMPLEMENTED
|
| 42 |
-
|
| 43 |
-
- name: math_expert
|
| 44 |
-
connections: [solver, math_advisor]
|
| 45 |
-
description: Performs mathematical calculations and solves numerical problems
|
| 46 |
-
status: NOT_IMPLEMENTED
|
| 47 |
-
|
| 48 |
-
- name: math_advisor
|
| 49 |
-
connections: [math_expert]
|
| 50 |
-
description: Provides specialized guidance on complex mathematical problems to the math expert
|
| 51 |
-
status: NOT_IMPLEMENTED
|
| 52 |
-
|
| 53 |
- name: reasoner
|
| 54 |
connections: [solver]
|
| 55 |
description: Performs logical reasoning, inference, and step-by-step problem-solving
|
|
|
|
| 21 |
status: NOT_IMPLEMENTED
|
| 22 |
|
| 23 |
- name: solver
|
| 24 |
+
connections: [manager, researcher, reasoner, image_handler, video_handler]
|
| 25 |
description: Central problem-solving node that coordinates with specialized experts based on task requirements
|
| 26 |
status: NOT_IMPLEMENTED
|
| 27 |
|
|
|
|
| 30 |
description: Retrieves and synthesizes information from various sources to answer knowledge-based questions
|
| 31 |
status: NOT_IMPLEMENTED
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
- name: reasoner
|
| 34 |
connections: [solver]
|
| 35 |
description: Performs logical reasoning, inference, and step-by-step problem-solving
|
graph.py
CHANGED
|
@@ -1,7 +1,9 @@
|
|
| 1 |
-
from langchain_core.messages import AIMessage, HumanMessage
|
| 2 |
from langgraph.graph import START, END, StateGraph
|
|
|
|
|
|
|
| 3 |
|
| 4 |
-
from typing import Any, Dict, List, Literal, Optional, TypedDict
|
| 5 |
import logging
|
| 6 |
from pathlib import Path
|
| 7 |
|
|
@@ -11,7 +13,8 @@ from args import Args
|
|
| 11 |
class State(TypedDict):
|
| 12 |
"""State class for the agent graph."""
|
| 13 |
initial_query: str
|
| 14 |
-
messages: List[Dict[str, Any]]
|
|
|
|
| 15 |
nr_interactions: int
|
| 16 |
final_response: Optional[str]
|
| 17 |
|
|
@@ -55,34 +58,6 @@ class Nodes:
|
|
| 55 |
# TODO: To implement...
|
| 56 |
pass
|
| 57 |
|
| 58 |
-
def encryption_expert_node(self, state: State) -> State:
|
| 59 |
-
"""
|
| 60 |
-
Handles encryption/decryption tasks and encoding/decoding operations
|
| 61 |
-
"""
|
| 62 |
-
# TODO: To implement...
|
| 63 |
-
pass
|
| 64 |
-
|
| 65 |
-
def encryption_advisor_node(self, state: State) -> State:
|
| 66 |
-
"""
|
| 67 |
-
Provides specialized guidance on complex encryption problems to the encryption expert
|
| 68 |
-
"""
|
| 69 |
-
# TODO: To implement...
|
| 70 |
-
pass
|
| 71 |
-
|
| 72 |
-
def math_expert_node(self, state: State) -> State:
|
| 73 |
-
"""
|
| 74 |
-
Performs mathematical calculations and solves numerical problems
|
| 75 |
-
"""
|
| 76 |
-
# TODO: To implement...
|
| 77 |
-
pass
|
| 78 |
-
|
| 79 |
-
def math_advisor_node(self, state: State) -> State:
|
| 80 |
-
"""
|
| 81 |
-
Provides specialized guidance on complex mathematical problems to the math expert
|
| 82 |
-
"""
|
| 83 |
-
# TODO: To implement...
|
| 84 |
-
pass
|
| 85 |
-
|
| 86 |
def reasoner_node(self, state: State) -> State:
|
| 87 |
"""
|
| 88 |
Performs logical reasoning, inference, and step-by-step problem-solving
|
|
@@ -117,26 +92,10 @@ class Edges:
|
|
| 117 |
# TODO: To implement...
|
| 118 |
pass
|
| 119 |
|
| 120 |
-
def solver_edge(self, state: State) -> Literal["manager", "researcher", "
|
| 121 |
"""
|
| 122 |
Conditional edge for solver node.
|
| 123 |
Returns one of: "manager", "researcher", "encryption_expert", "math_expert", "reasoner", "image_handler", "video_handler"
|
| 124 |
"""
|
| 125 |
# TODO: To implement...
|
| 126 |
pass
|
| 127 |
-
|
| 128 |
-
def encryption_expert_edge(self, state: State) -> Literal["solver", "encryption_advisor"]:
|
| 129 |
-
"""
|
| 130 |
-
Conditional edge for encryption_expert node.
|
| 131 |
-
Returns one of: "solver", "encryption_advisor"
|
| 132 |
-
"""
|
| 133 |
-
# TODO: To implement...
|
| 134 |
-
pass
|
| 135 |
-
|
| 136 |
-
def math_expert_edge(self, state: State) -> Literal["solver", "math_advisor"]:
|
| 137 |
-
"""
|
| 138 |
-
Conditional edge for math_expert node.
|
| 139 |
-
Returns one of: "solver", "math_advisor"
|
| 140 |
-
"""
|
| 141 |
-
# TODO: To implement...
|
| 142 |
-
pass
|
|
|
|
| 1 |
+
from langchain_core.messages import AnyMessage, BaseMessage, AIMessage, HumanMessage
|
| 2 |
from langgraph.graph import START, END, StateGraph
|
| 3 |
+
from langgraph.graph.message import add_messages
|
| 4 |
+
from langgraph.prebuilt import ToolNode, tools_condition
|
| 5 |
|
| 6 |
+
from typing import Annotated, Any, Dict, List, Literal, Optional, TypedDict
|
| 7 |
import logging
|
| 8 |
from pathlib import Path
|
| 9 |
|
|
|
|
| 13 |
class State(TypedDict):
|
| 14 |
"""State class for the agent graph."""
|
| 15 |
initial_query: str
|
| 16 |
+
# messages: List[Dict[str, Any]]
|
| 17 |
+
messages: Annotated[list[AnyMessage], add_messages]
|
| 18 |
nr_interactions: int
|
| 19 |
final_response: Optional[str]
|
| 20 |
|
|
|
|
| 58 |
# TODO: To implement...
|
| 59 |
pass
|
| 60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
def reasoner_node(self, state: State) -> State:
|
| 62 |
"""
|
| 63 |
Performs logical reasoning, inference, and step-by-step problem-solving
|
|
|
|
| 92 |
# TODO: To implement...
|
| 93 |
pass
|
| 94 |
|
| 95 |
+
def solver_edge(self, state: State) -> Literal["manager", "researcher", "reasoner", "image_handler", "video_handler"]:
|
| 96 |
"""
|
| 97 |
Conditional edge for solver node.
|
| 98 |
Returns one of: "manager", "researcher", "encryption_expert", "math_expert", "reasoner", "image_handler", "video_handler"
|
| 99 |
"""
|
| 100 |
# TODO: To implement...
|
| 101 |
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
graph_builder.py
CHANGED
|
@@ -22,10 +22,6 @@ class GraphBuilder:
|
|
| 22 |
graph.add_node("auditor", self.nodes.auditor_node)
|
| 23 |
graph.add_node("solver", self.nodes.solver_node)
|
| 24 |
graph.add_node("researcher", self.nodes.researcher_node)
|
| 25 |
-
graph.add_node("encryption_expert", self.nodes.encryption_expert_node)
|
| 26 |
-
graph.add_node("encryption_advisor", self.nodes.encryption_advisor_node)
|
| 27 |
-
graph.add_node("math_expert", self.nodes.math_expert_node)
|
| 28 |
-
graph.add_node("math_advisor", self.nodes.math_advisor_node)
|
| 29 |
graph.add_node("reasoner", self.nodes.reasoner_node)
|
| 30 |
graph.add_node("image_handler", self.nodes.image_handler_node)
|
| 31 |
graph.add_node("video_handler", self.nodes.video_handler_node)
|
|
@@ -34,8 +30,6 @@ class GraphBuilder:
|
|
| 34 |
graph.add_edge("final_answer", END)
|
| 35 |
graph.add_edge("auditor", "manager")
|
| 36 |
graph.add_edge("researcher", "solver")
|
| 37 |
-
graph.add_edge("encryption_advisor", "encryption_expert")
|
| 38 |
-
graph.add_edge("math_advisor", "math_expert")
|
| 39 |
graph.add_edge("reasoner", "solver")
|
| 40 |
graph.add_edge("image_handler", "solver")
|
| 41 |
graph.add_edge("video_handler", "solver")
|
|
@@ -51,21 +45,7 @@ class GraphBuilder:
|
|
| 51 |
"solver",
|
| 52 |
self.edges.solver_edge,
|
| 53 |
{
|
| 54 |
-
"manager": "manager", "researcher": "researcher", "
|
| 55 |
-
}
|
| 56 |
-
)
|
| 57 |
-
graph.add_conditional_edges(
|
| 58 |
-
"encryption_expert",
|
| 59 |
-
self.edges.encryption_expert_edge,
|
| 60 |
-
{
|
| 61 |
-
"solver": "solver", "encryption_advisor": "encryption_advisor"
|
| 62 |
-
}
|
| 63 |
-
)
|
| 64 |
-
graph.add_conditional_edges(
|
| 65 |
-
"math_expert",
|
| 66 |
-
self.edges.math_expert_edge,
|
| 67 |
-
{
|
| 68 |
-
"solver": "solver", "math_advisor": "math_advisor"
|
| 69 |
}
|
| 70 |
)
|
| 71 |
|
|
|
|
| 22 |
graph.add_node("auditor", self.nodes.auditor_node)
|
| 23 |
graph.add_node("solver", self.nodes.solver_node)
|
| 24 |
graph.add_node("researcher", self.nodes.researcher_node)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
graph.add_node("reasoner", self.nodes.reasoner_node)
|
| 26 |
graph.add_node("image_handler", self.nodes.image_handler_node)
|
| 27 |
graph.add_node("video_handler", self.nodes.video_handler_node)
|
|
|
|
| 30 |
graph.add_edge("final_answer", END)
|
| 31 |
graph.add_edge("auditor", "manager")
|
| 32 |
graph.add_edge("researcher", "solver")
|
|
|
|
|
|
|
| 33 |
graph.add_edge("reasoner", "solver")
|
| 34 |
graph.add_edge("image_handler", "solver")
|
| 35 |
graph.add_edge("video_handler", "solver")
|
|
|
|
| 45 |
"solver",
|
| 46 |
self.edges.solver_edge,
|
| 47 |
{
|
| 48 |
+
"manager": "manager", "researcher": "researcher", "reasoner": "reasoner", "image_handler": "image_handler", "video_handler": "video_handler"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
}
|
| 50 |
)
|
| 51 |
|
itf_agent.py
CHANGED
|
@@ -1,32 +1,33 @@
|
|
| 1 |
-
from
|
| 2 |
-
from llama_index.core.workflow import Context
|
| 3 |
|
| 4 |
import logging
|
| 5 |
import os
|
| 6 |
import re
|
| 7 |
from typing import List
|
| 8 |
|
| 9 |
-
from args import Args
|
| 10 |
-
from llm_factory import LLMFactory
|
| 11 |
-
from llama_index.core.agent.workflow import AgentWorkflow
|
| 12 |
|
| 13 |
|
| 14 |
class IAgent():
|
| 15 |
-
def __init__(self,
|
| 16 |
self.name = self._format_name(sys_prompt_filename)
|
| 17 |
-
self.
|
|
|
|
| 18 |
# Load the system prompt from a file
|
| 19 |
system_prompt_path = os.path.join(os.getcwd(), "system_prompts", sys_prompt_filename)
|
| 20 |
self.system_prompt = ""
|
| 21 |
with open(system_prompt_path, "r") as file:
|
| 22 |
self.system_prompt = file.read().strip()
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
|
|
|
|
|
|
| 30 |
|
| 31 |
@staticmethod
|
| 32 |
def _format_name(sys_prompt_filename: str) -> str:
|
|
@@ -36,55 +37,6 @@ class IAgent():
|
|
| 36 |
cleaned_name = re.sub(r'^[^a-zA-Z]+', '', name_without_ext)
|
| 37 |
return cleaned_name
|
| 38 |
|
| 39 |
-
def setup_tools(self) -> List[FunctionTool]:
|
| 40 |
-
"""
|
| 41 |
-
Set up the tools for this agent.
|
| 42 |
-
|
| 43 |
-
Override this method in subclasses to define custom tools.
|
| 44 |
-
By default, returns an empty list.
|
| 45 |
-
|
| 46 |
-
Returns:
|
| 47 |
-
List: A list of tools this agent can use
|
| 48 |
-
"""
|
| 49 |
-
return []
|
| 50 |
-
|
| 51 |
-
def setup_slaves(self) -> List:
|
| 52 |
-
"""
|
| 53 |
-
Set up the slave agents for this agent.
|
| 54 |
-
|
| 55 |
-
Override this method in subclasses to define custom sub-agents.
|
| 56 |
-
By default, returns an empty list.
|
| 57 |
-
|
| 58 |
-
Returns:
|
| 59 |
-
List: A list of slave agents this agent can use
|
| 60 |
-
"""
|
| 61 |
-
return []
|
| 62 |
-
|
| 63 |
-
def _setup_agent(self) -> AgentWorkflow:
|
| 64 |
-
"""
|
| 65 |
-
Initializes and returns an agent workflow based on the presence of tools and slaves.
|
| 66 |
-
If both `self.tools` and `self.slaves` are empty, it sets up a default agent using the provided language model (`self.llm`).
|
| 67 |
-
Otherwise, it creates an agent workflow using the combined list of tools and slaves with the language model.
|
| 68 |
-
Returns:
|
| 69 |
-
AgentWorkflow: An instance of the agent workflow configured with the appropriate tools and language model.
|
| 70 |
-
"""
|
| 71 |
-
# Create tools from slaves: each tool calls slave.query(question) asynchronously
|
| 72 |
-
slave_tools = []
|
| 73 |
-
for slave in self.slaves:
|
| 74 |
-
slave_tool = FunctionTool.from_defaults(
|
| 75 |
-
name=f"call_{slave.name}",
|
| 76 |
-
description=f"Calls agent {slave.name} with a given query.",
|
| 77 |
-
fn=slave.query
|
| 78 |
-
)
|
| 79 |
-
slave_tools.append(slave_tool)
|
| 80 |
-
|
| 81 |
-
self.tools.extend(slave_tools)
|
| 82 |
-
|
| 83 |
-
return AgentWorkflow.from_tools_or_functions(
|
| 84 |
-
self.tools,
|
| 85 |
-
llm=self.llm
|
| 86 |
-
)
|
| 87 |
-
|
| 88 |
def get_system_prompt(self) -> str:
|
| 89 |
"""
|
| 90 |
Retrieves the system prompt.
|
|
@@ -94,7 +46,7 @@ class IAgent():
|
|
| 94 |
"""
|
| 95 |
return self.system_prompt
|
| 96 |
|
| 97 |
-
|
| 98 |
"""
|
| 99 |
Asynchronously queries the agent with a given question and returns the response.
|
| 100 |
|
|
@@ -110,25 +62,9 @@ class IAgent():
|
|
| 110 |
separator = "=============================="
|
| 111 |
Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{question}\n")
|
| 112 |
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
response = await self.agent.run(question)
|
| 117 |
-
response = str(response)
|
| 118 |
|
| 119 |
Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
|
| 120 |
return response
|
| 121 |
-
|
| 122 |
-
def clear_context(self):
|
| 123 |
-
"""
|
| 124 |
-
Clears the current context of the agent, resetting any conversation history.
|
| 125 |
-
This is useful when starting a new conversation or when the context needs to be refreshed.
|
| 126 |
-
"""
|
| 127 |
-
if self.ctx is not None:
|
| 128 |
-
self.ctx = Context(self.agent)
|
| 129 |
-
|
| 130 |
-
if not self.slaves:
|
| 131 |
-
return
|
| 132 |
-
|
| 133 |
-
for slave in self.slaves:
|
| 134 |
-
slave.clear_context()
|
|
|
|
| 1 |
+
from langchain_core.messages import BaseMessage, SystemMessage
|
|
|
|
| 2 |
|
| 3 |
import logging
|
| 4 |
import os
|
| 5 |
import re
|
| 6 |
from typing import List
|
| 7 |
|
| 8 |
+
from args import Args
|
| 9 |
+
from llm_factory import LLMFactory, AgentPreset
|
|
|
|
| 10 |
|
| 11 |
|
| 12 |
class IAgent():
|
| 13 |
+
def __init__(self, sys_prompt_filename, agent_preset: AgentPreset, tools: List = [], parallel_tool_calls=False):
|
| 14 |
self.name = self._format_name(sys_prompt_filename)
|
| 15 |
+
self.interface = agent_preset.get_interface()
|
| 16 |
+
|
| 17 |
# Load the system prompt from a file
|
| 18 |
system_prompt_path = os.path.join(os.getcwd(), "system_prompts", sys_prompt_filename)
|
| 19 |
self.system_prompt = ""
|
| 20 |
with open(system_prompt_path, "r") as file:
|
| 21 |
self.system_prompt = file.read().strip()
|
| 22 |
+
|
| 23 |
+
# Define LLM
|
| 24 |
+
llm = LLMFactory.create(agent_preset)
|
| 25 |
+
|
| 26 |
+
# Add tools
|
| 27 |
+
if tools:
|
| 28 |
+
self.model = llm.bind_tools(tools, parallel_tool_calls=parallel_tool_calls)
|
| 29 |
+
else:
|
| 30 |
+
self.model = llm
|
| 31 |
|
| 32 |
@staticmethod
|
| 33 |
def _format_name(sys_prompt_filename: str) -> str:
|
|
|
|
| 37 |
cleaned_name = re.sub(r'^[^a-zA-Z]+', '', name_without_ext)
|
| 38 |
return cleaned_name
|
| 39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
def get_system_prompt(self) -> str:
|
| 41 |
"""
|
| 42 |
Retrieves the system prompt.
|
|
|
|
| 46 |
"""
|
| 47 |
return self.system_prompt
|
| 48 |
|
| 49 |
+
def query(self, messages: List[BaseMessage]) -> BaseMessage:
|
| 50 |
"""
|
| 51 |
Asynchronously queries the agent with a given question and returns the response.
|
| 52 |
|
|
|
|
| 62 |
separator = "=============================="
|
| 63 |
Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{question}\n")
|
| 64 |
|
| 65 |
+
system_prompt = self.get_system_prompt()
|
| 66 |
+
conversation = [SystemMessage(content=system_prompt)] + messages
|
| 67 |
+
response = self.model.invoke(conversation)
|
|
|
|
|
|
|
| 68 |
|
| 69 |
Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
|
| 70 |
return response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llm_factory.py
CHANGED
|
@@ -1,71 +1,127 @@
|
|
| 1 |
-
from
|
| 2 |
-
from
|
| 3 |
-
|
|
|
|
| 4 |
|
| 5 |
from args import LLMInterface, Args
|
| 6 |
|
| 7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
class LLMFactory():
|
| 9 |
|
| 10 |
@classmethod
|
| 11 |
-
def create(cls,
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
elif interface == LLMInterface.HUGGINGFACE:
|
| 17 |
-
|
| 18 |
else:
|
| 19 |
raise ValueError(f"Interface '{interface}' is not supported !")
|
|
|
|
|
|
|
| 20 |
|
| 21 |
@staticmethod
|
| 22 |
-
def
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
"system_prompt": system_prompt,
|
| 28 |
-
}
|
| 29 |
|
| 30 |
-
if temperature is not None:
|
| 31 |
-
kwargs["temperature"] = temperature
|
| 32 |
-
|
| 33 |
-
if max_tokens is not None:
|
| 34 |
-
kwargs["max_tokens"] = max_tokens
|
| 35 |
-
|
| 36 |
-
llm = OpenAILike(**kwargs)
|
| 37 |
-
return llm
|
| 38 |
-
|
| 39 |
-
@staticmethod
|
| 40 |
-
def _openai_create(system_prompt, temperature = None, max_tokens = None):
|
| 41 |
kwargs = {
|
| 42 |
-
"model":
|
|
|
|
| 43 |
"api_key": Args.api_key,
|
| 44 |
-
"
|
|
|
|
|
|
|
|
|
|
| 45 |
}
|
| 46 |
|
| 47 |
-
|
| 48 |
-
kwargs["temperature"] = temperature
|
| 49 |
|
| 50 |
-
|
| 51 |
-
kwargs["max_tokens"] = max_tokens
|
| 52 |
-
|
| 53 |
-
llm = OpenAI(**kwargs)
|
| 54 |
-
return llm
|
| 55 |
|
| 56 |
@staticmethod
|
| 57 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
kwargs = {
|
| 59 |
-
"
|
| 60 |
-
"
|
| 61 |
-
"
|
|
|
|
| 62 |
}
|
| 63 |
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
if max_tokens is not None:
|
| 68 |
-
kwargs["max_tokens"] = max_tokens
|
| 69 |
|
| 70 |
-
|
| 71 |
-
return llm
|
|
|
|
| 1 |
+
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
|
| 2 |
+
from langchain_openai import ChatOpenAI
|
| 3 |
+
|
| 4 |
+
from typing import Optional
|
| 5 |
|
| 6 |
from args import LLMInterface, Args
|
| 7 |
|
| 8 |
|
| 9 |
+
class AgentPreset:
|
| 10 |
+
def __init__(self, interface: LLMInterface, model_name: str, temperature: Optional[float] = None,
|
| 11 |
+
max_tokens: Optional[int] = None, repeat_penalty: Optional[float] = None):
|
| 12 |
+
"""
|
| 13 |
+
Initialize an AgentPreset with LLM configuration parameters.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
interface: The model interface to use (e.g., OPENAI, HUGGINGFACE)
|
| 17 |
+
model_name: Name of the model to use
|
| 18 |
+
temperature: Controls randomness in responses (0.0-1.0)
|
| 19 |
+
max_tokens: Maximum number of tokens to generate in response
|
| 20 |
+
repeat_penalty: Penalty for token repetition
|
| 21 |
+
"""
|
| 22 |
+
self.interface = interface
|
| 23 |
+
self.model_name = model_name
|
| 24 |
+
self.temperature = temperature
|
| 25 |
+
self.max_tokens = max_tokens
|
| 26 |
+
self.repeat_penalty = repeat_penalty
|
| 27 |
+
|
| 28 |
+
def get_interface(self) -> LLMInterface:
|
| 29 |
+
"""
|
| 30 |
+
Get the model interface.
|
| 31 |
+
|
| 32 |
+
Returns:
|
| 33 |
+
LLMInterface: The interface used for this agent.
|
| 34 |
+
"""
|
| 35 |
+
return self.interface
|
| 36 |
+
|
| 37 |
+
def get_model_name(self) -> str:
|
| 38 |
+
"""
|
| 39 |
+
Get the model name.
|
| 40 |
+
|
| 41 |
+
Returns:
|
| 42 |
+
str: The name of the model.
|
| 43 |
+
"""
|
| 44 |
+
return self.model_name
|
| 45 |
+
|
| 46 |
+
def get_temperature(self) -> float:
|
| 47 |
+
"""
|
| 48 |
+
Get the temperature setting.
|
| 49 |
+
|
| 50 |
+
Returns:
|
| 51 |
+
float: The temperature value controlling randomness.
|
| 52 |
+
"""
|
| 53 |
+
return self.temperature
|
| 54 |
+
|
| 55 |
+
def get_max_tokens(self) -> int:
|
| 56 |
+
"""
|
| 57 |
+
Get the maximum tokens setting.
|
| 58 |
+
|
| 59 |
+
Returns:
|
| 60 |
+
int: The maximum number of tokens for generation.
|
| 61 |
+
"""
|
| 62 |
+
return self.max_tokens
|
| 63 |
+
|
| 64 |
+
def get_repeat_penalty(self) -> float:
|
| 65 |
+
"""
|
| 66 |
+
Get the repeat penalty setting.
|
| 67 |
+
|
| 68 |
+
Returns:
|
| 69 |
+
float: The penalty value for token repetition.
|
| 70 |
+
"""
|
| 71 |
+
return self.repeat_penalty
|
| 72 |
+
|
| 73 |
+
|
| 74 |
class LLMFactory():
    """Factory that turns an ``AgentPreset`` into a LangChain chat model."""

    @classmethod
    def create(cls, agent_preset: AgentPreset):
        """
        Build a chat model matching the preset's configured interface.

        Args:
            agent_preset (AgentPreset): Settings describing the model to build.

        Returns:
            A LangChain chat model instance for the preset's provider.

        Raises:
            ValueError: If the preset's interface is not a supported one.
        """
        interface = agent_preset.get_interface()

        if interface == LLMInterface.OPENAI:
            return cls._create_openai_model(agent_preset)
        if interface == LLMInterface.HUGGINGFACE:
            return cls._create_huggingface_model(agent_preset)
        raise ValueError(f"Interface '{interface}' is not supported !")

    @staticmethod
    def _create_openai_model(agent_preset: AgentPreset):
        """
        Build a ``ChatOpenAI`` model from the preset.

        Endpoint location and credentials come from ``Args``; sampling
        settings come from the preset itself.
        """
        settings = {
            "model": agent_preset.get_model_name(),
            "base_url": Args.api_base,
            "api_key": Args.api_key,
            "temperature": agent_preset.get_temperature(),
            "max_completion_tokens": agent_preset.get_max_tokens(),
            # The repeat penalty is mapped to frequency_penalty here;
            # presence_penalty was deliberately left disabled.
            # "presence_penalty": repeat_penalty,
            "frequency_penalty": agent_preset.get_repeat_penalty(),
        }
        return ChatOpenAI(**settings)

    @staticmethod
    def _create_huggingface_model(agent_preset: AgentPreset):
        """
        Build a ``ChatHuggingFace`` model from the preset.

        A ``HuggingFaceEndpoint`` is created first, then wrapped in the
        chat-model adapter expected by the rest of the pipeline.
        """
        endpoint_settings = {
            "model": agent_preset.get_model_name(),
            "temperature": agent_preset.get_temperature(),
            "max_new_tokens": agent_preset.get_max_tokens(),
            "repetition_penalty": agent_preset.get_repeat_penalty(),
        }
        endpoint = HuggingFaceEndpoint(**endpoint_settings)
        return ChatHuggingFace(llm=endpoint)
|
requirements.txt
CHANGED
|
@@ -10,4 +10,8 @@ llama-index-embeddings-huggingface
|
|
| 10 |
llama-index-llms-openai
|
| 11 |
llama-index-llms-openai-like
|
| 12 |
llama-index-tools-duckduckgo
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
pint
|
|
|
|
| 10 |
llama-index-llms-openai
|
| 11 |
llama-index-llms-openai-like
|
| 12 |
llama-index-tools-duckduckgo
|
| 13 |
+
langchain
|
| 14 |
+
langchain_openai
|
| 15 |
+
langchain_huggingface
|
| 16 |
+
langchain-core>=0.3.11
|
| 17 |
pint
|
system_prompts/11_output_guard.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Act as a guard for a powerful but flawed reasoning model. It can answer questions very intelligently, but it is prone to glitching or outputting Chinese words out of nowhere.
|
| 2 |
+
|
| 3 |
+
Your role is to detect any issues with the output and correct them (for example, translate the Chinese text, remove glitched repetitive tokens, etc.).
|
| 4 |
+
|
| 5 |
+
If there is a thinking block, remove it! Only output the response!
|
| 6 |
+
|
| 7 |
+
If there is no issue with the response, you will just output it word for word. Otherwise, you will output the corrected version.
|
| 8 |
+
|
| 9 |
+
Do not output anything else. Do not offer explanations or any other redundant words.
|
test.py
CHANGED
|
@@ -109,82 +109,6 @@ class TestAlfredAgent(unittest.TestCase):
|
|
| 109 |
# TODO: Add assertions to verify the state changes
|
| 110 |
print(f"State after node execution: {test_state}")
|
| 111 |
|
| 112 |
-
def test_encryption_expert_node(self):
|
| 113 |
-
"""
|
| 114 |
-
Test the encryption_expert node functionality.
|
| 115 |
-
|
| 116 |
-
Handles encryption/decryption tasks and encoding/decoding operations
|
| 117 |
-
"""
|
| 118 |
-
# Create an instance of Nodes class
|
| 119 |
-
nodes = Nodes()
|
| 120 |
-
|
| 121 |
-
# Create a test state
|
| 122 |
-
test_state = {} # TODO: Initialize with appropriate test data
|
| 123 |
-
|
| 124 |
-
# Test the node function
|
| 125 |
-
print(f"Testing 'encryption_expert' node...")
|
| 126 |
-
nodes.encryption_expert_node(test_state)
|
| 127 |
-
|
| 128 |
-
# TODO: Add assertions to verify the state changes
|
| 129 |
-
print(f"State after node execution: {test_state}")
|
| 130 |
-
|
| 131 |
-
def test_encryption_advisor_node(self):
|
| 132 |
-
"""
|
| 133 |
-
Test the encryption_advisor node functionality.
|
| 134 |
-
|
| 135 |
-
Provides specialized guidance on complex encryption problems to the encryption expert
|
| 136 |
-
"""
|
| 137 |
-
# Create an instance of Nodes class
|
| 138 |
-
nodes = Nodes()
|
| 139 |
-
|
| 140 |
-
# Create a test state
|
| 141 |
-
test_state = {} # TODO: Initialize with appropriate test data
|
| 142 |
-
|
| 143 |
-
# Test the node function
|
| 144 |
-
print(f"Testing 'encryption_advisor' node...")
|
| 145 |
-
nodes.encryption_advisor_node(test_state)
|
| 146 |
-
|
| 147 |
-
# TODO: Add assertions to verify the state changes
|
| 148 |
-
print(f"State after node execution: {test_state}")
|
| 149 |
-
|
| 150 |
-
def test_math_expert_node(self):
|
| 151 |
-
"""
|
| 152 |
-
Test the math_expert node functionality.
|
| 153 |
-
|
| 154 |
-
Performs mathematical calculations and solves numerical problems
|
| 155 |
-
"""
|
| 156 |
-
# Create an instance of Nodes class
|
| 157 |
-
nodes = Nodes()
|
| 158 |
-
|
| 159 |
-
# Create a test state
|
| 160 |
-
test_state = {} # TODO: Initialize with appropriate test data
|
| 161 |
-
|
| 162 |
-
# Test the node function
|
| 163 |
-
print(f"Testing 'math_expert' node...")
|
| 164 |
-
nodes.math_expert_node(test_state)
|
| 165 |
-
|
| 166 |
-
# TODO: Add assertions to verify the state changes
|
| 167 |
-
print(f"State after node execution: {test_state}")
|
| 168 |
-
|
| 169 |
-
def test_math_advisor_node(self):
|
| 170 |
-
"""
|
| 171 |
-
Test the math_advisor node functionality.
|
| 172 |
-
|
| 173 |
-
Provides specialized guidance on complex mathematical problems to the math expert
|
| 174 |
-
"""
|
| 175 |
-
# Create an instance of Nodes class
|
| 176 |
-
nodes = Nodes()
|
| 177 |
-
|
| 178 |
-
# Create a test state
|
| 179 |
-
test_state = {} # TODO: Initialize with appropriate test data
|
| 180 |
-
|
| 181 |
-
# Test the node function
|
| 182 |
-
print(f"Testing 'math_advisor' node...")
|
| 183 |
-
nodes.math_advisor_node(test_state)
|
| 184 |
-
|
| 185 |
-
# TODO: Add assertions to verify the state changes
|
| 186 |
-
print(f"State after node execution: {test_state}")
|
| 187 |
-
|
| 188 |
def test_reasoner_node(self):
|
| 189 |
"""
|
| 190 |
Test the reasoner node functionality.
|
|
@@ -266,7 +190,7 @@ class TestAlfredAgent(unittest.TestCase):
|
|
| 266 |
"""
|
| 267 |
Test the conditional edge for solver node.
|
| 268 |
|
| 269 |
-
This edge should return one of: "manager", "researcher", "
|
| 270 |
"""
|
| 271 |
# Create an instance of Edges class
|
| 272 |
edges = Edges()
|
|
@@ -280,48 +204,8 @@ class TestAlfredAgent(unittest.TestCase):
|
|
| 280 |
|
| 281 |
# TODO: Add assertions to verify the result
|
| 282 |
print(f"Edge decision: {result}")
|
| 283 |
-
assert result in ["manager", "researcher", "
|
| 284 |
-
|
| 285 |
-
def test_encryption_expert_edge(self):
|
| 286 |
-
"""
|
| 287 |
-
Test the conditional edge for encryption_expert node.
|
| 288 |
-
|
| 289 |
-
This edge should return one of: "solver", "encryption_advisor"
|
| 290 |
-
"""
|
| 291 |
-
# Create an instance of Edges class
|
| 292 |
-
edges = Edges()
|
| 293 |
-
|
| 294 |
-
# Create a test state
|
| 295 |
-
test_state = {} # TODO: Initialize with appropriate test data
|
| 296 |
-
|
| 297 |
-
# Test the edge function
|
| 298 |
-
print(f"Testing 'encryption_expert' conditional edge...")
|
| 299 |
-
result = edges.encryption_expert_edge(test_state)
|
| 300 |
-
|
| 301 |
-
# TODO: Add assertions to verify the result
|
| 302 |
-
print(f"Edge decision: {result}")
|
| 303 |
-
assert result in ["solver", "encryption_advisor"], f"Edge result '{result}' not in expected values"
|
| 304 |
|
| 305 |
-
def test_math_expert_edge(self):
|
| 306 |
-
"""
|
| 307 |
-
Test the conditional edge for math_expert node.
|
| 308 |
-
|
| 309 |
-
This edge should return one of: "solver", "math_advisor"
|
| 310 |
-
"""
|
| 311 |
-
# Create an instance of Edges class
|
| 312 |
-
edges = Edges()
|
| 313 |
-
|
| 314 |
-
# Create a test state
|
| 315 |
-
test_state = {} # TODO: Initialize with appropriate test data
|
| 316 |
-
|
| 317 |
-
# Test the edge function
|
| 318 |
-
print(f"Testing 'math_expert' conditional edge...")
|
| 319 |
-
result = edges.math_expert_edge(test_state)
|
| 320 |
-
|
| 321 |
-
# TODO: Add assertions to verify the result
|
| 322 |
-
print(f"Edge decision: {result}")
|
| 323 |
-
assert result in ["solver", "math_advisor"], f"Edge result '{result}' not in expected values"
|
| 324 |
-
|
| 325 |
def test_full_workflow(self):
|
| 326 |
"""
|
| 327 |
Test the Alfred agent full workflow.
|
|
|
|
| 109 |
# TODO: Add assertions to verify the state changes
|
| 110 |
print(f"State after node execution: {test_state}")
|
| 111 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
def test_reasoner_node(self):
|
| 113 |
"""
|
| 114 |
Test the reasoner node functionality.
|
|
|
|
| 190 |
"""
|
| 191 |
Test the conditional edge for solver node.
|
| 192 |
|
| 193 |
+
This edge should return one of: "manager", "researcher", "reasoner", "image_handler", "video_handler"
|
| 194 |
"""
|
| 195 |
# Create an instance of Edges class
|
| 196 |
edges = Edges()
|
|
|
|
| 204 |
|
| 205 |
# TODO: Add assertions to verify the result
|
| 206 |
print(f"Edge decision: {result}")
|
| 207 |
+
assert result in ["manager", "researcher", "reasoner", "image_handler", "video_handler"], f"Edge result '{result}' not in expected values"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 209 |
def test_full_workflow(self):
|
| 210 |
"""
|
| 211 |
Test the Alfred agent full workflow.
|
toolbox.py
CHANGED
|
@@ -1,6 +1,3 @@
|
|
| 1 |
-
from llama_index.core.tools import FunctionTool
|
| 2 |
-
|
| 3 |
-
|
| 4 |
from duckduckgo_search import DDGS
|
| 5 |
import pint
|
| 6 |
import sympy as sp
|
|
@@ -58,19 +55,6 @@ class _Math:
|
|
| 58 |
return f"Error in unit conversion: {str(e)}"
|
| 59 |
|
| 60 |
|
| 61 |
-
class _MathToolbox:
|
| 62 |
-
symbolic_calc = FunctionTool.from_defaults(
|
| 63 |
-
name="symbolic_calc",
|
| 64 |
-
description="Evaluates complex mathematical expressions using SymPy",
|
| 65 |
-
fn=_Math.symbolic_calc
|
| 66 |
-
)
|
| 67 |
-
unit_converter = FunctionTool.from_defaults(
|
| 68 |
-
name="unit_converter",
|
| 69 |
-
description="Converts values between different units of measurement",
|
| 70 |
-
fn=_Math.unit_converter
|
| 71 |
-
)
|
| 72 |
-
|
| 73 |
-
|
| 74 |
class _WebSearch:
|
| 75 |
@staticmethod
|
| 76 |
def duckduckgo_text_search(keywords, max_results=5) -> list[dict[str, str]]:
|
|
@@ -134,24 +118,6 @@ class _WebSearch:
|
|
| 134 |
return DDGS().videos(keywords, license_videos=license, max_results=max_results)
|
| 135 |
|
| 136 |
|
| 137 |
-
class _WebSearchToolbox:
|
| 138 |
-
duckduckgo_text_search = FunctionTool.from_defaults(
|
| 139 |
-
name="duckduckgo_text_search",
|
| 140 |
-
description="DuckDuckGo text search",
|
| 141 |
-
fn=_WebSearch.duckduckgo_text_search
|
| 142 |
-
)
|
| 143 |
-
duckduckgo_images_search = FunctionTool.from_defaults(
|
| 144 |
-
name="duckduckgo_images_search",
|
| 145 |
-
description="DuckDuckGo images search",
|
| 146 |
-
fn=_WebSearch.duckduckgo_images_search
|
| 147 |
-
)
|
| 148 |
-
duckduckgo_videos_search = FunctionTool.from_defaults(
|
| 149 |
-
name="duckduckgo_videos_search",
|
| 150 |
-
description="DuckDuckGo videos search",
|
| 151 |
-
fn=_WebSearch.duckduckgo_videos_search
|
| 152 |
-
)
|
| 153 |
-
|
| 154 |
-
|
| 155 |
class _Encryption:
|
| 156 |
|
| 157 |
@staticmethod
|
|
@@ -312,50 +278,7 @@ class _Encryption:
|
|
| 312 |
return reversed_text
|
| 313 |
|
| 314 |
|
| 315 |
-
class _EncryptionToolbox:
|
| 316 |
-
ascii_encode = FunctionTool.from_defaults(
|
| 317 |
-
name="ascii_encode",
|
| 318 |
-
description="Convert each character in a string to its ASCII value",
|
| 319 |
-
fn=_Encryption.ascii_encode
|
| 320 |
-
)
|
| 321 |
-
ascii_decode = FunctionTool.from_defaults(
|
| 322 |
-
name="ascii_decode",
|
| 323 |
-
description="Convert space-separated ASCII values back to characters",
|
| 324 |
-
fn=_Encryption.ascii_decode
|
| 325 |
-
)
|
| 326 |
-
base64_encode = FunctionTool.from_defaults(
|
| 327 |
-
name="base64_encode",
|
| 328 |
-
description="Encode a string to base64",
|
| 329 |
-
fn=_Encryption.base64_encode
|
| 330 |
-
)
|
| 331 |
-
base64_decode = FunctionTool.from_defaults(
|
| 332 |
-
name="base64_decode",
|
| 333 |
-
description="Decode a base64 string to plain text",
|
| 334 |
-
fn=_Encryption.base64_decode
|
| 335 |
-
)
|
| 336 |
-
caesar_cipher_encode = FunctionTool.from_defaults(
|
| 337 |
-
name="caesar_cipher_encode",
|
| 338 |
-
description="Encode a string using Caesar cipher with specified shift",
|
| 339 |
-
fn=_Encryption.caesar_cipher_encode
|
| 340 |
-
)
|
| 341 |
-
caesar_cipher_decode = FunctionTool.from_defaults(
|
| 342 |
-
name="caesar_cipher_decode",
|
| 343 |
-
description="Decode a Caesar cipher string with specified shift",
|
| 344 |
-
fn=_Encryption.caesar_cipher_decode
|
| 345 |
-
)
|
| 346 |
-
caesar_cipher_brute_force = FunctionTool.from_defaults(
|
| 347 |
-
name="caesar_cipher_brute_force",
|
| 348 |
-
description="Try all 26 possible shifts to decode a Caesar cipher text",
|
| 349 |
-
fn=_Encryption.caesar_cipher_brute_force
|
| 350 |
-
)
|
| 351 |
-
reverse_string = FunctionTool.from_defaults(
|
| 352 |
-
name="reverse_string",
|
| 353 |
-
description="Reverse a string",
|
| 354 |
-
fn=_Encryption.reverse_string
|
| 355 |
-
)
|
| 356 |
-
|
| 357 |
-
|
| 358 |
class Toolbox:
|
| 359 |
-
math =
|
| 360 |
-
web_search =
|
| 361 |
-
encryption =
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from duckduckgo_search import DDGS
|
| 2 |
import pint
|
| 3 |
import sympy as sp
|
|
|
|
| 55 |
return f"Error in unit conversion: {str(e)}"
|
| 56 |
|
| 57 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
class _WebSearch:
|
| 59 |
@staticmethod
|
| 60 |
def duckduckgo_text_search(keywords, max_results=5) -> list[dict[str, str]]:
|
|
|
|
| 118 |
return DDGS().videos(keywords, license_videos=license, max_results=max_results)
|
| 119 |
|
| 120 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
class _Encryption:
|
| 122 |
|
| 123 |
@staticmethod
|
|
|
|
| 278 |
return reversed_text
|
| 279 |
|
| 280 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 281 |
class Toolbox:
    """Single access point grouping the tool namespaces defined above."""
    math = _Math()
    web_search = _WebSearch()
    encryption = _Encryption()
|