Spaces:
Sleeping
Sleeping
File size: 6,132 Bytes
fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b fae0e51 cc6bd3b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 |
from langgraph.graph import START, END, StateGraph
from langgraph.graph.state import CompiledStateGraph
from typing import Dict, Any, TypedDict, Literal, Optional
import asyncio
from management import Manager, Assistant
# Maximum number of Assistant turns before the graph forces a final answer
# (checked in GraphBuilder.should_continue).
MAX_INTERACTIONS = 5
# Maximum recursion depth passed to the Manager agent — TODO confirm semantics in management.Manager
MAX_DEPTH = 3
# Sampling temperature and token budget shared by both the Assistant and the Manager:
TEMPERATURE = 0.7
MAX_TOKENS = 100
class State(TypedDict):
    """State for the agent graph."""
    # The user's original question; kept so final_answer_node can re-prompt with it.
    initial_query: str
    # The most recent message exchanged between the Assistant and Manager nodes.
    current_message: str
    # Number of Assistant turns taken so far; compared against MAX_INTERACTIONS.
    nr_interactions: int
    # Text extracted after the final-answer marker, or None until one is produced.
    final_response: Optional[str]
class GraphBuilder:
    """Builds the Assistant/Manager LangGraph state machine.

    The compiled graph loops Assistant -> Manager -> Assistant until either the
    Assistant emits the final-answer marker or MAX_INTERACTIONS is reached, at
    which point control flows to the final-answer node.
    """

    def __init__(self):
        """
        Initializes the GraphBuilder with one Assistant and one Manager agent.
        """
        self.assistant_agent = Assistant(TEMPERATURE, MAX_TOKENS)
        self.manager_agent = Manager(TEMPERATURE, MAX_TOKENS, MAX_DEPTH)
        # Marker string the Assistant is expected to emit when it is done.
        self.final_answer_hint = "Final answer:"

    def clear_chat_history(self):
        """Reset the conversation context of both agents."""
        self.assistant_agent.clear_context()
        self.manager_agent.clear_context()

    def _extract_final_answer(self, response: str) -> str:
        """Return the text following the first final-answer marker in *response*.

        Callers must ensure the marker is present; splitting on a missing
        marker would raise IndexError.
        """
        return response.split(self.final_answer_hint, 1)[1].strip()

    async def assistant_node(self, state: State) -> State:
        """
        Assistant agent that evaluates the query and decides whether to give a final answer
        or continue the conversation with the Manager.
        Uses the existing Assistant implementation.
        """
        response = await self.assistant_agent.query(state["current_message"])
        # Check if this is a final answer
        if self.final_answer_hint in response:
            # Extract the text after final answer hint
            state["final_response"] = self._extract_final_answer(response)
        state["current_message"] = response
        state["nr_interactions"] += 1
        return state

    async def manager_node(self, state: State) -> State:
        """
        Manager agent that handles the queries from the Assistant and provides responses.
        Uses the existing Manager implementation.
        """
        response = await self.manager_agent.query(state["current_message"])
        state["current_message"] = response
        return state

    async def final_answer_node(self, state: State) -> State:
        """
        Final answer node that formats and returns the final response.
        If there's already a final answer in the state, it uses that.
        Otherwise, it asks the assistant to formulate a final answer.
        """
        print("========== final_answer_node ==========")
        # If we already have a final answer, use it
        final_response = state.get("final_response")
        if final_response is not None:
            print(f"====================\nFinal response:\n{final_response}\n====================")
            return state
        # Otherwise, have the assistant formulate a final answer
        prompt = f"Based on the conversation so far, provide a final answer to the original query:\n\n{state['initial_query']}"
        state["current_message"] = prompt
        response = await self.assistant_agent.query(state["current_message"])
        # Guarantee the marker is present so extraction below cannot fail
        if self.final_answer_hint not in response:
            print(f"WARNING: final_answer_hint '{self.final_answer_hint}' not in response !")
            response = f"{self.final_answer_hint}{response}"
        # Extract the text after final answer hint
        state["final_response"] = self._extract_final_answer(response)
        print(f"====================\nFinal response:\n{state['final_response']}\n====================")
        return state

    def should_continue(self, state: State) -> Literal["manager", "final_answer"]:
        """
        Decides whether to continue to the Manager or to provide a final answer.
        Returns:
            "manager": If the Assistant has decided to continue the conversation
            "final_answer": If the Assistant has decided to provide a final answer
        """
        message = state["current_message"]
        # Stop on the interaction cap as well as on an explicit final answer.
        if state["nr_interactions"] >= MAX_INTERACTIONS or self.final_answer_hint in message:
            return "final_answer"
        return "manager"

    def build_agent_graph(self) -> CompiledStateGraph:
        """Build and return the agent graph."""
        graph = StateGraph(State)
        # Register the async node coroutines
        graph.add_node("assistant", self.assistant_node)
        graph.add_node("manager", self.manager_node)
        graph.add_node("final_answer", self.final_answer_node)
        # Add the edges
        graph.add_edge(START, "assistant")
        graph.add_conditional_edges(
            "assistant",
            self.should_continue,
            {
                "manager": "manager",
                "final_answer": "final_answer"
            }
        )
        graph.add_edge("manager", "assistant")
        graph.add_edge("final_answer", END)
        return graph.compile()
class Alfred:
    """Callable facade over the compiled agent graph.

    Builds the graph once at construction time; each call runs one query
    through it and returns the extracted final answer.
    """

    def __init__(self):
        print("Agent initialized.")
        self.graph_builder = GraphBuilder()
        self.agent_graph = self.graph_builder.build_agent_graph()

    async def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        final_state = await self.process_query(question)
        answer = final_state["final_response"]
        print(f"Agent processed the response: {answer}")
        return answer

    async def process_query(self, query: str) -> Dict[str, Any]:
        """
        Process a query through the agent graph.
        Args:
            query: The initial query to process
        Returns:
            The final state of the graph execution
        """
        start_state: State = State(
            initial_query=query,
            current_message=query,
            nr_interactions=0,
            final_response=None,
        )
        # Both agents start each query with a clean context.
        self.graph_builder.clear_chat_history()
        return await self.agent_graph.ainvoke(start_state)
|