# NOTE(review): the three lines below were scrape artifacts from the hosting page
# ("Spaces: / Sleeping / Sleeping") — not Python code; kept here as a comment only.
# Spaces: Sleeping
import asyncio
import os

from llm_factory import LLMFactory
# TODO: LangGraph graph that processes the following:
# 1. Takes the Query Node (Start Node) and sends it to the Assistant Node (has memory - access to the conversation history stored in the State)
# 2. The Assistant Node decides whether the query is ready to be delivered (the solution is available in the conversation history, or the `MAX_DEPTH` has been reached)
#    - if yes: formulates a concise final answer and sends it to the Final_Answer Node (End Node)
#    - if no: formulates a query (could be the first one or a follow-up) and sends it to the Manager Node (also has access to the conversation history)
#      (!) This communication happens back and forth until the query gets solved (or up to a maximum depth defined by a `MAX_DEPTH` variable)
class Manager:
    """Coordinator agent intended to drive the Assistant/Manager LangGraph loop.

    The graph itself (Query -> Assistant <-> Manager -> Final_Answer, bounded by
    ``MAX_DEPTH``) is not implemented yet; ``query`` is a placeholder.
    """

    def __init__(self):
        # No LLM or graph wiring yet — only announces construction.
        print("Agent initialized.")

    async def __call__(self, question: str) -> str:
        """Answer ``question`` by delegating to :meth:`query`.

        Returns the answer string (empty while ``query`` is a stub); the last
        answer is also kept in ``self.final_answer`` for later inspection.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        self.final_answer = ""
        response = await self.query(question)
        # Bug fix: previously final_answer was never updated with the result.
        self.final_answer = response
        print(f"Agent processed the response: {response}")
        return response

    async def query(self, question: str) -> str:
        """Resolve ``question``; placeholder until the LangGraph pipeline lands.

        Bug fix: the stub previously ended in ``pass`` and returned ``None``,
        violating the ``-> str`` contract of both this method and ``__call__``.
        It now returns an empty string.
        """
        # TODO: run the Assistant/Manager LangGraph loop here (see module comment).
        return ""
if __name__ == "__main__":
    # Script entry point: only announce direct execution for now.
    banner = "---__main__---"
    print(banner)