import re
from langgraph.prebuilt import create_react_agent
from agent_util import Agent_Util
from prompts import *
from tools import *
from langgraph_supervisor import create_supervisor
from langchain.chat_models import init_chat_model
from langchain_core.messages import AIMessage
import glob


class Agent:
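    """Multi-agent question-answering pipeline built with langgraph.

    A supervisor routes each question to an audio agent or a web-search agent,
    and the reply text after the "FINAL ANSWER:" marker is returned.
    """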
    def __init__(self):
        print("Initializing Agent....")
        print("**************************************************************************************")
        print('........ Version: model and temperature tuning .....')
        print("**************************************************************************************")

        # ReAct agent responsible for audio/transcript handling and ingredient clean-up.
        print("--> Audio Agent")
        self.audio_agent = create_react_agent(
            model=init_chat_model(GPT_4_0_MINI_MODEL),
            tools=[extract_text_from_url_tool, extract_text_from_file_tool, clean_ingredient_measure_tool],
            prompt=AUDIO_AGENT_PROMPT,
            name="audio_agent",
        )

        # ReAct agent responsible for web searches.
        print("--> Web Search Agent")
        self.web_search_agent = create_react_agent(
            model=init_chat_model(GPT_4_1_MINI_MODEL),
            tools=[search_web_tool],
            prompt=WEB_SEARCH_AGENT_PROMPT,
            name="web_research_agent",
        )

        # Supervisor that routes between the two agents and exposes the task-specific tools.
        print("--> Supervisor")
        self.supervisor = create_supervisor(
            model=init_chat_model(GPT_4_0_MINI_MODEL, temperature=0),
            agents=[self.web_search_agent, self.audio_agent],
            tools=[
                bird_video_count_tool, chess_image_to_fen_tool, chess_fen_get_best_next_move_tool,
                get_excel_columns_tool, calculate_excel_sum_by_columns_tool, execute_python_code_tool,
                text_inverter_tool, check_table_commutativity_tool, filter_vegetables_from_list_tool,
            ],
            prompt=SUPERVISOR_PROMPT,
            add_handoff_back_messages=True,
            output_mode="last_message",
        ).compile()
        print("Agent initialized.")
    def __call__(self, question: str, task_id: str, task_file_name: str) -> str:
        print(f"Agent received question ({task_id}) (first 50 chars): {question[:50]}...")

        # If the task ships with an attachment, download it and prepend the file name to the prompt.
        file_prefix = ""
        if task_file_name:
            print(f"Task with file {task_file_name}")
            # File_Util is assumed to be provided by one of the wildcard imports above.
            File_Util.baixa_arquivo_task(task_file_name)
            file_prefix = f"File: {task_file_name} . "

        # Invoke the supervisor graph without streaming.
        response = self.supervisor.invoke(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": f"{file_prefix}{question}",
                    }
                ]
            }
        )
        print(f"LLM response: {response}")

        # Walk the messages backwards and keep the last AIMessage that contains "FINAL ANSWER:".
        final_content = ""
        for m in reversed(response['messages']):
            print(f"Searching for answer in {m.content}")
            if isinstance(m, AIMessage):
                print('AI Message')
                if "FINAL ANSWER:" in m.content.upper():
                    print("Found FINAL ANSWER")
                    final_content = m.content
                    break

        # Extract the text after the "FINAL ANSWER:" marker (case-insensitive).
        print(f"Final Content: {final_content}")
        match = re.search(r"(?i)FINAL ANSWER:\s*(.*)", final_content)
        final_answer = match.group(1).strip() if match else ""
        print("Final Answer:", final_answer)
        return final_answer
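

# Minimal usage sketch (not part of the original module): the question, task_id,
# and empty task_file_name below are hypothetical placeholders; in the real flow
# these values come from the evaluation harness that drives this agent.
if __name__ == "__main__":
    agent = Agent()
    answer = agent(
        question="What is the capital of France?",
        task_id="example-task-id",
        task_file_name="",  # no attachment, so the download step is skipped
    )
    print(answer)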