Spaces:
Sleeping
Sleeping
File size: 3,504 Bytes
f15d6bc 30944a6 f15d6bc 30944a6 736ad28 30944a6 4f3dd54 30944a6 4f3dd54 89d8a0d 4f3dd54 30944a6 2946830 9102dbe 30944a6 3deb4d8 30944a6 5deb7d1 30944a6 1f75af9 30944a6 c9eb90f 30944a6 f15d6bc 30944a6 f15d6bc 283ad74 4f3dd54 283ad74 e634f59 1e6d15d 736ad28 1e6d15d 9edff99 52f0457 1e6d15d f15d6bc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 |
import re
from langgraph.prebuilt import create_react_agent
from agent_util import Agent_Util
from prompts import *
from tools import *
from langgraph_supervisor import create_supervisor
from langchain.chat_models import init_chat_model
from langchain_core.messages import AIMessage
import glob
class Agent:
    """Multi-agent question-answering pipeline.

    Wires up two specialized ReAct agents (audio processing, web search) under
    a LangGraph supervisor that also owns a set of task-specific tools (chess,
    Excel, Python execution, etc.). Instances are callable: ``agent(question,
    task_id, task_file_name)`` returns the text following the ``FINAL ANSWER:``
    marker produced by the supervisor, or ``""`` if no such marker appears.
    """

    def __init__(self):
        """Build both worker agents and compile the supervisor graph.

        Side effects: prints progress banners and instantiates chat models
        (network/credential-dependent via ``init_chat_model``).
        """
        print("Initializing Agent....")
        print("**************************************************************************************")
        print('........ Versão: Ajuste modelos e temperatura .....')
        print("**************************************************************************************")
        print("--> Audio Agent")
        # ReAct agent specialized in audio/file text extraction.
        self.audio_agent = create_react_agent(
            model=init_chat_model(GPT_4_0_MINI_MODEL),
            tools=[extract_text_from_url_tool, extract_text_from_file_tool, clean_ingredient_measure_tool],
            prompt=AUDIO_AGENT_PROMPT,
            name="audio_agent",
        )
        print("--> Web Search Agent")
        # ReAct agent specialized in web research.
        self.web_search_agent = create_react_agent(
            model=init_chat_model(GPT_4_1_MINI_MODEL),
            tools=[search_web_tool],
            prompt=WEB_SEARCH_AGENT_PROMPT,
            name="web_research_agent",
        )
        print("--> Supervisor")
        # temperature=0 keeps the routing decisions deterministic.
        self.supervisor = create_supervisor(
            model=init_chat_model(GPT_4_0_MINI_MODEL, temperature=0),
            agents=[self.web_search_agent, self.audio_agent],
            tools=[bird_video_count_tool, chess_image_to_fen_tool, chess_fen_get_best_next_move_tool,
                   get_excel_columns_tool, calculate_excel_sum_by_columns_tool, execute_python_code_tool,
                   text_inverter_tool, check_table_commutativity_tool, filter_vegetables_from_list_tool],
            prompt=SUPERVISOR_PROMPT,
            add_handoff_back_messages=True,
            output_mode="last_message",
        ).compile()
        print("Agent initialized.")

    def __call__(self, question: str, task_id: str, task_file_name: str) -> str:
        """Answer *question*, optionally downloading an attached task file.

        Args:
            question: The user's question text.
            task_id: Task identifier (used only for logging here).
            task_file_name: Optional file attached to the task; when non-empty
                it is downloaded and its name is prepended to the prompt so the
                supervisor knows the file exists.

        Returns:
            The text after the ``FINAL ANSWER:`` marker in the supervisor's
            reply, or ``""`` when no AI message contains the marker.
        """
        print(f"Agent received question({task_id}) (first 50 chars): {question[:50]}...")
        file_prefix = ""
        if task_file_name:
            print(f"Task com arquivo {task_file_name}")
            # NOTE(review): File_Util is never imported explicitly in this file;
            # presumably it arrives via one of the wildcard imports — verify.
            File_Util.baixa_arquivo_task(task_file_name)
            file_prefix = f"File: {task_file_name} . "
        # Single, non-streaming invocation of the compiled supervisor graph.
        response = self.supervisor.invoke(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": f"{file_prefix}{question}",
                    }
                ]
            }
        )
        print(f"Resposta LLM: {response}")
        return self._extract_final_answer(response['messages'])

    @staticmethod
    def _extract_final_answer(messages) -> str:
        """Scan *messages* newest-to-oldest for an AIMessage carrying the
        ``FINAL ANSWER:`` marker and return the text that follows it.

        Returns ``""`` when no AI message contains the marker.
        """
        final_content = ""
        for m in reversed(messages):
            print(f"buscando resposta em {m.content}")
            # Only assistant (AIMessage) messages can carry the final answer.
            if isinstance(m, AIMessage):
                print('AI Message')
                # Case-insensitive marker check. Assumes content is a str —
                # TODO(review): confirm content is never a list of parts.
                if "FINAL ANSWER:" in m.content.upper():
                    print("Tem Final Answer")
                    final_content = m.content
                    break
        # Extract everything after the marker on its line (case-insensitive).
        print(f"Final Content: {final_content}")
        match = re.search(r"(?i)FINAL ANSWER:\s*(.*)", final_content)
        final_answer = match.group(1).strip() if match else ""
        print("Final Answer:", final_answer)
        return final_answer