import re
from langgraph.prebuilt import create_react_agent
from agent_util import Agent_Util
from prompts import *
from tools import *
from langgraph_supervisor import create_supervisor
from langchain.chat_models import init_chat_model

import glob
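
# NOTE: File_Util (used in the methods below) is not imported explicitly; it is
# assumed to be exposed by one of the wildcard imports above (prompts or tools).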

class Agent:
    def __init__(self):
        print("Initializing Agent....")
        
        print("--> Audio Agent")
        self.audio_agent = create_react_agent(
            model=init_chat_model("openai:gpt-4o-mini", temperature=0),
            tools=[extract_text_from_url_tool, extract_text_from_file_tool],
            prompt= AUDIO_AGENT_PROMPT,
            name="audio_agent",
        )
        
        print("--> Web Search Agent")
        self.web_search_agent = create_react_agent(
            model=init_chat_model("openai:gpt-4o-mini", temperature=0),
            tools=[search_web_tool],
            prompt= WEB_SEARCH_AGENT_PROMPT,
            name="web_research_agent",
        )
        
        print("--> Supervisor")
        self.supervisor = create_supervisor(
            model=init_chat_model("openai:gpt-4o-mini", temperature=0),
            agents=[self.web_search_agent, self.audio_agent],
            tools=[bird_video_count_tool, chess_image_to_fen_tool, chess_fen_get_best_next_move_tool,
                get_excel_columns_tool, calculate_excel_sum_by_columns_tool, execute_python_code_tool,
                text_inverter_tool, check_table_commutativity_tool],
            prompt= SUPERVISOR_PROMPT,
            add_handoff_back_messages=True,
            output_mode="full_history",
        ).compile()
        
        print("Agent initialized.")     
        
    
    
    def _call_antiga(self, question: str, task_id: str, task_file_name: str) -> str:
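        """Legacy streaming call path ("antiga" = old); kept alongside the newer __call__ below."""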
        print(f"Agent received question({task_id}) (first 50 chars): {question[:50]}...")
        
        file_prefix = ""
        if task_file_name:
            print(f"Task com arquivo {task_file_name}")
            File_Util.baixa_arquivo_task(task_file_name)
            file_prefix = f"File: {task_file_name}  . "
        
        for chunk in self.supervisor.stream(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": f"{file_prefix}{question}",
                    }
                ]
            },
        ):
            Agent_Util.pretty_print_messages(chunk, last_message=True)
            final_chunk = chunk

        print("Extraindo a resposta do agente")
        agent_answer = final_chunk["supervisor"]["messages"]
        print(f"resposta: {agent_answer}")
        
                
        final_answer = re.sub(r"^FINAL ANSWER:\s*", "", agent_answer, flags=re.IGNORECASE)
        print(f"Agent returning answer for task {task_id}: {final_answer}")
        
        return final_answer
    
    
    def __call__(self, question: str, task_id: str, task_file_name: str) -> str:
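        """Invoke the supervisor graph without streaming and return the cleaned final answer."""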
        print(f"Agent (nova forma de invocar) received question({task_id}) (first 50 chars): {question[:50]}...")
    
        file_prefix = ""
        if task_file_name:
            print(f"Task com arquivo {task_file_name}")
            File_Util.baixa_arquivo_task(task_file_name)
            file_prefix = f"File: {task_file_name}  . "

        # Call the supervisor without streaming
        response = self.supervisor.invoke(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": f"{file_prefix}{question}",
                    }
                ]
            }
        )

        # invoke() returns the final graph state keyed by its channels (e.g. "messages"),
        # not by node name the way the streamed chunks are.
        messages = response.get("messages", [])
        if not messages:
            print(f"No messages returned for task {task_id}.")
            return "Sorry, there was no answer."

        print("Extracting the last message")
        # State messages are LangChain message objects, so read the .content attribute.
        last_msg = messages[-1].content
        print(f"Last Message: {last_msg}")
        final_answer = re.sub(r"^FINAL ANSWER:\s*", "", last_msg.strip(), flags=re.IGNORECASE)
        print(f"Agent returning answer for task {task_id}: {final_answer}")
        
        return final_answer
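

# Minimal usage sketch (not part of the original module): assumes this file is run
# directly and that an OpenAI API key plus the prompts/tools modules are available.
# The question, task_id, and task_file_name values below are illustrative placeholders.
if __name__ == "__main__":
    agent = Agent()
    answer = agent(
        "Reverse the text: 'ecnetnes siht esreveR'",
        task_id="demo-task-001",
        task_file_name="",
    )
    print(f"Final answer: {answer}")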