Runtime error
Delete agent.py with huggingface_hub
agent.py
DELETED
@@ -1,206 +0,0 @@
from cerebrum.llm.apis import llm_chat, llm_call_tool, llm_chat_with_json_output
from cerebrum.interface import AutoTool
import os
import json

def get_config():
    from cerebrum.config.config_manager import config
    return config

config = get_config()


aios_kernel_url = config.get_kernel_url()

class DemoAgent:
    def __init__(self, agent_name):
        self.agent_name = agent_name
        self.config = self.load_config()
        self.tools = [
            tool.get_tool_call_format()
            for tool in AutoTool.from_batch_preloaded(self.config["tools"])
        ]

        self.plan_max_fail_times = 3
        self.tool_call_max_fail_times = 3

        self.start_time = None
        self.end_time = None
        self.request_waiting_times: list = []
        self.request_turnaround_times: list = []
        self.messages = []
        self.workflow_mode = "manual"  # (manual, automatic)
        self.rounds = 0

    def load_config(self):
        # Read the config.json located next to this script
        script_path = os.path.abspath(__file__)
        script_dir = os.path.dirname(script_path)
        config_file = os.path.join(script_dir, "config.json")

        with open(config_file, "r") as f:
            config = json.load(f)
        return config

    def pre_select_tools(self, tool_names):
        # Keep only the preloaded tools whose names appear in tool_names
        pre_selected_tools = []
        for tool_name in tool_names:
            for tool in self.tools:
                if tool["function"]["name"] == tool_name:
                    pre_selected_tools.append(tool)
                    break
        return pre_selected_tools

    def build_system_instruction(self):
        prefix = "".join(["".join(self.config["description"])])

        plan_instruction = "".join(
            [
                f"You are given the available tools from the tool list: {json.dumps(self.tools)} to help you solve problems. ",
                "Generate a plan with comprehensive yet minimal steps to fulfill the task. ",
                "The plan must follow the json format as below: ",
                "[",
                '{"action_type": "action_type_value", "action": "action_value", "tool_use": [tool_name1, tool_name2,...]}',
                '{"action_type": "action_type_value", "action": "action_value", "tool_use": [tool_name1, tool_name2,...]}',
                "...",
                "]",
                "In each step of the plan, identify the tools to use or recognize that no tool is necessary. ",
                "The following are some plan examples. ",
                "[",
                "[",
                '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},',
                '{"action_type": "chat", "action": "write a summarization based on the gathered information. ", "tool_use": []}',
                "];",
                "[",
                '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},',
                '{"action_type": "chat", "action": "understand the current methods and propose ideas that can improve ", "tool_use": []}',
                "]",
                "]",
            ]
        )

        if self.workflow_mode == "manual":
            self.messages.append({"role": "system", "content": prefix})

        else:
            assert self.workflow_mode == "automatic"
            self.messages.append({"role": "system", "content": prefix})
            self.messages.append({"role": "user", "content": plan_instruction})

    def automatic_workflow(self):
        # Ask the LLM for a JSON plan, retrying up to plan_max_fail_times
        for i in range(self.plan_max_fail_times):
            response = llm_chat_with_json_output(
                messages=self.messages,
                message_return_type="json"
            )["response"]["response_message"]

            try:
                workflow = json.loads(response)
            except json.JSONDecodeError:
                workflow = None

            self.rounds += 1

            if workflow:
                return workflow

            else:
                self.messages.append(
                    {
                        "role": "assistant",
                        "content": f"Failed {i+1} times to generate a valid plan. I need to regenerate a plan.",
                    }
                )
        return None

    def manual_workflow(self):
        workflow = [
            {
                "action_type": "call_tool",
                "action": "Search for relevant papers",
                "tool_use": ["demo_author/arxiv"],
            },
            {
                "action_type": "chat",
                "action": "Provide responses based on the user's query",
                "tool_use": [],
            },
        ]
        return workflow

    def run(self, task_input):
        self.build_system_instruction()

        self.messages.append({"role": "user", "content": task_input})

        workflow = None

        if self.workflow_mode == "automatic":
            workflow = self.automatic_workflow()
            self.messages = self.messages[:1]  # clear long context

        else:
            assert self.workflow_mode == "manual"
            workflow = self.manual_workflow()

        self.messages.append(
            {
                "role": "user",
                "content": f"[Thinking]: The workflow generated for the problem is {json.dumps(workflow)}. Follow the workflow to solve the problem step by step. ",
            }
        )

        try:
            if workflow:
                final_result = ""

                # Execute each planned step, either calling a tool or chatting
                for i, step in enumerate(workflow):
                    action_type = step["action_type"]
                    action = step["action"]
                    tool_use = step["tool_use"]

                    prompt = f"At step {i + 1}, you need to: {action}. "
                    self.messages.append({"role": "user", "content": prompt})

                    if tool_use:
                        selected_tools = self.pre_select_tools(tool_use)

                    else:
                        selected_tools = None

                    if action_type == "call_tool":
                        response = llm_call_tool(
                            agent_name=self.agent_name,
                            messages=self.messages,
                            tools=selected_tools,
                            base_url=aios_kernel_url
                        )["response"]
                    else:
                        response = llm_chat(
                            agent_name=self.agent_name,
                            messages=self.messages,
                            base_url=aios_kernel_url
                        )["response"]

                    self.messages.append({"role": "assistant", "content": response["response_message"]})

                    self.rounds += 1

                final_result = self.messages[-1]["content"]

                return {
                    "agent_name": self.agent_name,
                    "result": final_result,
                    "rounds": self.rounds,
                }

            else:
                return {
                    "agent_name": self.agent_name,
                    "result": "Failed to generate a valid workflow within the allowed number of attempts.",
                    "rounds": self.rounds,
                }

        except Exception:
            # Any unexpected failure during execution yields an empty result
            return {}
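For reference, a minimal sketch of how this deleted agent could have been driven, assuming the cerebrum package is installed, an AIOS kernel is reachable at the configured kernel URL, and a config.json sits next to agent.py with a "description" key (a list of strings) and a "tools" key (e.g. ["demo_author/arxiv"]). The agent name and task string below are purely illustrative, not part of the original file.

# Hypothetical usage sketch (not part of the deleted file).
from agent import DemoAgent

if __name__ == "__main__":
    # "demo_author/demo_agent" is an assumed name for illustration only
    agent = DemoAgent(agent_name="demo_author/demo_agent")
    result = agent.run("Find recent arxiv papers on LLM agents and summarize them.")
    # run() returns {} if an unexpected error occurred, otherwise a dict
    # with "agent_name", "result", and "rounds"
    print(result.get("result"))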