matuteiglesias committed on
Commit
095ea7c
·
verified ·
1 Parent(s): 60f0872

🆕 Fresh upload after wipe

Browse files
.huggingface.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # .huggingface.yaml
2
+ title: Demo Agent - Research Assistant
3
+ sdk: gradio
4
+ app_file: app.py
5
+ tags:
6
+ - academic
7
+ - LLM
8
+ - planning
9
+ - agent
10
+ license: cc0-1.0
README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ 🌀 Refreshing build...
__pycache__/agent.cpython-310.pyc ADDED
Binary file (6.44 kB). View file
 
__pycache__/agent.cpython-311.pyc ADDED
Binary file (8.68 kB). View file
 
agent.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# agent.py — module setup.
#
# Make the Cerebrum (aios-agent-sdk) imports genuinely optional: previously the
# try/except fallback was dead code because the module re-imported from
# `cerebrum.*` unconditionally right after it, so the Space crashed whenever
# the SDK was missing. The mock `AutoTool` was also commented out, which would
# have broken DemoAgent.__init__ in fallback mode.

import os
import json
import sys
import traceback  # used by DemoAgent.run for error reporting
from pathlib import Path

# Allow a local development checkout of cerebrum to shadow an installed copy.
cerebrum_path = Path.home() / "repos" / "terra" / "src" / "cerebrum"
sys.path.insert(0, str(cerebrum_path))

# True when running inside a Hugging Face Space (HF_SPACE set in Space env).
REMOTE = os.getenv("HF_SPACE") == "true"
CEREBRUM_AVAILABLE = False

try:
    # NOTE: the real SDK namespace is `cerebrum` — the old code tried `aios.*`
    # here, which never matched the package imported below.
    from cerebrum.llm.apis import llm_chat, llm_call_tool, llm_chat_with_json_output
    from cerebrum.interface import AutoTool
    CEREBRUM_AVAILABLE = True
    print("✅ Loaded aios-agent-sdk (Cerebrum) successfully.")
except ImportError:
    print("⚠️ cerebrum (aios-agent-sdk) not found. Falling back to mock mode.")

    def llm_chat(*args, **kwargs):
        """Mock: fixed chat reply matching the SDK's response envelope."""
        return {"response": {"response_message": "[MOCK] Chat reply"}}

    def llm_call_tool(*args, **kwargs):
        """Mock: fixed tool reply matching the SDK's response envelope."""
        return {"response": {"response_message": "[MOCK] Tool reply"}}

    def llm_chat_with_json_output(*args, **kwargs):
        """Mock: a minimal one-step JSON plan, as a string like the SDK returns."""
        return {"response": {"response_message": '[{"action_type": "chat", "action": "Mock Plan", "tool_use": []}]'}}

    class AutoTool:
        """Mock stand-in: preloads no tools, so the agent runs chat-only."""

        @staticmethod
        def from_batch_preloaded(tools):
            return []


def get_config():
    """Return the Cerebrum runtime config, or a minimal stub in mock mode.

    The stub only implements get_kernel_url(), which is all this module uses.
    """
    try:
        from cerebrum.config.config_manager import config
        return config
    except ImportError:
        class _StubConfig:
            @staticmethod
            def get_kernel_url():
                return "http://localhost:8000"
        return _StubConfig()


config = get_config()

# Base URL of the AIOS kernel that LLM/tool calls are routed through.
aios_kernel_url = config.get_kernel_url()
class DemoAgent:
    """Academic research assistant agent.

    Builds a multi-step workflow — a fixed manual plan or an LLM-generated
    one — and executes each step through the AIOS kernel, routing tool-use
    steps to llm_call_tool and plain steps to llm_chat.
    """

    def __init__(self, agent_name):
        self.agent_name = agent_name
        self.config = self.load_config()
        # Tool-call schemas for every tool declared in config.json.
        self.tools = [
            tool.get_tool_call_format()
            for tool in AutoTool.from_batch_preloaded(self.config["tools"])
        ]

        self.plan_max_fail_times = 3
        self.tool_call_max_fail_times = 3

        self.start_time = None
        self.end_time = None
        self.request_waiting_times: list = []
        self.request_turnaround_times: list = []
        self.messages = []  # running chat transcript sent to the LLM
        self.workflow_mode = "manual"  # (manual, automatic)
        self.rounds = 0  # LLM round-trips made so far

    def load_config(self):
        """Load config.json sitting next to this file.

        Lets FileNotFoundError / json.JSONDecodeError propagate — the agent
        cannot run without a valid config, so failing loudly is correct.
        """
        script_dir = os.path.dirname(os.path.abspath(__file__))
        config_file = os.path.join(script_dir, "config.json")
        with open(config_file, "r", encoding="utf-8") as f:
            return json.load(f)

    def pre_select_tools(self, tool_names):
        """Return the tool-call schemas whose name is in *tool_names*.

        Unknown names are silently skipped (the planner may reference tools
        that were never preloaded). Order follows *tool_names*.
        """
        pre_selected_tools = []
        for tool_name in tool_names:
            for tool in self.tools:
                if tool["function"]["name"] == tool_name:
                    pre_selected_tools.append(tool)
                    break
        return pre_selected_tools

    def build_system_instruction(self):
        """Seed self.messages with the system prompt; in automatic mode also
        append the plan-generation instruction.

        Raises ValueError on an unknown workflow_mode (was an assert, which
        disappears under `python -O`).
        """
        prefix = "".join(["".join(self.config["description"])])

        plan_instruction = "".join(
            [
                f"You are given the available tools from the tool list: {json.dumps(self.tools)} to help you solve problems. ",
                "Generate a plan with comprehensive yet minimal steps to fulfill the task. ",
                "The plan must follow the json format as below: ",
                "[",
                '{"action_type": "action_type_value", "action": "action_value","tool_use": [tool_name1, tool_name2,...]}',
                '{"action_type": "action_type_value", "action": "action_value", "tool_use": [tool_name1, tool_name2,...]}',
                "...",
                "]",
                "In each step of the planned plan, identify tools to use and recognize no tool is necessary. ",
                "Followings are some plan examples. ",
                "[" "[",
                '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},',
                '{"action_type": "chat", "action": "write a summarization based on the gathered information. ", "tool_use": []}',
                "];",
                "[",
                '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},',
                '{"action_type": "chat", "action": "understand the current methods and propose ideas that can improve ", "tool_use": []}',
                "]",
                "]",
            ]
        )

        # Both modes share the same system prompt.
        self.messages.append({"role": "system", "content": prefix})
        if self.workflow_mode == "automatic":
            self.messages.append({"role": "user", "content": plan_instruction})
        elif self.workflow_mode != "manual":
            raise ValueError(f"Unknown workflow_mode: {self.workflow_mode!r}")

    def automatic_workflow(self):
        """Ask the LLM for a JSON plan, retrying up to plan_max_fail_times.

        Returns the parsed plan (list of step dicts) or None if every attempt
        produced unparseable output.
        """
        for i in range(self.plan_max_fail_times):
            response = llm_chat_with_json_output(
                messages=self.messages,
                message_return_type="json"
            )["response"]["response_message"]

            try:
                workflow = json.loads(response)
            except (json.JSONDecodeError, TypeError):
                # Was a bare `except:` — keep it narrow so real bugs surface.
                workflow = None

            self.rounds += 1

            if workflow:
                return workflow
            self.messages.append(
                {
                    "role": "assistant",
                    "content": f"Fail {i+1} times to generate a valid plan. I need to regenerate a plan",
                }
            )
        return None

    def manual_workflow(self):
        """Fixed two-step plan: search arxiv, then answer from the results."""
        workflow = [
            {
                "action_type": "call_tool",
                "action": "Search for relevant papers",
                "tool_use": ["demo_author/arxiv"],
            },
            {
                "action_type": "chat",
                "action": "Provide responses based on the user's query",
                "tool_use": [],
            },
        ]
        return workflow

    def run(self, task_input):
        """Execute the agent on *task_input*.

        Returns a dict with keys "agent_name", "result", "rounds" — also on
        failure (the old code returned a bare {} from a silent
        `except Exception`, hiding every error from callers).
        """
        self.build_system_instruction()
        self.messages.append({"role": "user", "content": task_input})

        if self.workflow_mode == "automatic":
            workflow = self.automatic_workflow()
            self.messages = self.messages[:1]  # clear long context
        else:
            workflow = self.manual_workflow()

        self.messages.append(
            {
                "role": "user",
                "content": f"[Thinking]: The workflow generated for the problem is {json.dumps(workflow)}. Follow the workflow to solve the problem step by step. ",
            }
        )

        if not workflow:
            return {
                "agent_name": self.agent_name,
                "result": "Failed to generate a valid workflow in the given times.",
                "rounds": self.rounds,
            }

        try:
            for i, step in enumerate(workflow):
                action_type = step["action_type"]
                action = step["action"]
                tool_use = step["tool_use"]

                prompt = f"At step {i + 1}, you need to: {action}. "
                self.messages.append({"role": "user", "content": prompt})

                # Restrict the kernel call to the tools this step declares.
                selected_tools = self.pre_select_tools(tool_use) if tool_use else None

                if action_type == "call_tool":
                    response = llm_call_tool(
                        agent_name=self.agent_name,
                        messages=self.messages,
                        tools=selected_tools,
                        base_url=aios_kernel_url
                    )["response"]
                else:
                    response = llm_chat(
                        agent_name=self.agent_name,
                        messages=self.messages,
                        base_url=aios_kernel_url
                    )["response"]

                self.messages.append({"role": "assistant", "content": response["response_message"]})
                self.rounds += 1

            return {
                "agent_name": self.agent_name,
                "result": self.messages[-1]["content"],
                "rounds": self.rounds,
            }
        except Exception:
            # Surface the failure instead of swallowing it.
            return {
                "agent_name": self.agent_name,
                "result": f"Agent failed with an error:\n{traceback.format_exc()}",
                "rounds": self.rounds,
            }
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py — Hugging Face Space entry point: startup diagnostics + agent setup.
import gradio as gr
import os
import sys
import platform
import pkgutil
import socket

print("===== HF Space Environment Diagnostics =====")
print(f"Platform: {platform.system()} {platform.release()}")
print(f"Python Version: {sys.version}")
# SECURITY: log only the variable *names*. Dumping os.environ values would
# leak Space secrets (HF tokens, API keys) into publicly visible build logs.
print(f"Environment Variables:\n{sorted(os.environ)}")
print(f"Installed Packages:\n{[p.name for p in pkgutil.iter_modules()]}")

try:
    ip = socket.gethostbyname(socket.gethostname())
    print(f"Local IP: {ip}")
except OSError:  # covers socket.gaierror; was a bare `except:`
    print("⚠️ Could not determine local IP")

print("===== Starting Agent Interface =====")

from agent import DemoAgent

agent = DemoAgent(agent_name="demo_agent")
def run_agent(input_text):
    """Gradio callback: run the DemoAgent on the user's query.

    Returns the agent's "result" text, or a placeholder when the agent
    returned no usable result.
    """
    print(f">>> Received input: {input_text}")
    outcome = agent.run(input_text)
    print(f"<<< Output: {outcome}")
    reply = outcome.get("result", "⚠️ No result returned.")
    return reply
# Wire the agent callback into a simple textbox-in / textbox-out UI.
_interface_kwargs = dict(
    fn=run_agent,
    inputs=gr.Textbox(lines=5, label="Enter your research question"),
    outputs=gr.Textbox(label="Agent Response"),
    title="Demo Agent: Academic Research Assistant",
    description="Find papers, summarize key findings, and generate research questions.",
)
iface = gr.Interface(**_interface_kwargs)

if __name__ == "__main__":
    iface.launch()
config.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "demo_agent",
3
+ "description": [
4
+ "You are an academic research assistant. ",
5
+ "Help users find relevant research papers, summarize key findings, and generate potential research questions."
6
+ ],
7
+ "tools": [
8
+ "demo_author/arxiv"
9
+ ],
10
+ "meta": {
11
+ "author": "demo_author",
12
+ "version": "0.0.1",
13
+ "license": "CC0"
14
+ },
15
+ "build": {
16
+ "entry": "agent.py",
17
+ "module": "DemoAgent"
18
+ }
19
+ }
meta_requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ arxiv