import json
import os
import traceback

from cerebrum.config.config_manager import config
from cerebrum.interface import AutoTool
from cerebrum.llm.apis import llm_chat, llm_call_tool, llm_chat_with_json_output

aios_kernel_url = config.get_kernel_url()
class ChatSupportAgent:
    def __init__(self, agent_name):
        self.agent_name = agent_name
        self.messages = []
        self.rounds = 0
        self.log_to_board = False
        # "manual" runs the fixed plan in manual_workflow(); "automatic" asks the
        # LLM to generate one. Both attributes were read elsewhere but never
        # initialized; the retry budget of 3 is an assumed default.
        self.workflow_mode = "manual"
        self.plan_max_fail_times = 3
        self.config = self.load_config()
        self.tools = [
            tool.get_tool_call_format()
            for tool in AutoTool.from_batch_preloaded(self.config["tools"])
        ]
    def load_config(self):
        # Load the agent configuration that sits next to this script.
        script_path = os.path.abspath(__file__)
        script_dir = os.path.dirname(script_path)
        config_file = os.path.join(script_dir, "config.json")
        with open(config_file, "r") as f:
            config = json.load(f)
        return config
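
    # load_config assumes a config.json next to this file. The keys below are the
    # ones this class actually reads ("description" and "tools"); the values shown
    # are illustrative only, not a documented schema:
    #   {
    #     "description": ["You are a chat support agent that helps users troubleshoot technical issues."],
    #     "tools": ["example/tool_name"]
    #   }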
    def pre_select_tools(self, tool_names):
        # Keep only the tool schemas whose names appear in tool_names.
        pre_selected_tools = []
        for tool_name in tool_names:
            for tool in self.tools:
                if tool["function"]["name"] == tool_name:
                    pre_selected_tools.append(tool)
                    break
        return pre_selected_tools
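
    # Each entry in self.tools is expected to be an OpenAI-style function schema.
    # This shape is inferred from the tool["function"]["name"] lookup above rather
    # than from any documented contract:
    #   {"type": "function",
    #    "function": {"name": "...", "description": "...", "parameters": {...}}}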
    def build_system_instruction(self):
        prefix = "".join(self.config["description"])
        plan_instruction = "".join(
            [
                f"You are given the available tools from the tool list: {json.dumps(self.tools)} to help you solve problems. ",
                "Generate a plan with comprehensive yet minimal steps to fulfill the task. ",
                "The plan must follow the json format as below: ",
                "[",
                '{"action_type": "action_type_value", "action": "action_value", "tool_use": [tool_name1, tool_name2,...]},',
                '{"action_type": "action_type_value", "action": "action_value", "tool_use": [tool_name1, tool_name2,...]},',
                "...",
                "]",
                "In each step of the plan, identify the tools to use, or recognize that no tool is necessary. ",
                "The following are some plan examples. ",
                "[",
                "[",
                '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},',
                '{"action_type": "chat", "action": "write a summarization based on the gathered information. ", "tool_use": []}',
                "];",
                "[",
                '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},',
                '{"action_type": "chat", "action": "understand the current methods and propose ideas that can improve them. ", "tool_use": []}',
                "]",
                "]",
            ]
        )
        # The system prompt always carries the agent description; in automatic mode
        # the planning instruction is also sent so the LLM can draft the workflow.
        # (This restores the previously commented-out appends, without which the
        # strings built above were discarded.)
        self.messages.append({"role": "system", "content": prefix})
        if self.workflow_mode == "automatic":
            self.messages.append({"role": "user", "content": plan_instruction})
    def automatic_workflow(self):
        # Ask the LLM for a JSON plan, retrying up to plan_max_fail_times.
        for i in range(self.plan_max_fail_times):
            response = llm_chat_with_json_output(
                llms=[{"name": "gpt-4o-mini", "backend": "openai"}],
                messages=self.messages,
                message_return_type="json",
                base_url=aios_kernel_url,  # kept consistent with the other kernel calls
            )["response"]["response_message"]
            try:
                workflow = json.loads(response)
            except json.JSONDecodeError:
                workflow = None
            self.rounds += 1
            if workflow:
                return workflow
            self.messages.append(
                {
                    "role": "assistant",
                    "content": f"Failed to generate a valid plan ({i + 1} attempt(s) so far). I need to regenerate the plan.",
                }
            )
        return None
    def manual_workflow(self):
        # Fixed three-step plan used when workflow_mode == "manual".
        workflow = [
            {
                "action_type": "chat",
                "action": "identify the user's technical issue or requirement",
                "tool_use": [],
            },
            {
                "action_type": "chat",
                "action": "search for troubleshooting steps for the identified issue",
                "tool_use": [],
            },
            {
                "action_type": "chat",
                "action": "organize the above information and summarize the solution",
                "tool_use": [],
            },
        ]
        return workflow
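
    # For reference, a tool-using step would follow the same shape; the "arxiv"
    # tool name below is only illustrative, echoing the prompt examples in
    # build_system_instruction. run() routes such steps to llm_call_tool:
    #   {"action_type": "tool_use", "action": "gather information from arxiv. ",
    #    "tool_use": ["arxiv"]}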
    def run(self, task_input):
        try:
            # 1. System instruction / prompt setup
            self.build_system_instruction()
            self.messages.append({"role": "user", "content": task_input})

            # 2. Decide workflow
            if self.workflow_mode == "automatic":
                workflow = self.automatic_workflow()
                self.messages = self.messages[:1]  # Reset context to avoid token bloat
            else:
                workflow = self.manual_workflow()

            # 3. Execute the workflow, appending the plan as a thinking step first
            #    (only when a plan exists, so we never send a "null" plan).
            if workflow:
                self.messages.append({
                    "role": "user",
                    "content": f"[Thinking]: The workflow generated is {json.dumps(workflow)}. Follow it step by step."
                })
                final_result = ""
                for i, step in enumerate(workflow):
                    action_type = step["action_type"]
                    action = step["action"]
                    tool_use = step["tool_use"]

                    # Append step prompt
                    step_prompt = f"Step {i + 1}: {action}"
                    self.messages.append({"role": "user", "content": step_prompt})

                    # Prepare tools (if any)
                    selected_tools = self.pre_select_tools(tool_use) if tool_use else None

                    # Route tool steps to llm_call_tool. Generated plans label them
                    # "tool_use" (see the prompt examples); "call_tool" is accepted
                    # as well for backward compatibility with the old check.
                    if action_type in ("tool_use", "call_tool") and selected_tools:
                        response = llm_call_tool(
                            agent_name=self.agent_name,
                            messages=self.messages,
                            llms=[{"name": "gpt-4o-mini", "backend": "openai"}],
                            tools=selected_tools,
                            base_url=aios_kernel_url,
                        )["response"]
                    else:
                        response = llm_chat(
                            agent_name=self.agent_name,
                            messages=self.messages,
                            llms=[{"name": "gpt-4o-mini", "backend": "openai"}],
                            base_url=aios_kernel_url,
                        )["response"]
                    self.rounds += 1

                    self.messages.append({"role": "assistant", "content": response.get("response_message", "")})
                    final_result = self.messages[-1]["content"]

                return {
                    "agent_name": self.agent_name,
                    "result": final_result.strip(),
                    "rounds": self.rounds,
                }
            else:
                return {
                    "agent_name": self.agent_name,
                    "result": "⚠️ No valid workflow was generated.",
                    "rounds": self.rounds,
                }
        except Exception as e:
            print(f"[Agent Run Error]: {e}")
            traceback.print_exc()
            return {
                "agent_name": self.agent_name,
                "result": f"⚠️ Exception: {e}",
                "rounds": self.rounds,
            }
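
# Minimal usage sketch, assuming an AIOS kernel is reachable at the configured
# kernel URL and config.json lists valid preloaded tools. The agent name and the
# task below are made up for illustration:
if __name__ == "__main__":
    agent = ChatSupportAgent("chat_support_agent")
    output = agent.run("My laptop won't connect to Wi-Fi. How can I fix it?")
    print(output["result"])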