broadfield-dev committed on
Commit
db2c3fd
·
verified ·
1 Parent(s): 7e1df79

Update tools/orchestrator.py

Browse files
Files changed (1) hide show
  1. tools/orchestrator.py +35 -44
tools/orchestrator.py CHANGED
@@ -7,7 +7,7 @@ import time
7
  from model_logic import call_model_stream, MODELS_BY_PROVIDER, get_default_model_display_name_for_provider
8
  from memory_logic import retrieve_memories_semantic, retrieve_rules_semantic
9
  from tools.websearch import search_and_scrape_duckduckgo, scrape_url
10
- from tools.space_builder import create_huggingface_space, update_huggingface_space_file
11
  import prompts
12
  from utils import format_insights_for_prompt
13
 
@@ -26,7 +26,7 @@ def decide_on_tool(user_input: str, chat_history_for_prompt: list, initial_insig
26
  if url_match:
27
  return "scrape_url_and_report", {"url": url_match.group(1)}
28
 
29
- tool_trigger_keywords = ["what is", "how to", "explain", "search for", "build", "create", "make", "update", "modify", "change", "fix"]
30
  if len(user_input.split()) > 3 or "?" in user_input or any(w in user_input_lower for w in tool_trigger_keywords):
31
  history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
32
  guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
@@ -40,11 +40,7 @@ def decide_on_tool(user_input: str, chat_history_for_prompt: list, initial_insig
40
  json_match_tool = re.search(r"\{.*\}", tool_resp_raw, re.DOTALL)
41
  if json_match_tool:
42
  action_data = json.loads(json_match_tool.group(0))
43
- action_type = action_data.get("action", "quick_respond")
44
- action_input = action_data.get("action_input", {})
45
- if not isinstance(action_input, dict):
46
- action_input = {}
47
- return action_type, action_input
48
  except Exception as e:
49
  logger.error(f"Tool decision LLM error: {e}")
50
 
@@ -71,56 +67,53 @@ def orchestrate_and_respond(user_input: str, provider_name: str, model_display_n
71
  params = ["owner", "space_name", "sdk", "description"]
72
  if all(p in action_input for p in params):
73
  yield "status", "[Tool: Generating space content...]"
74
- description = action_input["description"]
75
- owner = action_input["owner"]
76
- space_name = action_input["space_name"]
77
- sdk = action_input["sdk"]
78
-
79
- space_gen_user_prompt = prompts.get_space_generation_user_prompt(description, owner, space_name)
80
  space_gen_messages = [
81
  {"role": "system", "content": prompts.SPACE_GENERATION_SYSTEM_PROMPT},
82
- {"role": "user", "content": space_gen_user_prompt}
83
  ]
84
-
85
- markdown_content = "".join(list(call_model_stream(
86
- provider=provider_name, model_display_name=model_display_name,
87
- messages=space_gen_messages, api_key_override=ui_api_key_override,
88
- temperature=0.1, max_tokens=4096
89
- )))
90
-
91
- yield "status", "[Tool: Creating Space with generated content...]"
92
-
93
- result = create_huggingface_space(
94
- owner=owner, space_name=space_name, sdk=sdk,
95
- markdown_content=markdown_content.strip()
96
- )
97
- context_str = f"Tool Result (Create Space): {result.get('result') or result.get('error', 'Unknown outcome from tool.')}"
 
 
98
  else:
99
- context_str = "Tool Failed: Missing required parameters for create_huggingface_space. Required: " + ", ".join(params)
 
 
 
 
 
 
 
 
 
 
 
100
  elif action_type == "update_huggingface_space_file":
101
- yield "status", "[Tool: Updating file...]"
102
  params = ["owner", "space_name", "file_path", "new_content", "commit_message"]
103
  if all(p in action_input for p in params):
 
104
  result = update_huggingface_space_file(**action_input)
105
- context_str = f"Tool Result (Update File): {result.get('result') or result.get('error', 'Unknown outcome from tool.')}"
106
  else:
107
- context_str = "Tool Failed: Missing required parameters for update_huggingface_space_file. Required: " + ", ".join(params)
108
  elif action_type == "search_duckduckgo_and_report" and WEB_SEARCH_ENABLED:
109
  query = action_input.get("search_engine_query")
110
  if query:
111
  yield "status", f"[Web: '{query[:60]}'...]"
112
  results = search_and_scrape_duckduckgo(query, num_results=2)
113
  context_str = "Web Content:\n" + "\n".join([f"Source {i+1} ({r.get('url','N/A')}):\n{r.get('content', r.get('error', 'N/A'))[:3000]}\n---" for i, r in enumerate(results)])
114
- else:
115
- context_str = "Tool Failed: Missing 'search_engine_query' for web search."
116
- elif action_type == "scrape_url_and_report" and WEB_SEARCH_ENABLED:
117
- url = action_input.get("url")
118
- if url:
119
- yield "status", f"[Web: '{url[:60]}'...]"
120
- result = scrape_url(url)
121
- context_str = f"Web Content for {url}:\n{result.get('content', result.get('error', 'No content scraped.'))}"
122
- else:
123
- context_str = "Tool Failed: Missing 'url' for scraping."
124
  elif action_type == "answer_using_conversation_memory":
125
  yield "status", "[Searching conversation memory...]"
126
  mems = retrieve_memories_semantic(f"User query: {user_input}\nContext:\n{history_str_for_prompt[-1000:]}", k=2)
@@ -132,8 +125,6 @@ def orchestrate_and_respond(user_input: str, provider_name: str, model_display_n
132
  streamed_response = ""
133
  try:
134
  for chunk in call_model_stream(provider_name, model_display_name, final_llm_messages, ui_api_key_override, max_tokens=4096):
135
- if isinstance(chunk, str) and chunk.startswith("Error:"):
136
- streamed_response += f"\n{chunk}\n"; yield "response_chunk", f"\n{chunk}\n"; break
137
  streamed_response += chunk
138
  yield "response_chunk", chunk
139
  except Exception as e:
 
7
  from model_logic import call_model_stream, MODELS_BY_PROVIDER, get_default_model_display_name_for_provider
8
  from memory_logic import retrieve_memories_semantic, retrieve_rules_semantic
9
  from tools.websearch import search_and_scrape_duckduckgo, scrape_url
10
+ from tools.space_builder import create_huggingface_space, update_huggingface_space_file, list_space_files, get_space_file_content
11
  import prompts
12
  from utils import format_insights_for_prompt
13
 
 
26
  if url_match:
27
  return "scrape_url_and_report", {"url": url_match.group(1)}
28
 
29
+ tool_trigger_keywords = ["search", "build", "create", "make", "update", "modify", "change", "fix", "list", "show", "files", "get", "read", "edit"]
30
  if len(user_input.split()) > 3 or "?" in user_input or any(w in user_input_lower for w in tool_trigger_keywords):
31
  history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
32
  guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
 
40
  json_match_tool = re.search(r"\{.*\}", tool_resp_raw, re.DOTALL)
41
  if json_match_tool:
42
  action_data = json.loads(json_match_tool.group(0))
43
+ return action_data.get("action", "quick_respond"), action_data.get("action_input", {})
 
 
 
 
44
  except Exception as e:
45
  logger.error(f"Tool decision LLM error: {e}")
46
 
 
67
  params = ["owner", "space_name", "sdk", "description"]
68
  if all(p in action_input for p in params):
69
  yield "status", "[Tool: Generating space content...]"
 
 
 
 
 
 
70
  space_gen_messages = [
71
  {"role": "system", "content": prompts.SPACE_GENERATION_SYSTEM_PROMPT},
72
+ {"role": "user", "content": prompts.get_space_generation_user_prompt(**action_input)}
73
  ]
74
+ markdown_content = "".join(list(call_model_stream(provider_name, model_display_name, space_gen_messages, ui_api_key_override, 0.1, 4096)))
75
+ yield "status", "[Tool: Creating Space...]"
76
+ result = create_huggingface_space(markdown_content=markdown_content.strip(), **action_input)
77
+ context_str = f"Tool Result (Create Space): {result.get('result') or result.get('error', 'Unknown outcome')}"
78
+ else:
79
+ context_str = "Tool Failed: Missing parameters for create_huggingface_space. Required: " + ", ".join(params)
80
+ elif action_type == "list_space_files":
81
+ params = ["owner", "space_name"]
82
+ if all(p in action_input for p in params):
83
+ yield "status", "[Tool: Listing files...]"
84
+ result = list_space_files(**action_input)
85
+ if "error" in result:
86
+ context_str = f"Tool Result (List Files): Error - {result['error']}"
87
+ else:
88
+ files_str = "\n- ".join(result.get("files", []))
89
+ context_str = f"Tool Result (List Files):\nStatus: {result.get('status', 'OK')}\nFiles:\n- {files_str}"
90
  else:
91
+ context_str = "Tool Failed: Missing parameters for list_space_files. Required: " + ", ".join(params)
92
+ elif action_type == "get_space_file_content":
93
+ params = ["owner", "space_name", "file_path"]
94
+ if all(p in action_input for p in params):
95
+ yield "status", "[Tool: Reading file content...]"
96
+ result = get_space_file_content(**action_input)
97
+ if "error" in result:
98
+ context_str = f"Tool Result (Get Content): Error - {result['error']}"
99
+ else:
100
+ context_str = f"Tool Result (Get Content for '{action_input['file_path']}'):\nStatus: {result.get('status', 'OK')}\n```\n{result.get('content', '')}\n```"
101
+ else:
102
+ context_str = "Tool Failed: Missing parameters for get_space_file_content. Required: " + ", ".join(params)
103
  elif action_type == "update_huggingface_space_file":
 
104
  params = ["owner", "space_name", "file_path", "new_content", "commit_message"]
105
  if all(p in action_input for p in params):
106
+ yield "status", "[Tool: Updating file...]"
107
  result = update_huggingface_space_file(**action_input)
108
+ context_str = f"Tool Result (Update File): {result.get('result') or result.get('error', 'Unknown outcome')}"
109
  else:
110
+ context_str = "Tool Failed: Missing parameters for update_huggingface_space_file. Required: " + ", ".join(params)
111
  elif action_type == "search_duckduckgo_and_report" and WEB_SEARCH_ENABLED:
112
  query = action_input.get("search_engine_query")
113
  if query:
114
  yield "status", f"[Web: '{query[:60]}'...]"
115
  results = search_and_scrape_duckduckgo(query, num_results=2)
116
  context_str = "Web Content:\n" + "\n".join([f"Source {i+1} ({r.get('url','N/A')}):\n{r.get('content', r.get('error', 'N/A'))[:3000]}\n---" for i, r in enumerate(results)])
 
 
 
 
 
 
 
 
 
 
117
  elif action_type == "answer_using_conversation_memory":
118
  yield "status", "[Searching conversation memory...]"
119
  mems = retrieve_memories_semantic(f"User query: {user_input}\nContext:\n{history_str_for_prompt[-1000:]}", k=2)
 
125
  streamed_response = ""
126
  try:
127
  for chunk in call_model_stream(provider_name, model_display_name, final_llm_messages, ui_api_key_override, max_tokens=4096):
 
 
128
  streamed_response += chunk
129
  yield "response_chunk", chunk
130
  except Exception as e: