Update tools/orchestrator.py
tools/orchestrator.py (+56, -57) CHANGED
@@ -7,7 +7,7 @@ import time
 from model_logic import call_model_stream, MODELS_BY_PROVIDER, get_default_model_display_name_for_provider
 from memory_logic import retrieve_memories_semantic, retrieve_rules_semantic
 from tools.websearch import search_and_scrape_duckduckgo, scrape_url
-from tools.space_builder import build_huggingface_space
+from tools.space_builder import create_huggingface_space, update_huggingface_space_file
 import prompts
 from utils import format_insights_for_prompt

@@ -23,28 +23,27 @@ def decide_on_tool(user_input: str, chat_history_for_prompt: list, initial_insig

     if "http://" in user_input or "https://" in user_input:
         url_match = re.search(r'(https?://[^\s]+)', user_input)
-        if url_match:
+        if url_match:
+            return "scrape_url_and_report", {"url": url_match.group(1)}

-
-
-    if len(user_input.split()) > 3 or "?" in user_input or any(w in user_input_lower for w in ["what is", "how to", "explain", "search for", "build", "create", "make"]):
+    tool_trigger_keywords = ["what is", "how to", "explain", "search for", "build", "create", "make", "update", "modify", "change", "fix"]
+    if len(user_input.split()) > 3 or "?" in user_input or any(w in user_input_lower for w in tool_trigger_keywords):
         history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
         guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
         tool_user_prompt = prompts.get_tool_user_prompt(user_input, history_snippet, guideline_snippet)
-        tool_decision_messages = [{"role":"system", "content": prompts.TOOL_SYSTEM_PROMPT}, {"role":"user", "content": tool_user_prompt}]
-        tool_model_display = next((dn for dn, mid in MODELS_BY_PROVIDER.get(TOOL_DECISION_PROVIDER.lower(), {}).get("models", {}).items() if mid == TOOL_DECISION_MODEL_ID), None)
-        if not tool_model_display: tool_model_display = get_default_model_display_name_for_provider(TOOL_DECISION_PROVIDER)
+        tool_decision_messages = [{"role": "system", "content": prompts.TOOL_SYSTEM_PROMPT}, {"role": "user", "content": tool_user_prompt}]
+        tool_model_display = next((dn for dn, mid in MODELS_BY_PROVIDER.get(TOOL_DECISION_PROVIDER.lower(), {}).get("models", {}).items() if mid == TOOL_DECISION_MODEL_ID), None) or get_default_model_display_name_for_provider(TOOL_DECISION_PROVIDER)

         if tool_model_display:
             try:
-                tool_resp_raw = "".join(list(call_model_stream(provider=TOOL_DECISION_PROVIDER, model_display_name=tool_model_display, messages=tool_decision_messages, temperature=0.0, max_tokens=
+                tool_resp_raw = "".join(list(call_model_stream(provider=TOOL_DECISION_PROVIDER, model_display_name=tool_model_display, messages=tool_decision_messages, temperature=0.0, max_tokens=2048)))
                 json_match_tool = re.search(r"\{.*\}", tool_resp_raw, re.DOTALL)
                 if json_match_tool:
                     action_data = json.loads(json_match_tool.group(0))
                     action_type = action_data.get("action", "quick_respond")
                     action_input = action_data.get("action_input", {})
-                    if not isinstance(action_input, dict): action_input = {}
+                    if not isinstance(action_input, dict):
+                        action_input = {}
                     return action_type, action_input
             except Exception as e:
                 logger.error(f"Tool decision LLM error: {e}")
@@ -52,72 +51,72 @@ def decide_on_tool(user_input: str, chat_history_for_prompt: list, initial_insig
     return "quick_respond", {}

 def orchestrate_and_respond(user_input: str, provider_name: str, model_display_name: str, chat_history_for_prompt: list[dict], custom_system_prompt: str = None, ui_api_key_override: str = None):
-    process_start_time = time.time()
     request_id = os.urandom(4).hex()
     logger.info(f"ORCHESTRATOR [{request_id}] Start. User: '{user_input[:50]}...'")
-
-    history_str_for_prompt = "\n".join([f"{('User' if t['role'] == 'user' else 'AI')}: {t['content']}" for t in chat_history_for_prompt[-(MAX_HISTORY_TURNS * 2):]])
+    history_str_for_prompt = "\n".join([f"{t['role']}: {t['content']}" for t in chat_history_for_prompt])

     yield "status", "[Checking guidelines...]"
     initial_insights = retrieve_rules_semantic(f"{user_input}\n{history_str_for_prompt}", k=5)
     initial_insights_ctx_str, parsed_initial_insights_list = format_insights_for_prompt(initial_insights)

     yield "status", "[Choosing best approach...]"
-    action_type, action_input_dict = decide_on_tool(user_input, chat_history_for_prompt, initial_insights_ctx_str)
-    logger.info(f"ORCHESTRATOR [{request_id}]: Tool Decision: Action='{action_type}', Input='{action_input_dict}'")
+    action_type, action_input = decide_on_tool(user_input, chat_history_for_prompt, initial_insights_ctx_str)
+    logger.info(f"ORCHESTRATOR [{request_id}]: Tool Decision: Action='{action_type}', Input='{action_input}'")

     yield "status", f"[Path: {action_type}]"
-
-    context_str
+    final_system_prompt = custom_system_prompt or prompts.DEFAULT_SYSTEM_PROMPT
+    context_str = None

-    if action_type == "answer_using_conversation_memory":
+    if action_type == "create_huggingface_space":
+        yield "status", "[Tool: Creating Space...]"
+        params = ["owner", "space_name", "sdk", "markdown_content"]
+        if all(p in action_input for p in params):
+            result = create_huggingface_space(**action_input)
+            context_str = f"Tool Result (Create Space): {result.get('result') or result.get('error', 'Unknown outcome from tool.')}"
+        else:
+            context_str = "Tool Failed: Missing required parameters for create_huggingface_space. Required: " + ", ".join(params)
+    elif action_type == "update_huggingface_space_file":
+        yield "status", "[Tool: Updating file...]"
+        params = ["owner", "space_name", "file_path", "new_content", "commit_message"]
+        if all(p in action_input for p in params):
+            result = update_huggingface_space_file(**action_input)
+            context_str = f"Tool Result (Update File): {result.get('result') or result.get('error', 'Unknown outcome from tool.')}"
+        else:
+            context_str = "Tool Failed: Missing required parameters for update_huggingface_space_file. Required: " + ", ".join(params)
+    elif action_type == "search_duckduckgo_and_report" and WEB_SEARCH_ENABLED:
+        query = action_input.get("search_engine_query")
+        if query:
+            yield "status", f"[Web: '{query[:60]}'...]"
+            results = search_and_scrape_duckduckgo(query, num_results=2)
+            context_str = "Web Content:\n" + "\n".join([f"Source {i+1} ({r.get('url','N/A')}):\n{r.get('content', r.get('error', 'N/A'))[:3000]}\n---" for i, r in enumerate(results)])
+        else:
+            context_str = "Tool Failed: Missing 'search_engine_query' for web search."
+    elif action_type == "scrape_url_and_report" and WEB_SEARCH_ENABLED:
+        url = action_input.get("url")
+        if url:
+            yield "status", f"[Web: '{url[:60]}'...]"
+            result = scrape_url(url)
+            context_str = f"Web Content for {url}:\n{result.get('content', result.get('error', 'No content scraped.'))}"
+        else:
+            context_str = "Tool Failed: Missing 'url' for scraping."
+    elif action_type == "answer_using_conversation_memory":
         yield "status", "[Searching conversation memory...]"
         mems = retrieve_memories_semantic(f"User query: {user_input}\nContext:\n{history_str_for_prompt[-1000:]}", k=2)
         context_str = "Relevant Past Interactions:\n" + "\n".join([f"- User:{m.get('user_input','')}->AI:{m.get('bot_response','')} (Takeaway:{m.get('metrics',{}).get('takeaway','N/A')})" for m in mems]) if mems else "No relevant past interactions found."
-
-
-
-        if query_or_url:
-            yield "status", f"[Web: '{query_or_url[:60]}'...]"
-            web_results = []
-            try:
-                if action_type == "search_duckduckgo_and_report": web_results = search_and_scrape_duckduckgo(query_or_url, num_results=2)
-                elif action_type == "scrape_url_and_report": web_results = [scrape_url(query_or_url)]
-            except Exception as e: web_results = [{"url": query_or_url, "error": str(e)}]
-            context_str = "Web Content:\n" + "\n".join([f"Source {i+1}:\nURL:{r.get('url','N/A')}\nTitle:{r.get('title','N/A')}\nContent:\n{(r.get('content') or r.get('error') or 'N/A')[:3500]}\n---" for i,r in enumerate(web_results)]) if web_results else f"No results from {action_type} for '{query_or_url}'."
-            yield "status", "[Synthesizing web report...]"
-            final_system_prompt_str += " Generate report/answer from web content, history, & guidelines. Cite URLs as [Source X]."
-    elif action_type == "build_huggingface_space":
-        build_prompt = action_input_dict.get("build_prompt")
-        if not build_prompt:
-            context_str = "Tool Action Failed: The model decided to build a space but did not provide the necessary instructions (build_prompt)."
-            final_system_prompt_str += " Report the tool failure to the user."
-        else:
-            yield "status", f"[Tool: Building Space '{build_prompt[:50]}'...]"
-            build_result = build_huggingface_space(build_prompt)
-            if "error" in build_result:
-                context_str = f"Hugging Face Space Builder Tool Result:\n- Status: FAILED\n- Error: {build_result['error']}"
-                final_system_prompt_str += " The space building tool failed. Report the error to the user and ask if they want to try again."
-                yield "status", "[Tool: Space building failed.]"
-            else:
-                context_str = f"Hugging Face Space Builder Tool Result:\n- Status: SUCCESS\n- Details: {build_result['result']}"
-                final_system_prompt_str += " The space building tool has completed. Inform the user about the result, providing any links or key information from the tool's output."
-                yield "status", "[Tool: Space building complete.]"
-    else: # quick_respond or fallback
-        final_system_prompt_str += " Respond directly using guidelines & history."
-
-    final_user_prompt_str = prompts.get_final_response_prompt(history_str_for_prompt, initial_insights_ctx_str, user_input, context_str)
-    final_llm_messages = [{"role": "system", "content": final_system_prompt_str}, {"role": "user", "content": final_user_prompt_str}]
+
+    final_user_prompt = prompts.get_final_response_prompt(history_str_for_prompt, initial_insights_ctx_str, user_input, context_str)
+    final_llm_messages = [{"role": "system", "content": final_system_prompt}, {"role": "user", "content": final_user_prompt}]

     streamed_response = ""
     try:
-        for chunk in call_model_stream(
+        for chunk in call_model_stream(provider_name, model_display_name, final_llm_messages, ui_api_key_override, max_tokens=4096):
             if isinstance(chunk, str) and chunk.startswith("Error:"):
                 streamed_response += f"\n{chunk}\n"; yield "response_chunk", f"\n{chunk}\n"; break
-            streamed_response += chunk
+            streamed_response += chunk
+            yield "response_chunk", chunk
     except Exception as e:
         streamed_response += f"\n\n(Error: {e})"; yield "response_chunk", f"\n\n(Error: {e})"

-    final_bot_text = streamed_response.strip() or "(No response
-    logger.info(f"ORCHESTRATOR [{request_id}]: Finished.
+    final_bot_text = streamed_response.strip() or "(No response)"
+    logger.info(f"ORCHESTRATOR [{request_id}]: Finished. Response length: {len(final_bot_text)}")
     yield "final_response_and_insights", {"response": final_bot_text, "insights_used": parsed_initial_insights_list}
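Note on the revised control flow (not part of the commit itself): decide_on_tool extracts the first {...} block from the tool-decision model's reply and expects a JSON object with an "action" string and an "action_input" dict. The actual prompt lives in prompts.TOOL_SYSTEM_PROMPT, which is not shown here, but the branches above imply decisions shaped roughly like the following sketch; the action names and required keys are taken from the diff, while the concrete values are illustrative.

# Illustrative tool decisions matching the action names and required
# parameters checked in orchestrate_and_respond. Values are made up;
# only the keys come from the diff.
create_space_decision = {
    "action": "create_huggingface_space",
    "action_input": {
        "owner": "some-user",                # all four keys are required
        "space_name": "demo-space",
        "sdk": "gradio",
        "markdown_content": "# Demo\nHello!",
    },
}

update_file_decision = {
    "action": "update_huggingface_space_file",
    "action_input": {
        "owner": "some-user",                # all five keys are required
        "space_name": "demo-space",
        "file_path": "app.py",
        "new_content": "print('hello')",
        "commit_message": "Update app.py",
    },
}

search_decision = {
    "action": "search_duckduckgo_and_report",
    "action_input": {"search_engine_query": "latest gradio release"},
}

# Any other action, or a malformed action_input, falls back to
# ("quick_respond", {}).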
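orchestrate_and_respond is a generator yielding (event_type, payload) tuples: "status" strings for progress, "response_chunk" strings as the final model streams, and one terminal "final_response_and_insights" dict. A minimal sketch of a consumer, assuming a plain-terminal driver; run_turn and the provider/model names are placeholders, since the real UI layer is not part of this commit.

# Hypothetical driver for the orchestrator generator. The event names
# and payload shapes come from the diff above; everything else is an
# assumption for illustration.
from tools.orchestrator import orchestrate_and_respond

def run_turn(user_input: str, history: list[dict]) -> str:
    final = ""
    for event, payload in orchestrate_and_respond(
        user_input,
        provider_name="groq",             # placeholder provider
        model_display_name="some-model",  # placeholder display name
        chat_history_for_prompt=history,
    ):
        if event == "status":
            print(payload)                # e.g. "[Path: quick_respond]"
        elif event == "response_chunk":
            print(payload, end="", flush=True)
        elif event == "final_response_and_insights":
            final = payload["response"]   # payload["insights_used"] also available
    return final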
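tools/space_builder.py itself is not in this diff; only its call sites are. Judging from the parameter names and the {"result": ...} / {"error": ...} return shape the orchestrator reads, the two tools plausibly wrap huggingface_hub along these lines. This is an inferred sketch, not the actual module; it uses only documented huggingface_hub calls (HfApi.create_repo with repo_type="space" and space_sdk, and HfApi.upload_file).

# Hypothetical sketch of tools/space_builder.py, inferred from the
# call sites in orchestrator.py. The real module may differ.
from huggingface_hub import HfApi

api = HfApi()  # token resolved from HF_TOKEN or the cached login

def create_huggingface_space(owner: str, space_name: str, sdk: str, markdown_content: str) -> dict:
    try:
        repo_id = f"{owner}/{space_name}"
        # Create the Space, then seed it with the provided README content.
        api.create_repo(repo_id=repo_id, repo_type="space", space_sdk=sdk, exist_ok=False)
        api.upload_file(
            path_or_fileobj=markdown_content.encode(),
            path_in_repo="README.md",
            repo_id=repo_id,
            repo_type="space",
        )
        return {"result": f"Created https://huggingface.co/spaces/{repo_id}"}
    except Exception as e:
        return {"error": str(e)}

def update_huggingface_space_file(owner: str, space_name: str, file_path: str, new_content: str, commit_message: str) -> dict:
    try:
        repo_id = f"{owner}/{space_name}"
        # Overwrite a single file in the Space with a custom commit message.
        api.upload_file(
            path_or_fileobj=new_content.encode(),
            path_in_repo=file_path,
            repo_id=repo_id,
            repo_type="space",
            commit_message=commit_message,
        )
        return {"result": f"Updated {file_path} in {repo_id}"}
    except Exception as e:
        return {"error": str(e)}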