broadfield-dev committed
Commit 0da85d3 · verified · 1 Parent(s): b1c8c44

Update app.py

Files changed (1): app.py (+16 -2)
app.py CHANGED
@@ -129,9 +129,23 @@ def process_user_interaction_gradio(user_input: str, provider_name: str, model_d
     yield "status", "<i>[LLM choosing best approach...]</i>"
     history_snippet = "\n".join([f"{msg['role']}: {msg['content'][:100]}" for msg in chat_history_for_prompt[-2:]])
     guideline_snippet = initial_insights_ctx_str[:200].replace('\n', ' ')
-    tool_sys_prompt = "You are a precise routing agent... Output JSON only. Example: {\"action\": \"search_duckduckgo_and_report\", \"action_input\": {\"search_engine_query\": \"query\"}}"
-    tool_user_prompt = f"User Query: \"{user_input}\nRecent History:\n{history_snippet}\nGuidelines: {guideline_snippet}...\nAvailable Actions: quick_respond, answer_using_conversation_memory, search_duckduckgo_and_report, scrape_url_and_report.\nSelect one action and input. Output JSON."
+
+    # --- MODIFIED: Improved prompts for the tool-decision LLM ---
+    tool_sys_prompt = """You are a precise routing agent. Your job is to analyze the user's query and the conversation context, then select the single best action to provide an answer.
+Output ONLY a single, valid JSON object with "action" and "action_input" keys. Do not add any other text or explanations.
+
+Example: {"action": "search_duckduckgo_and_report", "action_input": {"search_engine_query": "latest AI research"}}
+
+Here are the available actions with descriptions of when to use them:
+- "quick_respond": Use for simple greetings, acknowledgements, or if the answer is obvious from the immediate context and requires no special tools.
+- "answer_using_conversation_memory": Use if the user's query refers to a previous conversation, asks you to "remember" or "recall" something, or seems like it could be answered by a past interaction you've had. This tool searches a database of your past conversations for relevant information.
+- "search_duckduckgo_and_report": Use for general knowledge questions, questions about current events, or when the user explicitly asks you to search the web for information.
+- "scrape_url_and_report": Use ONLY when the user provides a specific URL to read from.
+"""
+    tool_user_prompt = f"User Query: \"{user_input}\"\n\nRecent History:\n{history_snippet}\n\nGuidelines Snippet (for context):\n{guideline_snippet}...\n\nBased on the query and the action descriptions provided in the system prompt, select the single best action to take. Output the corresponding JSON object."
     tool_decision_messages = [{"role":"system", "content": tool_sys_prompt}, {"role":"user", "content": tool_user_prompt}]
+    # --- END OF MODIFICATION ---
+
     tool_provider, tool_model_id = TOOL_DECISION_PROVIDER_ENV, TOOL_DECISION_MODEL_ID_ENV
     tool_model_display = next((dn for dn, mid in MODELS_BY_PROVIDER.get(tool_provider.lower(), {}).get("models", {}).items() if mid == tool_model_id), None)
     if not tool_model_display: tool_model_display = get_default_model_display_name_for_provider(tool_provider)
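For context, a minimal sketch of how the JSON decision requested by the new prompt might be consumed downstream. This is a hypothetical illustration, not code from this commit: parse_tool_decision, VALID_ACTIONS, and the fallback to "quick_respond" are assumptions.

import json

# Hypothetical sketch: parse the routing JSON the tool-decision LLM is asked to emit,
# falling back to "quick_respond" if the output is malformed or names an unknown action.
VALID_ACTIONS = {
    "quick_respond",
    "answer_using_conversation_memory",
    "search_duckduckgo_and_report",
    "scrape_url_and_report",
}

def parse_tool_decision(llm_response_text: str) -> tuple:
    try:
        decision = json.loads(llm_response_text.strip())
    except json.JSONDecodeError:
        return "quick_respond", {}
    action = decision.get("action")
    action_input = decision.get("action_input") or {}
    if action not in VALID_ACTIONS:
        return "quick_respond", {}
    return action, action_input

# Example: a well-formed response selects the web-search action.
print(parse_tool_decision('{"action": "search_duckduckgo_and_report", "action_input": {"search_engine_query": "latest AI research"}}'))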