#
# SPDX-FileCopyrightText: Hadad <[email protected]>
# SPDX-License-Identifier: Apache-2.0
#
import json # Import JSON module for encoding and decoding JSON data
from src.tools.deep_search import SearchTools # Import SearchTools class for deep search functionality
# Asynchronous handler for the '/dp' deep search command.
async def deep_search_integration(
    input,  # Raw user input beginning with the '/dp' command prefix
    new_history,  # Conversation history in message-dict format
    session_id,  # Session identifier for conversation context
    selected_model,  # AI model selected for generation
    jarvis,  # Async generator backend that streams AI responses
    mode,  # Generation mode passed through to the backend
    temperature,  # Sampling temperature
    top_k,  # Top-k sampling parameter
    min_p,  # Min-p sampling parameter
    top_p,  # Nucleus sampling parameter
    repetition_penalty  # Repetition penalty parameter
):
    """Run a deep web search for the query following '/dp' and stream an
    AI-generated summary of the results.

    Yields lists of message dicts with role ``tool``. Yields a single empty
    list and exits early when no query follows the command. If the search
    (or its tooling) fails, streams an AI-generated, user-facing error
    explanation instead of raising.
    """
    # Everything after the 3-character '/dp' prefix is the search query.
    search_query = input[3:].strip()
    # Guard clause: nothing to search for — signal "no results" and stop
    # before constructing any search tooling.
    if not search_query:
        yield []
        return
    # Shared instruction appended to both the summary prompt and the error
    # prompt, so the AI answers in the user's language. Extracted once to
    # avoid the duplicated literal; content is byte-identical to before.
    language_note = (
        "Use the same language as the previous user input or user request.\n"
        "For example, if the previous user input or user request is in Indonesian, explain in Indonesian.\n"
        "If it is in English, explain in English. This also applies to other languages.\n\n\n"
    )
    try:
        # Construct the search backend lazily, inside the try block, so a
        # constructor failure also takes the friendly AI error path below.
        search_tools = SearchTools()
        # Perform the asynchronous deep search for the given query.
        search_results = await search_tools.search(search_query)
        # NOTE(review): the slice assumes search() returns a string, limiting
        # the payload to 5000 characters; if it returns a list this would
        # truncate to 5000 items instead — confirm against SearchTools.
        search_content = json.dumps({
            "query": search_query,
            "search_results": search_results[:5000]  # Cap payload size
        })
        # Augment history with the results plus summarization instructions.
        search_instructions = (
            new_history
            + [
                {
                    "role": "system",
                    "content": (
                        "Deep search results for query: '" + search_query + "':\n\n\n" + search_content + "\n\n\n"
                        "Please analyze these search results and provide a comprehensive summary of the information.\n"
                        "Identify the most relevant information related to the query.\n"
                        "Format your response in a clear, structured way with appropriate headings and bullet points if needed.\n"
                        "If the search results don't provide sufficient information, acknowledge this limitation.\n"
                        "Please provide links or URLs from each of your search results.\n\n\n"
                        + language_note
                    )
                }
            ]
        )
        # Stream the AI-generated summary back to the caller, one chunk per
        # backend yield, wrapped in the tool role.
        async for search_response in jarvis(
            session_id=session_id,
            model=selected_model,
            history=search_instructions,
            user_message=input,
            mode=mode,
            temperature=temperature,
            top_k=top_k,
            min_p=min_p,
            top_p=top_p,
            repetition_penalty=repetition_penalty
        ):
            yield [{"role": "tool", "content": search_response}]
        return
    except Exception:
        # Best-effort fallback: rather than surfacing a raw exception, ask
        # the AI to explain the failure to the user in their own language.
        generation_failed = (
            new_history
            + [
                {
                    "role": "system",
                    "content": (
                        "Deep search failed for the user's query: '" + search_query + "'\n\n\n"
                        "Please explain to the user that the search operation failed and suggest they try again later.\n"
                        "Be helpful and empathetic in your response. You can also suggest alternative approaches or workarounds.\n\n\n"
                        + language_note
                    )
                }
            ]
        )
        # Fixed, conservative sampling settings keep the error message
        # consistent regardless of the caller-supplied parameters.
        async for error_response in jarvis(
            session_id=session_id,
            model=selected_model,
            history=generation_failed,
            user_message=input,
            mode="/no_think",  # Non-reasoning mode for error handling
            temperature=0.7,
            top_k=20,
            min_p=0,
            top_p=0.8,
            repetition_penalty=1
        ):
            yield [{"role": "tool", "content": error_response}]
        return