# src/client/chat_handler.py
#
# SPDX-FileCopyrightText: Hadad <[email protected]>
# SPDX-License-Identifier: Apache-2.0
#
import uuid # Import UUID module to generate unique session identifiers
from typing import Any, List # Import typing annotations for type hinting
from config import model # Import model configuration dictionary from config module
from src.core.server import jarvis # Import the async function to interact with AI backend
from src.core.parameter import parameters # Import parameters (not used directly here but imported for completeness)
from src.core.session import session # Import session dictionary to store conversation histories
from src.utils.input import extract_input_and_files # Import utility to extract input and files from message
from src.utils.history import convert_history # Import utility to convert history format
from src.client.responses.audio import audio_integration # Import handler for audio generation
from src.client.responses.image import image_integration # Import handler for image generation
from src.client.responses.deep_search import deep_search_integration # Import handler for deep search
import gradio as gr # Import Gradio library for UI and request handling
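# Example of the payloads this handler expects (a sketch; the actual shapes come from the Gradio UI):
# either a plain string such as "hello", or a multimodal dictionary such as
#   {"text": "/image a red fox", "files": ["/tmp/gradio/upload.png"]}
# extract_input_and_files() below is assumed to normalize both forms into a (text, files) pair.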
# Define the asynchronous respond function to process user messages and generate AI responses
async def respond(
message, # Incoming user message, can be a string or a dictionary containing text and files
history: List[Any], # List containing conversation history as pairs of user and assistant messages
model_label, # Label/key to select the specific AI model from available models configuration
temperature, # Sampling temperature parameter controlling randomness of AI response generation (0.0 to 2.0)
top_k, # Number of highest probability tokens to keep for sampling during text generation
min_p, # Minimum probability threshold for token sampling to filter low probability tokens
top_p, # Cumulative probability threshold for nucleus sampling technique
repetition_penalty, # Penalty factor to reduce repetitive tokens in generated text output
    thinking, # Boolean flag indicating whether the AI should operate in "thinking" mode with deeper reasoning
image_gen, # Boolean flag to enable image generation commands using /image prefix
audio_gen, # Boolean flag to enable audio generation commands using /audio prefix
search_gen, # Boolean flag to enable deep search commands using /dp prefix
request: gr.Request # Gradio request object to access session information such as session hash
):
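    """
    Stream chat responses for a single Gradio session.

    Routes "/audio", "/image", and "/dp" commands to their dedicated handlers when the
    corresponding feature flags are enabled; all other input is streamed through the
    jarvis backend. Yields incremental chunks suitable for a streaming chat component.
    """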
    # Select the AI model for this request, falling back to the first configured model when the label is unknown
    selected_model = model.get(model_label, next(iter(model.values())))
    # Retrieve the session ID from the Gradio request's session hash, generating a new UUID if none exists
    session_id = request.session_hash or str(uuid.uuid4())
    # Initialize an empty conversation history for this session if it does not already exist
    if session_id not in session:
        session[session_id] = []
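    # The session store is assumed to be a plain in-memory dict keyed by session hash, e.g.
    #   session == {"a1b2c3d4": [{"role": "user", "content": "..."}, ...]}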
    # Determine the mode string from the 'thinking' flag; it is forwarded to the backend to toggle reasoning
    mode = "/think" if thinking else "/no_think"
    # Extract the text input and any attached files from the incoming message
    user_input, files = extract_input_and_files(message)
    # Strip surrounding whitespace and build a lowercase copy for case-insensitive command detection
    stripped_input = user_input.strip()
    lowered_input = stripped_input.lower()
    # If the input is empty after stripping, yield an empty update and exit early
    if not stripped_input:
        yield []
        return
    # If the input is exactly a bare command keyword with no arguments, yield empty and exit early
    if lowered_input in ("/audio", "/image", "/dp"):
        yield []
        return
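    # For example, a bare "/image" yields no update above, while "/image a red fox" falls
    # through to the matching handler below with the prefix still attached.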
    # Convert the tuple-style history into the message-style format expected by the AI backend
    new_history = convert_history(history)
    # Store the freshly converted history in the global session dictionary for this session
    session[session_id] = new_history
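    # A sketch of the assumed conversion (the exact shape lives in src.utils.history):
    # tuple-style pairs such as ("hi", "hello") presumably become message dictionaries, e.g.
    #   [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]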
    # Handle the audio generation command when enabled and the input starts with the '/audio' prefix
    if audio_gen and lowered_input.startswith("/audio"):
        async for audio_response in audio_integration(
            user_input,         # Raw user input, including the command prefix
            new_history,        # Converted conversation history
            session_id,         # Session ID for conversation context
            selected_model,     # Selected model configuration
            jarvis,             # Async AI backend function
            mode,               # Reasoning mode string
            temperature,
            top_k,
            min_p,
            top_p,
            repetition_penalty
        ):
            yield audio_response
        return  # Exit after the audio handler has finished streaming
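    # For example, "/audio say hello" streams through audio_integration above; the handler
    # is assumed to parse the prompt out of the raw command text itself.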
    # Handle the image generation command when enabled and the input starts with the '/image' prefix
    if image_gen and lowered_input.startswith("/image"):
        async for image_response in image_integration(
            user_input,         # Raw user input, including the command prefix
            new_history,        # Converted conversation history
            session_id,         # Session ID for conversation context
            selected_model,     # Selected model configuration
            jarvis,             # Async AI backend function
            mode,               # Reasoning mode string
            temperature,
            top_k,
            min_p,
            top_p,
            repetition_penalty
        ):
            yield image_response
        return  # Exit after the image handler has finished streaming
    # Handle the deep search command when enabled and the input starts with the '/dp' prefix
    if search_gen and lowered_input.startswith("/dp"):
        async for search_response in deep_search_integration(
            user_input,         # Raw user input, including the command prefix
            new_history,        # Converted conversation history
            session_id,         # Session ID for conversation context
            selected_model,     # Selected model configuration
            jarvis,             # Async AI backend function
            mode,               # Reasoning mode string
            temperature,
            top_k,
            min_p,
            top_p,
            repetition_penalty
        ):
            yield search_response
        return  # Exit after the deep search handler has finished streaming
    # For all other inputs, stream a normal response from the jarvis backend
    async for response in jarvis(
        session_id=session_id,                  # Session ID for conversation context
        model=selected_model,                   # Selected model configuration
        history=new_history,                    # Converted conversation history
        user_message=user_input,                # User input message
        mode=mode,                              # Reasoning mode from the thinking flag
        files=files,                            # Any files attached to the message
        temperature=temperature,
        top_k=top_k,
        min_p=min_p,
        top_p=top_p,
        repetition_penalty=repetition_penalty
    ):
        yield response  # Yield each chunk as it is generated
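# Example wiring (a hypothetical sketch, not part of this module): respond is shaped to serve
# as the callback of a multimodal Gradio chat, with the sampling values and feature flags
# supplied through additional_inputs in the same order as the function signature; Gradio
# injects the gr.Request argument automatically. Slider defaults below are illustrative only.
#
#   demo = gr.ChatInterface(
#       fn=respond,
#       multimodal=True,
#       additional_inputs=[
#           gr.Dropdown(choices=list(model.keys()), label="Model"),      # model_label
#           gr.Slider(0.0, 2.0, value=0.7, label="Temperature"),         # temperature
#           gr.Slider(1, 100, value=40, step=1, label="Top-k"),          # top_k
#           gr.Slider(0.0, 1.0, value=0.05, label="Min-p"),              # min_p
#           gr.Slider(0.0, 1.0, value=0.95, label="Top-p"),              # top_p
#           gr.Slider(0.1, 2.0, value=1.1, label="Repetition penalty"),  # repetition_penalty
#           gr.Checkbox(value=True, label="Thinking"),                   # thinking
#           gr.Checkbox(value=True, label="Image generation"),           # image_gen
#           gr.Checkbox(value=True, label="Audio generation"),           # audio_gen
#           gr.Checkbox(value=True, label="Deep search"),                # search_gen
#       ],
#   )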