# chatbot_handler.py
import asyncio
import logging
import json
import aiohttp  # Asynchronous HTTP client

# import os  # No longer needed: the API key is not read from the environment.
# api_key = os.getenv('GEMINI_API_KEY')  # Removed: the key is left as an empty string and provided by Canvas at runtime.
def format_history_for_gemini(gradio_chat_history: list) -> list:
    """
    Converts Gradio chat history (a list of dicts with 'role' and 'content')
    to the Gemini API's 'contents' format.
    Gemini expects the roles 'user' and 'model', so any non-'user' role
    (including 'assistant') is mapped to 'model', and non-string content is
    skipped. Gemini handles system prompts differently, so they should not
    normally appear in this history.
    """
    gemini_contents = []
    for msg in gradio_chat_history:
        role = "user" if msg["role"] == "user" else "model"  # Gemini uses 'model' for the assistant
        # Ensure content is a string; skip it otherwise (e.g. a gr.Plot that accidentally ended up in history)
        if isinstance(msg.get("content"), str):
            gemini_contents.append({"role": role, "parts": [{"text": msg["content"]}]})
        else:
            logging.warning(f"Skipping non-string content in chat history for Gemini: {msg.get('content')}")
    return gemini_contents
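
# Illustrative example of the conversion above (the message text is made up):
#
#   format_history_for_gemini([
#       {"role": "user", "content": "What does this plot show?"},
#       {"role": "assistant", "content": "It charts monthly revenue."},
#   ])
#   == [
#       {"role": "user", "parts": [{"text": "What does this plot show?"}]},
#       {"role": "model", "parts": [{"text": "It charts monthly revenue."}]},
#   ]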


async def generate_llm_response(user_message: str, plot_id: str, plot_label: str, chat_history_for_plot: list):
    """
    Generates a response from the LLM using the Gemini API.

    Args:
        user_message (str): The latest message from the user.
        plot_id (str): The ID of the plot being discussed.
        plot_label (str): The label of the plot being discussed.
        chat_history_for_plot (list): The current conversation history for this plot.
            This list already includes the latest user_message.

    Returns:
        str: The LLM's response text.
    """
    logging.info(f"Generating LLM response for plot_id: {plot_id} ('{plot_label}'). User message: '{user_message}'")
    gemini_formatted_history = format_history_for_gemini(chat_history_for_plot)
    if not gemini_formatted_history:
        logging.error("Cannot generate LLM response: formatted history is empty.")
        return "I'm sorry, there was an issue processing the conversation history."

    payload = {
        "contents": gemini_formatted_history,
        "generationConfig": {
            "temperature": 0.7,
            "topK": 1,
            "topP": 1,
            "maxOutputTokens": 2048,
        },
    }
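    # generationConfig notes: temperature controls sampling randomness,
    # topK/topP constrain the candidate-token pool, and maxOutputTokens caps
    # the reply length. These are conservative defaults; tune as needed.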

    # The API key is an empty string; Canvas automatically provides it at runtime.
    api_key = ""
    # Using gemini-2.0-flash as per general instructions
    api_url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"

    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(
                api_url,
                headers={"Content-Type": "application/json"},
                json=payload,
                timeout=aiohttp.ClientTimeout(total=45),  # request-level timeout; exceeded -> asyncio.TimeoutError
            ) as resp:
                response_json = await resp.json()
                logging.debug(f"LLM API Raw Response for '{plot_label}': {json.dumps(response_json, indent=2)}")
                if resp.status != 200:
                    error_detail = response_json.get("error", {}).get("message", "Unknown API error")
                    if response_json.get("promptFeedback") and response_json["promptFeedback"].get("blockReason"):
                        reason = response_json["promptFeedback"]["blockReason"]
                        safety_ratings_info = [
                            f"{rating['category']}: {rating['probability']}"
                            for rating in response_json["promptFeedback"].get("safetyRatings", [])
                        ]
                        details = f" Safety Ratings: {', '.join(safety_ratings_info)}" if safety_ratings_info else ""
                        logging.warning(f"Content blocked by API (Status {resp.status}) for '{plot_label}'. Reason: {reason}.{details}")
                        return f"I'm sorry, I can't respond to that due to content policy: {reason}."
                    logging.error(f"LLM API Error (Status {resp.status}) for '{plot_label}': {error_detail}")
                    return f"Sorry, the AI model returned an error (Status {resp.status}). Please try again later."
if response_json.get("candidates") and \ | |
response_json["candidates"][0].get("content") and \ | |
response_json["candidates"][0]["content"].get("parts") and \ | |
response_json["candidates"][0]["content"]["parts"][0].get("text"): | |
response_text = response_json["candidates"][0]["content"]["parts"][0]["text"] | |
logging.info(f"LLM generated response for '{plot_label}': {response_text[:100]}...") | |
return response_text | |
elif response_json.get("promptFeedback") and response_json["promptFeedback"].get("blockReason"): | |
reason = response_json["promptFeedback"]["blockReason"] | |
safety_ratings_info = [f"{rating['category']}: {rating['probability']}" for rating in response_json['promptFeedback'].get('safetyRatings', [])] | |
details = f" Safety Ratings: {', '.join(safety_ratings_info)}" if safety_ratings_info else "" | |
logging.warning(f"Content blocked by API (Status 200 but no candidate) for '{plot_label}'. Reason: {reason}.{details}") | |
return f"I'm sorry, your request was processed but could not be answered due to content policy: {reason}." | |
else: | |
logging.error(f"Unexpected LLM API response structure for '{plot_label}': {response_json}") | |
return "Sorry, I received an unexpected or empty response from the AI model." | |
        except asyncio.TimeoutError:
            # aiohttp signals an exceeded ClientTimeout with asyncio.TimeoutError;
            # aiohttp.ClientTimeout itself is a configuration object, not an exception.
            logging.error(f"LLM API call timed out for '{plot_label}'.", exc_info=True)
            return "Sorry, the request to the AI model timed out. Please try again."
        except (json.JSONDecodeError, aiohttp.ContentTypeError) as e:
            # resp.json() raises ContentTypeError for non-JSON bodies (e.g. an HTML
            # gateway error); it subclasses ClientError, so catch it here first.
            logging.error(f"Error decoding LLM API response for '{plot_label}': {e}", exc_info=True)
            try:
                raw_text_response = await resp.text()
                logging.error(f"LLM API Raw Text Response (on decode error) for '{plot_label}': {raw_text_response}")
            except Exception as read_err:
                logging.error(f"Could not read raw text response: {read_err}")
            return "Sorry, I received an unreadable response from the AI model."
        except aiohttp.ClientError as e:
            logging.error(f"Error calling LLM API (aiohttp) for '{plot_label}': {e}", exc_info=True)
            return f"Sorry, I couldn't connect to the AI model at the moment. Network Error: {type(e).__name__}."
        except Exception as e:
            logging.error(f"Generic error during LLM call for '{plot_label}': {e}", exc_info=True)
            return f"An unexpected error occurred while trying to get an AI response: {type(e).__name__}."