# chatbot_handler.py
import logging
import os

from google import genai
from google.genai import types
# Gemini API key configuration
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', '')

client = None
model_name = "gemini-2.0-flash"

# Safety settings: block medium-and-above content in each standard harm category.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]

# The google-genai SDK bundles generation parameters and safety settings into a
# single GenerateContentConfig (the older google-generativeai package used a
# separate GenerationConfig plus a safety_settings argument).
generation_config = types.GenerateContentConfig(
    temperature=0.7,
    top_k=1,
    top_p=1,
    max_output_tokens=2048,
    safety_settings=safety_settings,
)

try:
    if GEMINI_API_KEY:
        client = genai.Client(api_key=GEMINI_API_KEY)
        logging.info(f"Gemini client initialized with model '{model_name}'")
    else:
        logging.error("Gemini API Key is not set.")
except Exception as e:
    logging.error(f"Failed to initialize Gemini client: {e}", exc_info=True)

def format_history_for_gemini(gradio_chat_history: list) -> list:
    """Convert a Gradio 'messages'-style history into the Gemini 'contents' format."""
    gemini_contents = []
    for msg in gradio_chat_history:
        # Gemini names the assistant role "model".
        role = "user" if msg["role"] == "user" else "model"
        content = msg.get("content")
        if isinstance(content, str):
            gemini_contents.append({"role": role, "parts": [{"text": content}]})
        else:
            logging.warning(f"Skipping non-string content in chat history: {content}")
    return gemini_contents
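
# Illustrative example (hypothetical values): a Gradio "messages"-style history
#   [{"role": "user", "content": "Hi"},
#    {"role": "assistant", "content": "Hello!"}]
# becomes the Gemini "contents" format:
#   [{"role": "user", "parts": [{"text": "Hi"}]},
#    {"role": "model", "parts": [{"text": "Hello!"}]}]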

async def generate_llm_response(
    user_message: str,
    plot_id: str,
    plot_label: str,
    chat_history_for_plot: list,
    plot_data_summary: str = None,
):
    """Generate a chat reply for a plot. chat_history_for_plot is assumed to
    already include the latest user message as its final turn."""
    if not client:
        logging.error("Gemini client not initialized.")
        return "The AI model is not available. Configuration error."
    gemini_formatted_history = format_history_for_gemini(chat_history_for_plot)
    if not gemini_formatted_history:
        logging.error("Empty or invalid chat history.")
        return "There was an issue processing the conversation history."
    try:
        # The google-genai SDK exposes async methods under client.aio; the
        # generate_content_async method belongs to the older SDK.
        response = await client.aio.models.generate_content(
            model=model_name,
            contents=gemini_formatted_history,
            config=generation_config,
        )
        if response.prompt_feedback and response.prompt_feedback.block_reason:
            reason = response.prompt_feedback.block_reason.name
            logging.warning(f"Blocked by prompt feedback: {reason}")
            return f"Blocked due to content policy: {reason}."
        if response.candidates and response.candidates[0].content.parts:
            # Skip non-text parts, whose .text attribute is None.
            return "".join(
                part.text
                for part in response.candidates[0].content.parts
                if part.text
            )
        finish_reason = (
            response.candidates[0].finish_reason.name
            if response.candidates and response.candidates[0].finish_reason
            else "UNKNOWN"
        )
        return f"Unexpected response. Finish reason: {finish_reason}."
    except Exception as e:
        logging.error(f"Error generating response for plot '{plot_label}': {e}", exc_info=True)
        return f"Unexpected error occurred: {type(e).__name__}."