# chatbot_handler.py
import logging
import os

from google import genai
from google.genai import types

# Gemini API key configuration
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', '')

client = None
model_name = "gemini-2.0-flash"

# Optional safety settings: block medium-and-above content in the four
# standard harm categories. These do not depend on the API key, so they are
# defined unconditionally at module level.
safety_settings = [
    types.SafetySetting(category="HARM_CATEGORY_HARASSMENT", threshold="BLOCK_MEDIUM_AND_ABOVE"),
    types.SafetySetting(category="HARM_CATEGORY_HATE_SPEECH", threshold="BLOCK_MEDIUM_AND_ABOVE"),
    types.SafetySetting(category="HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold="BLOCK_MEDIUM_AND_ABOVE"),
    types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="BLOCK_MEDIUM_AND_ABOVE"),
]

# In the google-genai SDK, generation parameters and safety settings travel
# together in a single GenerateContentConfig passed with each request
# (genai.types.GenerationConfig belongs to the older google-generativeai
# package and does not exist here).
generation_config = types.GenerateContentConfig(
    temperature=0.7,
    top_k=1,
    top_p=1,
    max_output_tokens=2048,
    safety_settings=safety_settings,
)

try:
    if GEMINI_API_KEY:
        client = genai.Client(api_key=GEMINI_API_KEY)
        logging.info(f"Gemini client initialized with model '{model_name}'")
    else:
        logging.error("Gemini API Key is not set.")
except Exception as e:
    logging.error(f"Failed to initialize Gemini client: {e}", exc_info=True)


def format_history_for_gemini(gradio_chat_history: list) -> list:
    """Convert a Gradio 'messages'-style history into Gemini content dicts.

    Gradio messages look like {"role": "user"|"assistant", "content": str};
    Gemini expects {"role": "user"|"model", "parts": [{"text": str}]}.
    """
    gemini_contents = []
    for msg in gradio_chat_history:
        role = "user" if msg.get("role") == "user" else "model"
        content = msg.get("content")
        if isinstance(content, str):
            gemini_contents.append({"role": role, "parts": [{"text": content}]})
        else:
            # Skip images, files, or any other non-text content.
            logging.warning(f"Skipping non-string content in chat history: {content}")
    return gemini_contents


async def generate_llm_response(
    user_message: str,
    plot_id: str,
    plot_label: str,
    chat_history_for_plot: list,
    plot_data_summary: str | None = None,
):
    """Generate a Gemini reply for the conversation attached to one plot.

    chat_history_for_plot is expected to already include the latest
    user_message as its final turn; only the formatted history is sent.
    """
    if not client:
        logging.error("Gemini client not initialized.")
        return "The AI model is not available. Configuration error."

    gemini_formatted_history = format_history_for_gemini(chat_history_for_plot)

    if not gemini_formatted_history:
        logging.error("Empty or invalid chat history.")
        return "There was an issue processing the conversation history."

    try:
        # Async calls live under client.aio in the google-genai SDK; the
        # combined config carries both generation and safety settings.
        response = await client.aio.models.generate_content(
            model=model_name,
            contents=gemini_formatted_history,
            config=generation_config,
        )

        if response.prompt_feedback and response.prompt_feedback.block_reason:
            reason = response.prompt_feedback.block_reason.name
            logging.warning(f"Blocked by prompt feedback: {reason}")
            return f"Blocked due to content policy: {reason}."

        if response.candidates and response.candidates[0].content.parts:
            # part.text can be None for non-text parts, so guard the join.
            return "".join(part.text or "" for part in response.candidates[0].content.parts)

        finish_reason = (
            response.candidates[0].finish_reason.name
            if response.candidates and response.candidates[0].finish_reason
            else "UNKNOWN"
        )
        return f"Unexpected response. Finish reason: {finish_reason}."

    except Exception as e:
        logging.error(f"Error generating response for plot '{plot_label}': {e}", exc_info=True)
        return f"Unexpected error occurred: {type(e).__name__}."