File size: 1,690 Bytes
aac4d20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
# modules/gemini_handler.py
"""
Dedicated module for all interactions with the Google Gemini API.
"""
import logging

import google.generativeai as genai

from .config import GEMINI_API_KEY

# Fail fast: surface a missing API key at import time instead of on the
# first request.
if not GEMINI_API_KEY:
    raise ValueError("GEMINI_API_KEY not found. Please set it in your environment.")
genai.configure(api_key=GEMINI_API_KEY)

# Low temperature keeps responses close to deterministic; the token cap
# bounds the size of a single reply.
generation_config = {
    "temperature": 0.2,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 4096,
}

# Apply one common safety threshold across every harm category.
# This matters for medical contexts, but use with caution.
_HARM_CATEGORIES = (
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
)
safety_settings = [
    {"category": category, "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
    for category in _HARM_CATEGORIES
]

# Single shared model instance used by the handlers in this module.
model = genai.GenerativeModel(
    model_name="gemini-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
)

async def generate_gemini_response(prompt: str) -> str:
    """Send *prompt* to the Gemini model and return its text reply.

    Args:
        prompt: The fully rendered prompt string to send to the model.

    Returns:
        The model's text response. Failures are reported as human-readable
        strings rather than raised, so callers can show them to users:
        a "blocked" notice when safety filters suppress the reply, or an
        "Error: ..." message when the API call itself fails.
    """
    try:
        response = await model.generate_content_async(prompt)
    except Exception as e:
        # Boundary handler: log with traceback (print loses it in most
        # deployments) and degrade to an error string for the caller.
        logging.getLogger(__name__).exception("An error occurred with the Gemini API")
        return f"Error: Could not get a response from the AI model. Details: {e}"
    # An empty parts list means the reply was blocked by safety settings;
    # accessing response.text in that state would raise instead.
    if not response.parts:
        return "The AI response was blocked due to safety settings. Please rephrase your query."
    return response.text