# modules/gemini_handler.py
"""
Dedicated module for all interactions with the Google Gemini API.
"""
import google.generativeai as genai
from .config import GEMINI_API_KEY
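
# The .config module is assumed to expose GEMINI_API_KEY. A minimal sketch of
# such a module (hypothetical, shown only for context) might read the key from
# the environment:
#
#     # modules/config.py
#     import os
#     GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
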
# Configure the API key
if not GEMINI_API_KEY:
    raise ValueError("GEMINI_API_KEY not found. Please set it in your environment.")
genai.configure(api_key=GEMINI_API_KEY)
# Set up the model with explicit generation and safety settings.
# Medical content can trigger safety filters, so the thresholds are declared
# explicitly here; relax them only with caution.
generation_config = {
    "temperature": 0.2,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 4096,
}
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]
model = genai.GenerativeModel(
    model_name="gemini-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
async def generate_gemini_response(prompt: str) -> str:
    """Generic function to call the Gemini API and get a response."""
    try:
        response = await model.generate_content_async(prompt)
        # Handle cases where the response was blocked by safety filters
        if not response.parts:
            return "The AI response was blocked due to safety settings. Please rephrase your query."
        return response.text
    except Exception as e:
        print(f"An error occurred with the Gemini API: {e}")
        return f"Error: Could not get a response from the AI model. Details: {e}"