Spaces:
Sleeping
Sleeping
Create src/chimera/api_clients/gemini_client.py
Browse files
src/chimera/api_clients/gemini_client.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/chimera/api_clients/gemini_client.py
|
2 |
+
import google.generativeai as genai
|
3 |
+
from ..config import GEMINI_API_KEY, GEMINI_MODEL_NAME
|
4 |
+
from ..utils.logging_config import logger
|
5 |
+
import asyncio
|
# Configure the Gemini client globally (or instantiate as needed)
if not GEMINI_API_KEY:
    # Depending on your flow, you might raise an error or handle this state
    logger.error("Gemini API Key not configured!")
else:
    genai.configure(api_key=GEMINI_API_KEY)
async def generate_analysis(prompt: str, retries: int = 3, delay: float = 5) -> str:
    """
    Generate content for *prompt* via the Gemini API, with retries.

    The google-generativeai SDK call is synchronous, so it is dispatched to
    the event loop's default executor to avoid blocking. Failed attempts are
    retried up to *retries* times with a linearly increasing sleep between
    attempts (delay, 2*delay, 3*delay, ...).

    Args:
        prompt: Text prompt sent to the model.
        retries: Maximum number of attempts (must be >= 1 to attempt at all).
        delay: Base sleep in seconds between attempts.

    Returns:
        The generated text on success, or a human-readable string starting
        with "Error: " on any failure (missing API key, safety block, empty
        response, exhausted retries). Callers rely on this string-based
        error contract, so no exceptions escape this function.
    """
    if not GEMINI_API_KEY:
        return "Error: Gemini API Key not configured."

    try:
        model = genai.GenerativeModel(GEMINI_MODEL_NAME)
        logger.info(f"Sending prompt to Gemini (first ~100 chars): {prompt[:100]}...")

        loop = asyncio.get_running_loop()
        last_error: Exception | None = None  # kept for the exhausted-retries message
        for attempt in range(retries):
            try:
                # The SDK call is synchronous; run it in the default executor.
                # The callable and its argument are passed directly -- no
                # lambda wrapper is needed for run_in_executor.
                response = await loop.run_in_executor(
                    None, model.generate_content, prompt
                )
                logger.info("Received response from Gemini.")

                # Basic check for blocked content or empty response.
                if not response.parts:
                    if response.prompt_feedback.block_reason:
                        logger.warning(f"Gemini prompt blocked: {response.prompt_feedback.block_reason}")
                        return f"Error: Content generation blocked by safety filters ({response.prompt_feedback.block_reason})."
                    logger.warning("Gemini returned empty response.")
                    return "Error: Gemini returned an empty response."

                return response.text

            except Exception as e:  # Catch broad exceptions from the SDK
                last_error = e
                logger.error(f"Gemini API call attempt {attempt + 1} failed: {e}")
                if attempt < retries - 1:
                    # Linear backoff: delay, 2*delay, 3*delay, ...
                    # (The original comment called this "exponential"; the
                    # schedule is unchanged and now documented honestly.)
                    await asyncio.sleep(delay * (attempt + 1))

        # Reached when every attempt failed, or when retries < 1 (previously
        # this degenerate case fell through to a dead "unexpected exit" return).
        logger.error("Gemini API call failed after multiple retries.")
        return f"Error: Failed to get response from Gemini after {retries} attempts. ({last_error})"

    except Exception as e:
        logger.exception("Critical error during Gemini generation setup or execution.")
        return f"Error: An unexpected error occurred while contacting Gemini: {e}"