GuglielmoTor committed
Commit 0f768de · verified · 1 Parent(s): 365263e

Update chatbot_handler.py

Files changed (1): chatbot_handler.py (+8 -12)
chatbot_handler.py CHANGED

@@ -2,10 +2,10 @@
 import logging
 import json
 import aiohttp # Using asynchronous aiohttp
-import os
+# import os # No longer needed for API key from environment
 
-# Ensure GROQ_API_KEY is set in your environment variables
-api_key = os.getenv('GEMINI_API_KEY')
+# Ensure GEMINI_API_KEY is set in your environment variables
+# api_key = os.getenv('GEMINI_API_KEY') # Removed: API key will be empty string, provided by Canvas
 
 def format_history_for_gemini(gradio_chat_history: list) -> list:
     """
@@ -38,18 +38,15 @@ async def generate_llm_response(user_message: str, plot_id: str, plot_label: str
     """
     logging.info(f"Generating LLM response for plot_id: {plot_id} ('{plot_label}'). User message: '{user_message}'")
 
-    # The chat_history_for_plot already contains the full conversation including the latest user message.
-    # The initial system-like prompt is the first message from the assistant in the history.
     gemini_formatted_history = format_history_for_gemini(chat_history_for_plot)
 
     if not gemini_formatted_history:
         logging.error("Cannot generate LLM response: Formatted history is empty.")
         return "I'm sorry, there was an issue processing the conversation history."
 
-    # Construct payload for Gemini API
     payload = {
         "contents": gemini_formatted_history,
-        "generationConfig": { # Optional: configure generation parameters
+        "generationConfig": {
             "temperature": 0.7,
             "topK": 1,
             "topP": 1,
@@ -57,9 +54,10 @@ async def generate_llm_response(user_message: str, plot_id: str, plot_label: str
         }
     }
 
-
-    # Using gemini-2.0-flash as per instructions
-    apiUrl = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-05-20:generateContent?key={api_key}"
+    # API key is an empty string. Canvas will automatically provide it at runtime.
+    apiKey = ""
+    # Using gemini-2.0-flash as per general instructions
+    apiUrl = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={apiKey}"
 
     async with aiohttp.ClientSession() as session:
         try:
@@ -69,7 +67,6 @@ async def generate_llm_response(user_message: str, plot_id: str, plot_label: str
 
                 if resp.status != 200:
                     error_detail = response_json.get('error', {}).get('message', 'Unknown API error')
-                    # Check for specific content policy block from Gemini, even on error status
                     if response_json.get("promptFeedback") and response_json["promptFeedback"].get("blockReason"):
                         reason = response_json["promptFeedback"]["blockReason"]
                         safety_ratings_info = [f"{rating['category']}: {rating['probability']}" for rating in response_json['promptFeedback'].get('safetyRatings', [])]
@@ -86,7 +83,6 @@ async def generate_llm_response(user_message: str, plot_id: str, plot_label: str
                     response_text = response_json["candidates"][0]["content"]["parts"][0]["text"]
                     logging.info(f"LLM generated response for '{plot_label}': {response_text[:100]}...")
                     return response_text
-                # Check for promptFeedback even on 200 if candidates are missing (e.g. blocked content)
                 elif response_json.get("promptFeedback") and response_json["promptFeedback"].get("blockReason"):
                     reason = response_json["promptFeedback"]["blockReason"]
                     safety_ratings_info = [f"{rating['category']}: {rating['probability']}" for rating in response_json['promptFeedback'].get('safetyRatings', [])]
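For context, format_history_for_gemini is called on the Gradio history, but its body falls outside this diff. Below is a minimal sketch of what it plausibly does, assuming Gradio's older list-of-[user, assistant] pairs history format; everything here beyond the signature shown in the diff is an assumption, not the committed implementation.

def format_history_for_gemini(gradio_chat_history: list) -> list:
    """Convert Gradio [user, assistant] pairs into Gemini's 'contents' format (assumed)."""
    contents = []
    for user_msg, assistant_msg in gradio_chat_history:
        if user_msg:
            contents.append({"role": "user", "parts": [{"text": user_msg}]})
        if assistant_msg:
            # Gemini's REST API uses "model" (not "assistant") for the reply role
            contents.append({"role": "model", "parts": [{"text": assistant_msg}]})
    return contents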
 
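For reference, the request body this code assembles looks roughly like the following. The hunk cuts off inside generationConfig, so "maxOutputTokens" is an assumed stand-in for whatever config line falls outside the diff.

# Hypothetical fully-assembled request body; "maxOutputTokens" is assumed,
# not visible in the diff.
payload = {
    "contents": [
        {"role": "user", "parts": [{"text": "What does this plot show?"}]}
    ],
    "generationConfig": {
        "temperature": 0.7,       # sampling randomness; lower is more deterministic
        "topK": 1,                # keep only the single most likely token
        "topP": 1,                # nucleus sampling cutoff (1 disables it)
        "maxOutputTokens": 1024   # assumed value for the line the hunk cuts off
    }
}

Note that a topK of 1 makes decoding effectively greedy, so the temperature setting has little practical effect with this configuration.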
 
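The hunks above show only fragments of the request/response handling. The sketch below reconstructs how the surrounding code plausibly fits together, assuming the payload and apiUrl from the diff; the session.post call and the except clause are reconstructions, not taken from the commit.

import aiohttp

async def call_gemini(apiUrl: str, payload: dict) -> str:
    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(apiUrl, json=payload) as resp:
                response_json = await resp.json()

                if resp.status != 200:
                    # Surface the API's own error message when one is present
                    error_detail = response_json.get('error', {}).get('message', 'Unknown API error')
                    return f"API error ({resp.status}): {error_detail}"

                candidates = response_json.get("candidates")
                if candidates:
                    return candidates[0]["content"]["parts"][0]["text"]

                # A 200 response with no candidates usually means the prompt was blocked
                feedback = response_json.get("promptFeedback", {})
                return f"Request blocked (reason: {feedback.get('blockReason', 'unknown')})."
        except aiohttp.ClientError as e:
            return f"Network error while contacting the API: {e}"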
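The main behavioral change in this commit is the key handling: the module no longer reads GEMINI_API_KEY at import time and instead sends an empty key string that the Canvas runtime is expected to inject. Outside such a runtime the request will fail with an authentication error, so one common pattern (not part of this commit) is to fall back to the environment:

import os

# Hypothetical fallback for running outside a Canvas-style runtime:
# use the environment variable when no key is injected.
apiKey = os.getenv("GEMINI_API_KEY", "")
apiUrl = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={apiKey}"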