# /home/user/app/agent.py
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.agents import AgentExecutor, create_structured_chat_agent
# from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety settings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
# --- Import tool classes from the 'tools' package ---
# Relies on tools/__init__.py exporting these names (see the sketch below).
from tools import (
    BioPortalLookupTool,
    UMLSLookupTool,
    QuantumTreatmentOptimizerTool,
    # QuantumOptimizerInput,  # only needed if this file requires it for type hints
    # GeminiTool,             # unused here; the main LLM is already Gemini
)
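# A minimal sketch of what tools/__init__.py is assumed to export so the import
# above resolves. The submodule names below are illustrative guesses, not the
# actual layout of the tools package:
#
#     # tools/__init__.py (hypothetical)
#     from .umls_tool import UMLSLookupTool
#     from .bioportal_tool import BioPortalLookupTool
#     from .quantum_optimizer_tool import QuantumTreatmentOptimizerTool, QuantumOptimizerInput
#
#     __all__ = [
#         "UMLSLookupTool",
#         "BioPortalLookupTool",
#         "QuantumTreatmentOptimizerTool",
#         "QuantumOptimizerInput",
#     ]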
from config.settings import settings # This loads your HF secrets into the settings object
from services.logger import app_logger
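# A minimal sketch, assuming config/settings.py exposes a pydantic-settings object
# that reads Space secrets from environment variables. Field names other than
# GEMINI_API_KEY are hypothetical:
#
#     # config/settings.py (hypothetical)
#     from pydantic_settings import BaseSettings
#
#     class Settings(BaseSettings):
#         GEMINI_API_KEY: str | None = None     # HF Space secret used below
#         UMLS_API_KEY: str | None = None       # hypothetical, for UMLSLookupTool
#         BIOPORTAL_API_KEY: str | None = None  # hypothetical, for BioPortalLookupTool
#
#     settings = Settings()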
# --- Initialize LLM (Gemini) ---
llm = None # Initialize to None in case of failure
try:
gemini_api_key_from_settings = settings.GEMINI_API_KEY
api_key_to_use = gemini_api_key_from_settings or os.getenv("GOOGLE_API_KEY")
if not api_key_to_use:
app_logger.error(
"CRITICAL: Gemini API Key not found. "
"Ensure GEMINI_API_KEY is set in Hugging Face Space secrets and loaded into settings, "
"or GOOGLE_API_KEY is set as an environment variable."
)
raise ValueError(
"Gemini API Key not configured. Please set it in Hugging Face Space secrets "
"as GEMINI_API_KEY or ensure GOOGLE_API_KEY environment variable is available."
)
llm = ChatGoogleGenerativeAI(
model="gemini-1.5-pro-latest",
temperature=0.2,
google_api_key=api_key_to_use,
convert_system_message_to_human=True,
)
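    # Optionally, if the HarmBlockThreshold / HarmCategory import at the top is
    # uncommented, Gemini safety settings can be passed as well. A sketch with
    # illustrative thresholds, not the configuration this app actually uses:
    #
    #     llm = ChatGoogleGenerativeAI(
    #         model="gemini-1.5-pro-latest",
    #         temperature=0.2,
    #         google_api_key=api_key_to_use,
    #         convert_system_message_to_human=True,
    #         safety_settings={
    #             HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
    #         },
    #     )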
app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully using provided API key.")
except Exception as e:
detailed_error_message = str(e)
user_facing_error = f"Gemini LLM initialization failed: {detailed_error_message}. " \
"Check API key validity, model name, and configurations in Hugging Face Secrets."
if "default credentials were not found" in detailed_error_message.lower() or \
"could not find default credentials" in detailed_error_message.lower() or \
"api_key" in detailed_error_message.lower():
user_facing_error = "Gemini LLM initialization failed: API key issue or missing credentials. " \
"Ensure GEMINI_API_KEY is correctly set in Hugging Face Secrets and is valid."
app_logger.error(user_facing_error + f" Original error details: {detailed_error_message}", exc_info=False)
else:
app_logger.error(user_facing_error, exc_info=True)
raise ValueError(user_facing_error)
# --- Initialize Tools List ---
tools_list = [
UMLSLookupTool(),
BioPortalLookupTool(),
QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
# --- Agent Prompt (for Structured Chat with Gemini and your tools) ---
SYSTEM_PROMPT_TEMPLATE = (
"You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
"Your primary goal is to provide accurate information and insights based on user queries and available tools. "
"You must adhere to the following guidelines:\n"
"1. Disclaimers: Always remind the user that you are an AI, not a human medical professional, and your information "
"is for support, not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases "
"unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
"2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
"You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
"3. Tool Usage: You have access to the following tools (names: {tool_names}):\n{tools}\n" # <--- {tool_names} ADDED HERE
" To use a tool, respond *only* with a JSON markdown code block with 'action' and 'action_input' keys. "
" The 'action_input' must match the schema for the specified tool. Examples:\n"
" For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
" For `bioportal_lookup`: ```json\n{{\"action\": \"bioportal_lookup\", \"action_input\": {{\"term\": \"diabetes mellitus\", \"ontology\": \"SNOMEDCT\"}}}}\n```\n"
" For `quantum_treatment_optimizer`: ```json\n{{\"action\": \"quantum_treatment_optimizer\", \"action_input\": {{\"patient_data\": {{\"age\": 55, \"gender\": \"Male\", \"symptoms\": [\"chest pain\"]}}, \"current_treatments\": [\"metformin\"], \"conditions\": [\"Type 2 Diabetes\"]}}}}\n```\n"
" Ensure the `action_input` for `quantum_treatment_optimizer` includes a `patient_data` dictionary populated from the overall {patient_context}.\n"
"4. Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive final answer to the human. Cite the tool if you used one (e.g., 'According to UMLS Lookup...'). Do not output a tool call again unless necessary for a multi-step process.\n"
"5. Specific Tool Guidance:\n"
" - If asked about treatment optimization for a specific patient (especially if patient context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
" - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
"6. Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
"Begin!\n\n"
"Previous conversation history:\n"
"{chat_history}\n\n"
"New human question: {input}\n"
"{agent_scratchpad}"
)
# {agent_scratchpad} is already injected as a string by the system template above
# (create_structured_chat_agent formats intermediate steps as text), so a separate
# MessagesPlaceholder for it is not needed and would expect a list of messages instead.
prompt = ChatPromptTemplate.from_messages([
    ("system", SYSTEM_PROMPT_TEMPLATE),
])
app_logger.info("Agent prompt template created for Gemini structured chat agent.")
# --- Create Agent ---
if llm is None:
app_logger.critical("LLM object is None at agent creation stage. Cannot proceed.")
raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
try:
    agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")
except Exception as e:
    # A prompt missing required variables (e.g. {tools}, {tool_names}, {agent_scratchpad})
    # surfaces here as "Prompt missing required variables: ...".
    app_logger.error(f"Failed to create structured chat agent: {e}", exc_info=True)
    raise ValueError(f"Gemini agent creation failed: {e}") from e
# --- Create Agent Executor ---
agent_executor = AgentExecutor(
agent=agent,
tools=tools_list,
verbose=True,
handle_parsing_errors=True,
max_iterations=10,
    early_stopping_method="force",  # runnable-based agents only support "force"; "generate" raises when the iteration limit is hit
)
app_logger.info("AgentExecutor with Gemini agent created successfully.")
# --- Getter Function for Streamlit App ---
_agent_executor_instance = agent_executor
def get_agent_executor():
global _agent_executor_instance
if _agent_executor_instance is None:
app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called.")
raise RuntimeError("Agent executor was not properly initialized. Check application startup logs.")
return _agent_executor_instance
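# A minimal sketch of how the Streamlit app is expected to consume this module;
# the module path and session-state keys are illustrative, but the invoke() keys
# (input, chat_history, patient_context) match the prompt variables above:
#
#     # app.py (hypothetical usage)
#     import streamlit as st
#     from agent import get_agent_executor
#
#     executor = get_agent_executor()
#     user_question = st.text_input("Ask the Quantum Health Navigator")
#     patient_context_summary = st.session_state.get("patient_context", "")
#
#     if user_question:
#         response = executor.invoke({
#             "input": user_question,
#             "chat_history": st.session_state.get("chat_history", []),
#             "patient_context": patient_context_summary,
#         })
#         st.write(response["output"])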
# --- Example Usage (for local testing) ---
if __name__ == "__main__":
main_test_api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
    if not main_test_api_key:
        print("🚨 Please set GEMINI_API_KEY (or GOOGLE_API_KEY) in a .env file or as an environment variable to run the test.")
else:
print("\nπŸš€ Quantum Health Navigator (Gemini Agent Test Console) πŸš€")
print("-----------------------------------------------------------")
try:
test_executor = get_agent_executor()
except ValueError as e_init:
print(f"⚠️ Agent initialization failed during test startup: {e_init}")
print("Ensure your API key is correctly configured.")
exit()
current_chat_history_for_test_run = []
test_patient_context_summary_str = (
"Age: 62; Gender: Female; Chief Complaint: Recent onset of blurry vision and fatigue; "
"Key Medical History: Prediabetes, Mild dyslipidemia; "
"Current Medications: None reported; Allergies: Sulfa drugs."
)
print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
while True:
            user_input_str = input("👀 You: ").strip()
if user_input_str.lower() in ["exit", "quit"]:
print("πŸ‘‹ Exiting test console.")
break
if not user_input_str:
continue
try:
app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
response_dict = test_executor.invoke({
"input": user_input_str,
"chat_history": current_chat_history_for_test_run,
"patient_context": test_patient_context_summary_str
})
ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
print(f"πŸ€– Agent: {ai_output_str}")
current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
if len(current_chat_history_for_test_run) > 10:
current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
except Exception as e:
print(f"⚠️ Error during agent invocation: {e}")
app_logger.error(f"Error in __main__ agent test invocation: {e}", exc_info=True)