# /home/user/app/agent.py
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.agents import AgentExecutor, create_structured_chat_agent
# from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
# --- Import your defined tools FROM THE 'tools' PACKAGE ---
# This relies on tools/__init__.py correctly exporting these names.
from tools import (
    BioPortalLookupTool,
    UMLSLookupTool,
    QuantumTreatmentOptimizerTool,
    # QuantumOptimizerInput,  # Only if needed for type hints directly in this file
    # GeminiTool,  # Uncomment and add to __all__ in tools/__init__.py if you decide to use it
)
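# For reference, a minimal sketch of what tools/__init__.py is assumed to export so the
# import above resolves (module/file names below are illustrative, not confirmed):
#
#   from .umls_tool import UMLSLookupTool
#   from .bioportal_tool import BioPortalLookupTool
#   from .quantum_optimizer_tool import QuantumTreatmentOptimizerTool
#
#   __all__ = ["UMLSLookupTool", "BioPortalLookupTool", "QuantumTreatmentOptimizerTool"]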
from config.settings import settings
from services.logger import app_logger
# --- Initialize LLM (Gemini) ---
try:
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        # This check is crucial. If no key, LLM init will fail.
        app_logger.error("CRITICAL: GOOGLE_API_KEY (for Gemini) not found in settings or environment. Agent cannot initialize.")
        raise ValueError("GOOGLE_API_KEY (for Gemini) not configured.")
    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro-latest",  # Or "gemini-pro"
        temperature=0.2,  # Lower temperature for more deterministic tool use
        # google_api_key=settings.GEMINI_API_KEY,  # Explicitly pass if the GOOGLE_API_KEY env var might not be picked up
        convert_system_message_to_human=True,  # Can help with models that don't strictly follow the system role
        # safety_settings={...},  # Optional safety settings
    )
    app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully for agent.")
except Exception as e:
    app_logger.error(f"Failed to initialize ChatGoogleGenerativeAI for agent: {e}", exc_info=True)
    # This error needs to be propagated so get_agent_executor fails clearly.
    raise ValueError(f"Gemini LLM initialization failed: {e}. Check API key and configuration in HF Secrets.")
# --- Initialize Tools List ---
# The tool instances are created here. Their internal logic (like API calls)
# will be executed when the agent calls their .run() or ._run() method.
tools_list = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
    # GeminiTool(),  # Add if using
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
# --- Agent Prompt (Adapted for Structured Chat with Gemini and your tools) ---
SYSTEM_PROMPT_TEMPLATE = (
    "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
    "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
    "You must adhere to the following guidelines:\n"
    "1. Disclaimers: Always remind the user that you are an AI, not a human medical professional, and your information "
    "is for support, not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases "
    "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
    "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
    "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
"3. Tool Usage: You have access to the following tools:\n{tools}\n" # {tools} is filled by the agent with tool names and descriptions
" To use a tool, respond with a JSON markdown code block with the 'action' and 'action_input' keys. "
" The 'action_input' should match the schema for the specified tool. Examples:\n"
" For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
" For `bioportal_lookup`: ```json\n{{\"action\": \"bioportal_lookup\", \"action_input\": {{\"term\": \"diabetes mellitus\", \"ontology\": \"SNOMEDCT\"}}}}\n```\n"
" For `quantum_treatment_optimizer`: ```json\n{{\"action\": \"quantum_treatment_optimizer\", \"action_input\": {{\"patient_data\": {{\"age\": 55, \"gender\": \"Male\"}}, \"current_treatments\": [\"metformin\"], \"conditions\": [\"Type 2 Diabetes\"]}}}}\n```\n"
" Ensure the `action_input` for `quantum_treatment_optimizer` includes a `patient_data` dictionary populated from the overall {patient_context}.\n"
"4. Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive answer. Cite the tool if you used one (e.g., 'According to UMLS Lookup...').\n"
"5. Specific Tool Guidance:\n"
" - If asked about treatment optimization for a specific patient (especially if patient context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
" - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
# " - If the query is very general, complex, or creative beyond simple lookups, you might consider using `google_gemini_chat` (if enabled as a tool) or answering directly if confident.\n" # If GeminiTool is used
"6. Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
"Begin!\n\n"
"Previous conversation history:\n"
"{chat_history}\n\n"
"New human question: {input}\n"
"{agent_scratchpad}" # Placeholder for agent's thoughts and tool outputs
)
# Create the prompt template.
# The keys we pass to agent_executor.invoke are 'input', 'chat_history', and 'patient_context';
# create_structured_chat_agent fills in 'tools', 'tool_names', and 'agent_scratchpad' itself.
# Note: 'agent_scratchpad' is already a string slot inside SYSTEM_PROMPT_TEMPLATE, so no separate
# MessagesPlaceholder is used here (the structured chat agent formats the scratchpad as text).
prompt = ChatPromptTemplate.from_messages([
    ("system", SYSTEM_PROMPT_TEMPLATE),
])
app_logger.info("Agent prompt template created for Gemini structured chat agent.")
# --- Create Agent ---
try:
    # create_structured_chat_agent is suitable for LLMs that can follow instructions
    # to produce structured output (like JSON for tool calls) when prompted.
    agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")
except Exception as e:
    app_logger.error(f"Failed to create structured chat agent: {e}", exc_info=True)
    raise ValueError(f"Gemini agent creation failed: {e}")
# --- Create Agent Executor ---
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,  # Essential for debugging tool usage
    handle_parsing_errors=True,  # Gracefully handle LLM output that isn't perfect JSON for a tool call
    max_iterations=10,  # Prevents overly long or runaway chains
    # return_intermediate_steps=True,  # Set to True to get thoughts/actions in the response dict
    early_stopping_method="force",  # "force" returns a standard stop message; "generate" is not supported for runnable agents
)
app_logger.info("AgentExecutor with Gemini agent created successfully.")
# --- Getter Function for Streamlit App ---
def get_agent_executor():
    """
    Returns the configured agent executor for Gemini.
    Initialization of the LLM, tools, agent, and executor happens when this module is imported.
    """
    # A final check for API key availability, though LLM initialization should have caught it.
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        app_logger.critical("CRITICAL: GOOGLE_API_KEY (for Gemini) is not available when get_agent_executor is called. This indicates an earlier init failure or misconfiguration.")
        raise ValueError("Google API Key for Gemini not configured. Agent cannot function.")
    return agent_executor
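# A minimal sketch of how the Streamlit app is expected to consume this module (the
# surrounding app code and variable names below are assumptions, not shown in this file):
#
#   from agent import get_agent_executor
#
#   executor = get_agent_executor()
#   result = executor.invoke({
#       "input": user_question,                      # str from the chat box
#       "chat_history": chat_history_messages,       # list of HumanMessage / AIMessage
#       "patient_context": patient_context_summary,  # str built from the patient form
#   })
#   answer = result["output"]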
# --- Example Usage (for local testing of this agent.py file) ---
if __name__ == "__main__":
if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
print("π¨ Please set your GOOGLE_API_KEY in .env file or as an environment variable to run the test.")
else:
print("\nπ Quantum Health Navigator (Gemini Agent Test Console) π")
print("-----------------------------------------------------------")
print("Type 'exit' or 'quit' to stop.")
print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
print("-" * 59)
test_executor = get_agent_executor() # Get the globally defined executor
current_chat_history_for_test_run = [] # List of HumanMessage, AIMessage
# Simulated patient context for testing the {patient_context} variable
test_patient_context_summary_str = (
"Age: 62; Gender: Female; Chief Complaint: Fatigue and increased thirst; "
"Key Medical History: Obesity, family history of diabetes; "
"Current Medications: None reported; Allergies: Sulfa drugs."
)
print(f"βΉοΈ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
while True:
user_input_str = input("π€ You: ")
if user_input_str.lower() in ["exit", "quit"]:
print("π Exiting test console.")
break
if not user_input_str.strip():
continue
try:
app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
# These are the keys expected by the prompt template
# and processed by create_structured_chat_agent
response_dict = test_executor.invoke({
"input": user_input_str,
"chat_history": current_chat_history_for_test_run,
"patient_context": test_patient_context_summary_str
})
ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
print(f"π€ Agent: {ai_output_str}")
# Update history for the next turn
current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
# Optional: Limit history length
if len(current_chat_history_for_test_run) > 10: # Keep last 5 pairs
current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
except Exception as e:
print(f"β οΈ Error during agent invocation: {e}")
app_logger.error(f"Error in __main__ agent test invocation: {e}", exc_info=True) |