# /home/user/app/agent.py
import os
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
# --- Import your defined tools FROM THE 'tools' PACKAGE ---
# This relies on tools/__init__.py correctly exporting these names.
from tools import (
BioPortalLookupTool,
UMLSLookupTool,
QuantumTreatmentOptimizerTool,
# QuantumOptimizerInput, # Only if needed for type hints directly in this file
)
from config.settings import settings
from services.logger import app_logger
# --- Initialize LLM (OpenAI) ---
# Built at import time; any failure is surfaced as a ValueError so the app's
# startup path can report a readable message instead of a raw traceback.
llm = None
try:
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
    llm = ChatOpenAI(
        model_name="gpt-4-turbo-preview",  # strong at function calling & instruction following
        temperature=0.1,                   # keep tool selection near-deterministic
        openai_api_key=settings.OPENAI_API_KEY,
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
except Exception as e:
    detailed_error_message = str(e)
    lowered = detailed_error_message.lower()
    # Classify the failure: auth/key problems get a targeted hint and no traceback;
    # everything else keeps the full stack trace in the log.
    if "api_key" in lowered or "authenticate" in lowered:
        user_facing_error = (
            "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY "
            "is correctly set in Hugging Face Secrets and is valid."
        )
        app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
    else:
        user_facing_error = (
            f"OpenAI LLM initialization failed: {detailed_error_message}. "
            "Check API key and model name."
        )
        app_logger.error(user_facing_error, exc_info=True)
    raise ValueError(user_facing_error)
# --- Initialize Tools List ---
# Instantiate one of each tool class, preserving this registration order.
_TOOL_CLASSES = (
    UMLSLookupTool,
    BioPortalLookupTool,
    QuantumTreatmentOptimizerTool,
)
tools_list = [tool_cls() for tool_cls in _TOOL_CLASSES]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
# --- Agent Prompt (for OpenAI Functions Agent - Explicitly including {tools} and {tool_names}) ---
# The KeyError indicated that ChatPromptTemplate was expecting 'tools' and 'tool_names' as input variables.
# create_openai_functions_agent should populate these if these placeholders are in the system message.
#
# NOTE(review): three kinds of braces appear in this template —
#   {tool_names} / {tools}        : filled in by create_openai_functions_agent
#   {patient_context}             : supplied by the caller at invoke() time
#   {{...}}                       : escaped literal braces, rendered to the LLM as JSON examples
# Do not edit the string bytes casually: the LLM's behavior depends on this exact wording.
OPENAI_SYSTEM_PROMPT_WITH_EXPLICIT_TOOLS_VARS = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your primary goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of specialized tools. Their names are: {tool_names}. Their detailed descriptions are: {tools}. Use them when a user's query can be best answered by one of them.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n" # This variable is passed from invoke
    "Tool Usage Guidelines:\n"
    "1. When using the 'quantum_treatment_optimizer' tool, its 'action_input' argument requires three main keys: 'patient_data', 'current_treatments', and 'conditions'.\n"
    " - The 'patient_data' key MUST be a dictionary. Populate this dictionary by extracting relevant details from the {patient_context}. "
    " For example, if {patient_context} is 'Age: 50; Gender: Male; Key Medical History: Hypertension; Chief Complaint: headache', "
    " then 'patient_data' could be {{\"age\": 50, \"gender\": \"Male\", \"relevant_history\": [\"Hypertension\"], \"symptoms\": [\"headache\"]}}. "
    " Include details like age, gender, chief complaint, key medical history, and current medications from {patient_context} within this 'patient_data' dictionary.\n"
    " - 'current_treatments' should be a list of strings derived from the 'Current Medications' part of {patient_context}.\n"
    " - 'conditions' should be a list of strings, including primary conditions from the 'Key Medical History' or 'Chief Complaint' parts of {patient_context}, and any conditions explicitly mentioned or implied by the current user query.\n"
    "2. For `bioportal_lookup`, the 'action_input' should be a dictionary like {{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_ACRONYM\"}}. If the user doesn't specify an ontology, you may ask for clarification or default to 'SNOMEDCT_US'.\n"
    "3. For `umls_lookup`, the 'action_input' is a single string: the medical term to search.\n"
    "4. After using a tool, you will receive an observation. Use this observation and your general knowledge to formulate a comprehensive final answer to the human. Clearly cite the tool if its output forms a key part of your answer.\n"
    "5. If a user's query seems to ask for treatment advice or medication suggestions for a specific scenario (especially if patient context is available), you MUST prioritize using the 'quantum_treatment_optimizer' tool.\n"
    "6. For general medical knowledge questions not requiring patient-specific optimization or specific ontology/CUI lookups, you may answer directly from your training data, but always include the standard disclaimer."
)
# Chat prompt layout, in order:
#   system           -> agent instructions; expects {patient_context}, {tools}, {tool_names}
#   chat_history     -> prior Human/AI turns (MessagesPlaceholder)
#   human            -> the current user query ({input})
#   agent_scratchpad -> the agent's intermediate function calls/responses
_prompt_messages = [
    ("system", OPENAI_SYSTEM_PROMPT_WITH_EXPLICIT_TOOLS_VARS),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(_prompt_messages)
app_logger.info("Agent prompt template (with explicit tools/tool_names in system message) created.")
# Who fills what: create_openai_functions_agent provides 'tools'/'tool_names';
# the caller's invoke() provides 'input', 'chat_history', 'patient_context';
# the AgentExecutor manages 'agent_scratchpad'.
app_logger.debug(f"Prompt expected input variables: {prompt.input_variables}")
# --- Create Agent ---
# Guard: llm should never be None here (the init block raises on failure),
# but fail loudly rather than pass None into the agent factory.
if llm is None:
    app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize.")
try:
    # The factory converts tools_list into OpenAI function schemas and fills
    # the {tools}/{tool_names} placeholders in the prompt — the missing-variable
    # KeyError previously surfaced here.
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
except Exception as e:
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    raise ValueError(f"OpenAI agent creation failed: {e}")
else:
    app_logger.info("OpenAI Functions agent created successfully.")
# --- Create Agent Executor ---
# Executor configuration gathered in one place before construction.
_executor_kwargs = dict(
    agent=agent,
    tools=tools_list,            # the executor runs the actual tool objects
    verbose=True,                # log each reasoning/tool step
    handle_parsing_errors=True,  # recover from malformed LLM output instead of crashing
    max_iterations=7,            # hard cap against tool-call loops
    # return_intermediate_steps=True,  # enable when debugging tool traces
)
agent_executor = AgentExecutor(**_executor_kwargs)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
# --- Getter Function for Streamlit App ---
_agent_executor_instance = agent_executor

def get_agent_executor():
    """Return the module-level AgentExecutor for use by the Streamlit app.

    Returns:
        The AgentExecutor built at import time.

    Raises:
        RuntimeError: if the executor was never initialized (startup failed).
        ValueError: if OPENAI_API_KEY is missing, since the agent would fail on use.
    """
    # Read-only access to the module global; the previous `global` statement
    # was unnecessary (it is only required when *rebinding* the name).
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
        raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs.")
    if not settings.OPENAI_API_KEY:  # Final check before handing the executor out
        app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
        raise ValueError("OpenAI API Key not configured.")
    return _agent_executor_instance
# --- Example Usage (for local testing) ---
if __name__ == "__main__":
    # NOTE(review): the glyphs in the prints below ("π¨", "β οΈ", "π€", ...) look like
    # mojibake'd emoji from an encoding round-trip — restore the intended UTF-8
    # characters once the original source encoding is confirmed. Left byte-identical
    # here to avoid guessing at runtime output.
    if not settings.OPENAI_API_KEY:
        print("π¨ Please set your OPENAI_API_KEY in .env or environment.")
    else:
        print("\nπ Quantum Health Navigator (OpenAI Agent Test Console) π")
        try:
            test_executor = get_agent_executor()
        except (ValueError, RuntimeError) as e_init:
            # BUG FIX: get_agent_executor() raises RuntimeError (executor missing)
            # as well as ValueError (missing key); catching only ValueError let the
            # RuntimeError crash the console with a raw traceback.
            print(f"β οΈ Agent init failed: {e_init}")
            raise SystemExit(1)  # `exit()` is the interactive-shell helper; don't use it in scripts
        history = []
        # Simulated patient context mirroring what the app collects up front.
        context_str = (
            "Age: 60; Gender: Male; Chief Complaint: general fatigue and occasional dizziness; "
            "Key Medical History: Type 2 Diabetes, Hypertension; "
            "Current Medications: Metformin 1000mg daily, Lisinopril 20mg daily; Allergies: None."
        )
        print(f"βΉοΈ Simulated Context: {context_str}\n")
        while True:
            usr_in = input("π€ You: ").strip()
            if usr_in.lower() in ["exit", "quit"]:
                print("π Exiting.")
                break
            if not usr_in:
                continue
            try:
                # Payload keys must match what the ChatPromptTemplate expects after
                # create_openai_functions_agent injects 'tools'/'tool_names' — we
                # never pass those two through invoke() ourselves.
                payload = {
                    "input": usr_in,
                    "chat_history": history,
                    "patient_context": context_str,
                }
                app_logger.info(f"__main__ test (OpenAI): Invoking with payload keys: {list(payload.keys())}")
                res = test_executor.invoke(payload)
                ai_out = res.get('output', "No output.")
                print(f"π€ Agent: {ai_out}")
                history.extend([HumanMessage(content=usr_in), AIMessage(content=ai_out)])
                # Bound prompt growth: keep only the last 4 exchanges (8 messages).
                if len(history) > 8:
                    history = history[-8:]
            except Exception as e_invoke:
                print(f"β οΈ Invoke Error: {type(e_invoke).__name__} - {e_invoke}")
                app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e_invoke}", exc_info=True)