# /home/user/app/agent.py
import os
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from tools import (
    BioPortalLookupTool,
    UMLSLookupTool,
    QuantumTreatmentOptimizerTool,
)
from config.settings import settings
from services.logger import app_logger
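
# --- LLM initialization (module level; fails fast if the OpenAI key is missing) ---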
llm = None
try:
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
    llm = ChatOpenAI(
        model_name="gpt-4-turbo-preview",
        temperature=0.1,
        openai_api_key=settings.OPENAI_API_KEY
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
except Exception as e:
    detailed_error_message = str(e)
    user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}."
    if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
        user_facing_error = "OpenAI LLM initialization failed: API key issue. Check HF Secrets."
    app_logger.error(user_facing_error, exc_info=True)
    raise ValueError(user_facing_error)
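
# Tools exposed to the agent; their implementations live in tools.py.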
tools_list = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
# --- Agent Prompt (Reverting to version that EXPLICITLY includes {tools} and {tool_names} in the system message string) ---
OPENAI_SYSTEM_PROMPT_TEXT_WITH_TOOLS_EXPLICIT = (
    "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
    "Your primary goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of specialized tools (their names are: {tool_names}). Their detailed descriptions are available to you: {tools}. Use them when a user's query can be best answered by one of them.\n"  # Explicit {tools} and {tool_names}
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "Tool Usage Guidelines:\n"
    "1. When using the 'quantum_treatment_optimizer' tool, its 'action_input' argument requires three main keys: 'patient_data', 'current_treatments', and 'conditions'.\n"
    " - The 'patient_data' key MUST be a dictionary. Populate this dictionary by extracting relevant details from the {patient_context}. "
    " For example, if {patient_context} is 'Age: 50; Gender: Male; Key Medical History: Hypertension; Chief Complaint: headache', "
    " then 'patient_data' could be {{\"age\": 50, \"gender\": \"Male\", \"relevant_history\": [\"Hypertension\"], \"symptoms\": [\"headache\"]}}. "
    " Include details like age, gender, chief complaint, key medical history, and current medications from {patient_context} within this 'patient_data' dictionary. If a value is not present in context, omit the key or use null/None if appropriate for the tool, but prioritize providing what is available.\n"
    " - 'current_treatments' should be a list of strings derived from the 'Current Medications' part of {patient_context}.\n"
    " - 'conditions' should be a list of strings, including primary conditions from the 'Key Medical History' or 'Chief Complaint' parts of {patient_context}, and any conditions explicitly mentioned or implied by the current user query.\n"
    "2. For `bioportal_lookup`, the 'action_input' should be a dictionary like {{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_ACRONYM\"}}. If the user doesn't specify an ontology, you may ask for clarification or default to 'SNOMEDCT_US'.\n"
    "3. For `umls_lookup`, the 'action_input' is a single string: the medical term to search.\n"
    "4. After using a tool, you will receive an observation. Use this observation and your general knowledge to formulate a comprehensive final answer to the human. Clearly cite the tool if its output forms a key part of your answer (e.g., 'According to UMLS Lookup...').\n"
    "5. If a user's query seems to ask for treatment advice or medication suggestions for a specific scenario (especially if patient context is available), you MUST prioritize using the 'quantum_treatment_optimizer' tool.\n"
    "6. For general medical knowledge questions not requiring patient-specific optimization or specific ontology/CUI lookups, you may answer directly from your training data, but always include the standard disclaimer."
)
prompt = ChatPromptTemplate.from_messages([
    ("system", OPENAI_SYSTEM_PROMPT_TEXT_WITH_TOOLS_EXPLICIT),  # Using the version with explicit {tools} and {tool_names}
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad")
])
# Unlike create_react_agent, create_openai_functions_agent does not fill {tools}/{tool_names},
# so partial them into the prompt here to avoid a missing-variables error at invoke time.
prompt = prompt.partial(
    tools="\n".join(f"{t.name}: {t.description}" for t in tools_list),
    tool_names=", ".join(t.name for t in tools_list),
)
app_logger.info("Agent prompt template (with explicit tools/tool_names in system message) created.")
if llm is None:
    app_logger.critical("LLM object is None at agent creation (OpenAI). Cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize.")
try:
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    raise ValueError(f"OpenAI agent creation failed: {e}")
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=7,
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
_agent_executor_instance = agent_executor
def get_agent_executor():
    """Return the module-level AgentExecutor, verifying it and the API key are configured."""
    global _agent_executor_instance
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
        raise RuntimeError("Agent executor (OpenAI) was not properly initialized.")
    if not settings.OPENAI_API_KEY:
        app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
        raise ValueError("OpenAI API Key not configured.")
    return _agent_executor_instance
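
# Hypothetical usage sketch (illustrative only, not part of this module): a caller such as
# an app entry point in this Space would fetch the executor and invoke it with the three
# keys the prompt expects ("input", "chat_history", "patient_context"), mirroring the
# __main__ test console below. The name `user_text` is an assumed placeholder.
#
#   executor = get_agent_executor()
#   result = executor.invoke({
#       "input": user_text,
#       "chat_history": [],     # list of HumanMessage/AIMessage turns
#       "patient_context": "",  # free-text patient summary; may be empty
#   })
#   answer = result["output"]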
if __name__ == "__main__":
if not settings.OPENAI_API_KEY:
print("π¨ Please set your OPENAI_API_KEY in .env or environment.")
else:
print("\nπ Quantum Health Navigator (OpenAI Agent Test Console) π")
try: test_executor = get_agent_executor()
except ValueError as e_init: print(f"β οΈ Agent init failed: {e_init}"); exit()
history = []
context = ("Age: 60; Gender: Male; Chief Complaint: general fatigue and occasional dizziness; "
"Key Medical History: Type 2 Diabetes, Hypertension; "
"Current Medications: Metformin 1000mg daily, Lisinopril 20mg daily; Allergies: None.")
print(f"βΉοΈ Simulated Context: {context}\n")
while True:
usr_in = input("π€ You: ").strip()
if usr_in.lower() in ["exit", "quit"]: print("π Exiting."); break
if not usr_in: continue
try:
res = test_executor.invoke({
"input": usr_in,
"chat_history": history,
"patient_context": context,
# DO NOT PASS "tools" or "tool_names" here; the agent constructor does that
})
ai_out = res.get('output', "No output.")
print(f"π€ Agent: {ai_out}")
history.extend([HumanMessage(content=usr_in), AIMessage(content=ai_out)])
if len(history) > 8: history = history[-8:]
except Exception as e_invoke: print(f"β οΈ Invoke Error: {type(e_invoke).__name__} - {e_invoke}") |