import os
import sys
from typing import List, Union
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema import BaseMessage, AIMessage, HumanMessage, SystemMessage
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain_google_genai import ChatGoogleGenerativeAI
from config.settings import settings
from services.logger import app_logger
from tools import BioPortalLookupTool, UMLSLookupTool, QuantumTreatmentOptimizerTool
# -----------------------------------------------------------------------------
# 1. Initialize the Gemini LLM
# -----------------------------------------------------------------------------
def _init_llm() -> ChatGoogleGenerativeAI:
    """
    Initialize the Google Gemini LLM with the configured API key.

    The key is read from ``settings.GEMINI_API_KEY``, falling back to the
    ``GOOGLE_API_KEY`` environment variable.

    Returns:
        A configured ``ChatGoogleGenerativeAI`` client.

    Raises:
        ValueError: if no API key is found, or if client construction fails
            (the original exception is chained as the cause).
    """
    api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
    if not api_key:
        err = "Gemini API key not found: set GEMINI_API_KEY in settings or GOOGLE_API_KEY in env"
        app_logger.error(err)
        raise ValueError(err)
    try:
        llm = ChatGoogleGenerativeAI(
            model="gemini-1.5-pro-latest",
            temperature=0.2,  # low temperature for more deterministic answers
            google_api_key=api_key,
            # Gemini historically had no native system role; fold system
            # messages into the human turn so the chat prompt still works.
            convert_system_message_to_human=True,
        )
        app_logger.info(f"Gemini LLM initialized ({llm.model})")
        return llm
    except Exception as e:
        err = f"Failed to initialize Gemini LLM: {e}"
        app_logger.error(err, exc_info=True)
        # Chain the original exception so the root cause is not lost.
        raise ValueError(err) from e
# -----------------------------------------------------------------------------
# 2. Build the structured chat prompt
# -----------------------------------------------------------------------------
def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
    """
    Construct the structured-chat ChatPromptTemplate.

    The template consists of:
      - a system instruction block (with ``{tool_names}``/``{tools}``
        template variables),
      - a placeholder for ``chat_history`` (List[BaseMessage]),
      - the current human ``{input}``,
      - a placeholder for ``agent_scratchpad`` (List[BaseMessage]) used by
        the agent to manage tool calls.

    Note:
        The ``tool_names``/``tools`` parameters are accepted for interface
        stability but are not interpolated here: ``{tool_names}`` and
        ``{tools}`` are left as template variables, which
        ``create_structured_chat_agent`` fills in via ``prompt.partial``.

    Returns:
        The assembled ChatPromptTemplate.
    """
    system_text = (
        "You are Quantum Health Navigator, an AI assistant for healthcare professionals.\n\n"
        "\u2022 Disclaim: you are an AI, not a substitute for clinical judgment.\n"
        "\u2022 Patient context: {patient_context}\n"
        "\u2022 Available tools: {tool_names}\n"
        "{tools}\n\n"
        "To call a tool, reply *only* with a JSON code block:\n"
        "{{\"action\": \"<tool_name>\", \"action_input\": <input>}}\n\n"
        "After you receive the tool's output, craft a full answer for the user, citing any tools used."
    )
    return ChatPromptTemplate.from_messages([
        ("system", system_text),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
# -----------------------------------------------------------------------------
# 3. Lazily build and return the AgentExecutor singleton
# -----------------------------------------------------------------------------
def get_agent_executor() -> AgentExecutor:
    """
    Return the module-wide AgentExecutor singleton, creating it on first call.

    The first call initializes the LLM, instantiates the tool set, builds the
    prompt, creates the structured-chat agent, and wraps it in an
    AgentExecutor; subsequent calls return the cached instance.

    Returns:
        The shared AgentExecutor.

    Raises:
        ValueError: propagated from ``_init_llm`` if the LLM cannot be set up.
    """
    global _agent_executor_instance
    # globals().get(...) also covers the case where the name exists but was
    # explicitly reset to None (more robust than a string membership test).
    if globals().get("_agent_executor_instance") is None:
        # 3.1 Initialize LLM
        llm = _init_llm()
        # 3.2 Prepare tools
        tools_list = [
            UMLSLookupTool(),
            BioPortalLookupTool(),
            QuantumTreatmentOptimizerTool(),
        ]
        app_logger.info(f"Loaded tools: {[t.name for t in tools_list]}")
        # 3.3 Build prompt
        prompt = _build_prompt_template(
            tool_names=[t.name for t in tools_list],
            tools=tools_list
        )
        app_logger.info("Prompt template built")
        # 3.4 Create the structured agent
        agent = create_structured_chat_agent(
            llm=llm,
            tools=tools_list,
            prompt=prompt
        )
        app_logger.info("Structured chat agent created")
        # 3.5 Create the executor
        executor = AgentExecutor(
            agent=agent,
            tools=tools_list,
            verbose=True,
            handle_parsing_errors=True,  # retry instead of crashing on malformed LLM output
            max_iterations=10,           # cap tool-call loops
            early_stopping_method="generate",
        )
        app_logger.info("AgentExecutor initialized")
        _agent_executor_instance = executor
    return _agent_executor_instance
# -----------------------------------------------------------------------------
# 4. Optional REPL for local testing
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Bootstrap the agent stack; bail out of the console if setup fails.
    try:
        executor = get_agent_executor()
    except Exception as e:
        print(f"β Initialization failed: {e}")
        sys.exit(1)
    # Fixed sample patient context used for every turn of this test console.
    patient_context = (
        "Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
        "History: Prediabetes, mild dyslipidemia; Medications: None."
    )
    chat_history: List[Union[SystemMessage, HumanMessage, AIMessage]] = []
    print("π Quantum Health Navigator Console (type 'exit' to quit)")
    while True:
        query = input("π€ You: ").strip()
        # Guard clauses: quit commands end the loop, blank input is ignored.
        if query.lower() in {"exit", "quit"}:
            print("π Goodbye!")
            break
        if not query:
            continue
        try:
            result = executor.invoke({
                "input": query,
                "chat_history": chat_history,
                "patient_context": patient_context,
            })
            answer = result.get("output", "")
            print(f"π€ Agent: {answer}\n")
            # Record the exchange so later turns see the conversation so far.
            chat_history.append(HumanMessage(content=query))
            chat_history.append(AIMessage(content=answer))
            # Keep only the 20 most recent messages to bound prompt size.
            if len(chat_history) > 20:
                chat_history = chat_history[-20:]
        except Exception as err:
            print(f"β οΈ Inference error: {err}")
            app_logger.error("Runtime error in REPL", exc_info=True)