mgbam committed
Commit 4a3a03e · verified · 1 Parent(s): e11982a

Update agent.py

Files changed (1):
  1. agent.py +152 -171
agent.py CHANGED
@@ -1,187 +1,168 @@
  # /home/user/app/agent.py
  import os
- from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain.agents import AgentExecutor, create_structured_chat_agent
- # from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety settings

- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
- from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

- # --- Import your defined tools FROM THE 'tools' PACKAGE ---
  from tools import (
      BioPortalLookupTool,
      UMLSLookupTool,
      QuantumTreatmentOptimizerTool,
-     # QuantumOptimizerInput, # Import if needed for type hints directly in this file
-     # GeminiTool, # Uncomment if using
- )
-
- from config.settings import settings
- from services.logger import app_logger
-
- # --- Initialize LLM (Gemini) ---
- llm = None
- try:
-     gemini_api_key_from_settings = settings.GEMINI_API_KEY
-     api_key_to_use = gemini_api_key_from_settings or os.getenv("GOOGLE_API_KEY")
-
-     if not api_key_to_use:
-         app_logger.error("CRITICAL: Gemini API Key not found in settings or environment.")
-         raise ValueError("Gemini API Key not configured.")
-
-     llm = ChatGoogleGenerativeAI(
-         model="gemini-1.5-pro-latest",
-         temperature=0.2,
-         google_api_key=api_key_to_use,
-         convert_system_message_to_human=True,
-     )
-     app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully for agent.")
- except Exception as e:
-     detailed_error_message = str(e)
-     user_facing_error = f"Gemini LLM initialization failed: {detailed_error_message}."
-     if "credential" in detailed_error_message.lower() or "api_key" in detailed_error_message.lower():
-         user_facing_error = "Gemini LLM initialization failed: API key/credential issue. Check HF Secrets."
-         app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
-     else:
-         app_logger.error(user_facing_error, exc_info=True)
-     raise ValueError(user_facing_error)
-
-
- # --- Initialize Tools List ---
- tools_list = [
-     UMLSLookupTool(),
-     BioPortalLookupTool(),
-     QuantumTreatmentOptimizerTool(),
- ]
- app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
-
-
- # --- Agent Prompt (Revised Structure) ---
- # System prompt contains general instructions and placeholders for context/tools.
- # Chat history, current input, and agent scratchpad are handled as separate message sequence parts.
- REVISED_SYSTEM_PROMPT_TEXT = (
-     "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
-     "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
-     "You must adhere to the following guidelines:\n"
-     "1. Disclaimers: Always remind the user that you are an AI, not a human medical professional, and your information "
-     "is for support, not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases "
-     "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
-     "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
-     "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
-     "3. Tool Usage: You have access to the following tools (names: {tool_names}):\n{tools}\n"
-     " To use a tool, respond *only* with a JSON markdown code block with 'action' and 'action_input' keys. "
-     " The 'action_input' must match the schema for the specified tool. Examples:\n"
-     " For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
-     " For `bioportal_lookup`: ```json\n{{\"action\": \"bioportal_lookup\", \"action_input\": {{\"term\": \"diabetes mellitus\", \"ontology\": \"SNOMEDCT\"}}}}\n```\n"
-     " For `quantum_treatment_optimizer`: ```json\n{{\"action\": \"quantum_treatment_optimizer\", \"action_input\": {{\"patient_data\": {{\"age\": 55, \"gender\": \"Male\", \"symptoms\": [\"chest pain\"]}}, \"current_treatments\": [\"metformin\"], \"conditions\": [\"Type 2 Diabetes\"]}}}}\n```\n"
-     " Ensure the `action_input` for `quantum_treatment_optimizer` includes a `patient_data` dictionary populated from the overall {patient_context}.\n"
-     "4. Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive final answer to the human. Cite the tool if you used one (e.g., 'According to UMLS Lookup...'). Do not output a tool call again unless necessary for a multi-step process.\n"
-     "5. Specific Tool Guidance:\n"
-     " - If asked about treatment optimization for a specific patient (especially if patient context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
-     " - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
-     "6. Conversation Flow: Maintain context from the chat history.\n\n"
-     "Begin!"
-     # Note: {chat_history}, {input}, and {agent_scratchpad} are NOT in this string anymore.
-     # They are handled by the ChatPromptTemplate.from_messages structure.
- )
-
- # Create the prompt template
- # Input variables for this prompt will be:
- # - patient_context (from invoke call)
- # - tool_names (provided by create_structured_chat_agent)
- # - tools (provided by create_structured_chat_agent)
- # - chat_history (from invoke call, via MessagesPlaceholder)
- # - input (from invoke call, via ("human", "{input}"))
- # - agent_scratchpad (managed by agent, via MessagesPlaceholder)
- prompt = ChatPromptTemplate.from_messages([
-     ("system", REVISED_SYSTEM_PROMPT_TEXT),
-     MessagesPlaceholder(variable_name="chat_history"),
-     ("human", "{input}"), # The current human input
-     MessagesPlaceholder(variable_name="agent_scratchpad") # For agent's intermediate work (must be list of BaseMessages)
- ])
- app_logger.info("Agent prompt template created for Gemini structured chat agent.")
-
- # --- Create Agent ---
- if llm is None: # Should have been caught by now, but defensive check
-     app_logger.critical("LLM object is None at agent creation stage. Application cannot proceed.")
-     raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
-
- try:
-     agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
-     app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")
- except Exception as e:
-     app_logger.error(f"Failed to create structured chat agent: {e}", exc_info=True)
-     # This is where "Prompt missing required variables: {'tool_names'}" was caught previously.
-     # Or "variable agent_scratchpad should be a list of base messages" if structure is wrong.
-     raise ValueError(f"Gemini agent creation failed: {e}")
-
-
- # --- Create Agent Executor ---
- agent_executor = AgentExecutor(
-     agent=agent,
-     tools=tools_list,
-     verbose=True,
-     handle_parsing_errors=True, # Important for structured output parsing
-     max_iterations=10,
-     early_stopping_method="generate",
  )
- app_logger.info("AgentExecutor with Gemini agent created successfully.")

- # --- Getter Function for Streamlit App ---
- _agent_executor_instance = agent_executor

- def get_agent_executor():
-     global _agent_executor_instance
-     if _agent_executor_instance is None:
-         app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called.")
-         raise RuntimeError("Agent executor was not properly initialized. Check application startup logs.")
      return _agent_executor_instance

- # --- Example Usage (for local testing) ---
  if __name__ == "__main__":
-     main_test_api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
-     if not main_test_api_key:
-         print("🚨 Please set your GOOGLE_API_KEY (for Gemini) in .env or environment to run test.")
-     else:
-         print("\n🚀 Quantum Health Navigator (Gemini Agent Test Console) 🚀")
-         # ... (rest of the __main__ block from the previous full agent.py, it should work with this prompt structure) ...
          try:
-             test_executor = get_agent_executor()
-         except ValueError as e_init:
-             print(f"⚠️ Agent initialization failed: {e_init}")
-             exit()
-
-         current_chat_history_for_test_run = []
-         test_patient_context_summary_str = (
-             "Age: 58; Gender: Female; Chief Complaint: Recent onset of blurry vision and fatigue; "
-             "Key Medical History: Prediabetes, Mild dyslipidemia; "
-             "Current Medications: None reported; Allergies: Sulfa drugs."
-         )
-         print(f"ℹ️ Simulated Patient Context: {test_patient_context_summary_str}\n")
-
-         while True:
-             user_input_str = input("👤 You: ").strip()
-             if user_input_str.lower() in ["exit", "quit"]:
-                 print("👋 Exiting.")
-                 break
-             if not user_input_str:
-                 continue
-             try:
-                 app_logger.info(f"__main__ test: Invoking with: '{user_input_str}'")
-                 # The keys here must match the input variables expected by the combined prompt and agent
-                 response_dict = test_executor.invoke({
-                     "input": user_input_str,
-                     "chat_history": current_chat_history_for_test_run, # List of BaseMessage
-                     "patient_context": test_patient_context_summary_str,
-                     # `tools`, `tool_names`, `agent_scratchpad` are handled internally by the agent executor
-                 })
-                 ai_output_str = response_dict.get('output', "No 'output' key in response.")
-                 print(f"🤖 Agent: {ai_output_str}")
-                 current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
-                 current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
-                 if len(current_chat_history_for_test_run) > 10: # Limit history
-                     current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
-             except Exception as e:
-                 print(f"⚠️ Error: {e}")
-                 app_logger.error(f"Error in __main__ test invocation: {e}", exc_info=True)
 
  # /home/user/app/agent.py
  import os
+ import sys
+ from typing import List
+
+ from langchain.agents import AgentExecutor, create_structured_chat_agent
+ from langchain.prompts import ChatPromptTemplate
+ from langchain.prompts.chat import MessagesPlaceholder
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ from config.settings import settings
+ from services.logger import app_logger
  from tools import (
      BioPortalLookupTool,
      UMLSLookupTool,
      QuantumTreatmentOptimizerTool,
  )

+ # -----------------------------------------------------------------------------
+ # 1. Initialize Gemini LLM
+ # -----------------------------------------------------------------------------
+ def _init_llm() -> ChatGoogleGenerativeAI:
+     """
+     Initialize the Google Gemini LLM client with the configured API key.
+     Raises ValueError if no key is found or initialization fails.
+     """
+     api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
+     if not api_key:
+         msg = "Gemini API key missing: set GEMINI_API_KEY in settings or GOOGLE_API_KEY env var"
+         app_logger.error(msg)
+         raise ValueError(msg)
+
+     try:
+         llm = ChatGoogleGenerativeAI(
+             model="gemini-1.5-pro-latest",
+             temperature=0.2,
+             google_api_key=api_key,
+             convert_system_message_to_human=True,
+         )
+         app_logger.info(f"Initialized Gemini LLM ({llm.model}) successfully.")
+         return llm
+     except Exception as e:
+         msg = f"Failed to initialize Gemini LLM: {e}"
+         app_logger.error(msg, exc_info=True)
+         raise ValueError(msg)
+
+ # -----------------------------------------------------------------------------
+ # 2. Build Prompt Template
+ # -----------------------------------------------------------------------------
+ def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
+     """
+     Build a structured chat prompt template including system instructions,
+     chat history, user input, and an agent scratchpad for intermediate tool calls.
+     (The {tool_names} and {tools} placeholders are filled in by create_structured_chat_agent.)
+     """
+     system_text = """
+ You are Quantum Health Navigator, an AI assistant for healthcare professionals.
+ • Always disclose you are an AI and not a substitute for clinical judgment.
+ • Patient context: {patient_context}
+ • Tools available: {tool_names}
+ {tools}
+
+ To invoke a tool, respond only with a JSON code block containing:
+ {{"action": "<tool_name>", "action_input": <input>}}
+
+ After the tool observation, craft a user-facing answer, citing the tool when used.
+ """.strip()
+
+     return ChatPromptTemplate.from_messages([
+         ("system", system_text),
+         MessagesPlaceholder(variable_name="chat_history"),      # List[BaseMessage]
+         ("human", "{input}"),                                   # Current user input
+         MessagesPlaceholder(variable_name="agent_scratchpad"),  # List[BaseMessage]
+     ])
+
+ # -----------------------------------------------------------------------------
+ # 3. Assemble Agent and Executor
+ # -----------------------------------------------------------------------------
+ def get_agent_executor() -> AgentExecutor:
+     """
+     Lazily initialize and return the singleton AgentExecutor.
+     """
+     global _agent_executor_instance
+     if '_agent_executor_instance' not in globals():
+         # Initialize LLM
+         llm = _init_llm()
+
+         # Prepare tools
+         tools_list = [
+             UMLSLookupTool(),
+             BioPortalLookupTool(),
+             QuantumTreatmentOptimizerTool(),
+         ]
+         app_logger.info(f"Loaded tools: {[t.name for t in tools_list]}")
+
+         # Build prompt
+         prompt = _build_prompt_template(
+             tool_names=[t.name for t in tools_list],
+             tools=tools_list
+         )
+         app_logger.info("Prompt template built successfully.")
+
+         # Create structured agent
+         agent = create_structured_chat_agent(
+             llm=llm,
+             tools=tools_list,
+             prompt=prompt
+         )
+         app_logger.info("Structured chat agent created.")
+
+         # Create executor
+         executor = AgentExecutor(
+             agent=agent,
+             tools=tools_list,
+             verbose=True,
+             handle_parsing_errors=True,
+             max_iterations=10,
+             early_stopping_method="generate",
+         )
+         app_logger.info("AgentExecutor initialized.")
+         _agent_executor_instance = executor

      return _agent_executor_instance

+ # -----------------------------------------------------------------------------
+ # 4. If run as script, provide a simple REPL for testing
+ # -----------------------------------------------------------------------------
  if __name__ == "__main__":
+     try:
+         executor = get_agent_executor()
+     except Exception as e:
+         print(f"❌ Initialization error: {e}")
+         sys.exit(1)
+
+     # Example patient context
+     patient_context = (
+         "Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
+         "Key History: Prediabetes, mild dyslipidemia; Medications: None."
+     )
+     chat_history: List[SystemMessage | HumanMessage | AIMessage] = []
+
+     print("🚀 Quantum Health Navigator Console (type 'exit' to quit)")
+     while True:
+         user_input = input("👤 You: ").strip()
+         if user_input.lower() in {"exit", "quit"}:
+             print("👋 Goodbye!")
+             break
+         if not user_input:
+             continue
+
+         # Invoke the agent
          try:
+             result = executor.invoke({
+                 "input": user_input,
+                 "chat_history": chat_history,
+                 "patient_context": patient_context
+             })
+             reply = result.get("output", "")
+             print(f"🤖 Agent: {reply}\n")
+             # Update local history
+             chat_history.append(HumanMessage(content=user_input))
+             chat_history.append(AIMessage(content=reply))
+             # Keep history manageable
+             if len(chat_history) > 20:
+                 chat_history = chat_history[-20:]
+         except Exception as err:
+             print(f"⚠️ Error during inference: {err}")
+             app_logger.error("Inference error", exc_info=True)