# MedQA / agent.py
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.chat_message_histories import ChatMessageHistory  # In-memory history, if agent turns aren't persisted in a DB
from tools import (
    GeminiTool, UMLSLookupTool, BioPortalLookupTool, QuantumTreatmentOptimizerTool
)
from config.settings import settings
from services.logger import app_logger
# Initialize LLM
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0.2, openai_api_key=settings.OPENAI_API_KEY)
# If you have gpt-4 access and budget, it's generally better for agentic tasks:
# llm = ChatOpenAI(model="gpt-4-turbo-preview", temperature=0.2, openai_api_key=settings.OPENAI_API_KEY)
# Initialize Tools
tools = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
    # GeminiTool(),  # Add if you want the agent to be able to call Gemini as a sub-LLM;
    #                # be mindful of costs and latency.
]
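# For reference: each tool above is expected to implement LangChain's
# BaseTool interface. A minimal, hypothetical sketch follows (the real
# implementations live in tools.py and may differ):
#
# from langchain_core.tools import BaseTool
#
# class UMLSLookupToolSketch(BaseTool):
#     name: str = "umls_lookup"
#     description: str = "Look up a medical term in UMLS and return matching concepts."
#
#     def _run(self, query: str) -> str:
#         # Call the UMLS REST API here and summarize the hits.
#         return f"UMLS results for {query!r}"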
# Agent Prompt
# You can pull a prompt from LangChain Hub or define your own,
# e.g., prompt = hub.pull("hwchase17/openai-functions-agent")
prompt = ChatPromptTemplate.from_messages([
    ("system", (
        "You are a helpful AI assistant for healthcare professionals, named 'Quantum Health Navigator'. "
        "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
        "When using tools, be precise with your inputs. "
        "Always cite the tool you used if its output is part of your response. "
        "If asked about treatment for a specific patient, you MUST use the 'quantum_treatment_optimizer' tool. "
        "Do not provide medical advice directly without tool usage for specific patient cases. "
        "For general medical knowledge, you can answer directly or use UMLS/BioPortal for definitions and codes."
    )),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
# Create Agent
# This agent is optimized for OpenAI function calling.
agent = create_openai_functions_agent(llm, tools, prompt)
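# Note: newer LangChain releases recommend create_tool_calling_agent as the
# successor to create_openai_functions_agent. A sketch, assuming a recent
# langchain version; the prompt above already includes the required
# agent_scratchpad placeholder:
# from langchain.agents import create_tool_calling_agent
# agent = create_tool_calling_agent(llm, tools, prompt)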
# Create Agent Executor
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,  # Set to False in production if too noisy
    handle_parsing_errors=True,  # Gracefully handle errors if LLM output is not parsable
    # max_iterations=5,  # Prevent runaway agents
)
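# Optional: wrap the executor so chat history is tracked automatically.
# A minimal sketch using RunnableWithMessageHistory with the in-memory
# ChatMessageHistory imported above; the session store below is a
# hypothetical in-process dict, not something defined elsewhere in this project.
#
# from langchain_core.runnables.history import RunnableWithMessageHistory
#
# _session_store = {}
#
# def _get_session_history(session_id: str) -> ChatMessageHistory:
#     if session_id not in _session_store:
#         _session_store[session_id] = ChatMessageHistory()
#     return _session_store[session_id]
#
# agent_with_history = RunnableWithMessageHistory(
#     agent_executor,
#     _get_session_history,
#     input_messages_key="input",
#     history_messages_key="chat_history",
# )
# # agent_with_history.invoke(
# #     {"input": "Define myocardial infarction."},
# #     config={"configurable": {"session_id": "demo"}},
# # )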
def get_agent_executor():
    """Returns the configured agent executor."""
    if not settings.OPENAI_API_KEY:
        app_logger.error("OPENAI_API_KEY not set. Agent will not function.")
        raise ValueError("OpenAI API Key not configured. Agent cannot be initialized.")
    return agent_executor
# Example usage (for testing; not invoked by the Streamlit app)
if __name__ == "__main__":
    if not settings.OPENAI_API_KEY:
        print("Please set your OPENAI_API_KEY in the .env file.")
    else:
        executor = get_agent_executor()
        chat_history = []  # In a real app, this comes from the DB or session state
        while True:
            user_input = input("You: ")
            if user_input.lower() in ["exit", "quit"]:
                break
            # Convert the simple (role, content) history into LangChain message objects
            langchain_chat_history = []
            for role, content in chat_history:
                if role == "user":
                    langchain_chat_history.append(HumanMessage(content=content))
                elif role == "assistant":
                    langchain_chat_history.append(AIMessage(content=content))
            response = executor.invoke({
                "input": user_input,
                "chat_history": langchain_chat_history,
            })
            print(f"Agent: {response['output']}")
            chat_history.append(("user", user_input))
            chat_history.append(("assistant", response['output']))