# Medical_agent / app.py
# (Hugging Face file-viewer metadata removed — author: Goodnight7, commit 3be4504, 5.23 kB)
import os

# FIX: load_dotenv() was called below but never imported (NameError at startup).
from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from langchain_nomic.embeddings import NomicEmbeddings
from langsmith import Client, traceable

# Load secrets from a local .env file into the process environment.
load_dotenv()

GROQ_API_KEY = os.getenv('GROQ_API_KEY')
HF_API_KEY = os.getenv("HF_API_KEY")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")

# FIX: LangSmith reads its configuration from *environment variables*; the
# original assigned these to plain Python variables, which LangSmith never
# sees, so tracing was silently disabled.  Export them instead.
os.environ["LANGSMITH_TRACING"] = "true"
os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGSMITH_PROJECT"] = "pr-smug-rancher-51"
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
if LANGSMITH_API_KEY:
    os.environ["LANGSMITH_API_KEY"] = LANGSMITH_API_KEY

# FIX: model_name was dead and out of sync with the model actually used;
# keep one source of truth (same model the original passed to ChatGroq).
model_name = "llama-3.3-70b-versatile"  # alternatives: "llama-3.1-70b-versatile", "llama3-70b-8192"

# Groq-hosted chat model; temperature=0 for deterministic answers.
llm = ChatGroq(
    temperature=0,
    model=model_name,
    api_key=GROQ_API_KEY,
)
@traceable
def get_answer(question):
    """Answer a medical question with the LLM, grounded on cancer facts.

    Parameters
    ----------
    question : str
        The user's medical question.

    Returns
    -------
    str
        The model's reply, parsed to a plain string by StrOutputParser.
    """
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a medical expert called Dr.Med! Here are some info about cancer: {facts}"),
        ("user", "{question}")
    ])
    parser = StrOutputParser()
    # FIX: the original first built `prompt | llm` and immediately discarded
    # it; build the full pipeline once: prompt -> model -> string parser.
    chain = prompt | llm | parser
    # NOTE(review): fake_db_retrieval() is not defined anywhere in this file;
    # presumably supplied elsewhere -- verify before deploying.
    answer = chain.invoke({"question": question, "facts": fake_db_retrieval()})
    return answer
# Local Nomic embedding model used to vectorize queries for retrieval.
# inference_mode="local" runs the embedder on this machine rather than
# calling the Nomic API.
embedding_model = NomicEmbeddings(model="nomic-embed-text-v1.5", inference_mode="local")
# On-disk directory where the Chroma collection is persisted.
db = "db1"
from langchain.vectorstores import Chroma
# Reopen the persisted "chromadb3" collection (assumes it was populated by a
# separate ingestion step -- TODO confirm the directory ships with the app).
vector_store = Chroma(
    collection_name="chromadb3",
    persist_directory=db,
    embedding_function=embedding_model,
)
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chains import RetrievalQA

# Sliding-window chat memory: keeps only the most recent k exchanges.
conversational_memory = ConversationBufferWindowMemory(
    memory_key='chat_history',
    k=5,  # Number of messages stored in memory
    return_messages=True  # Must return the messages in the response.
)

# Retrieval-augmented QA chain over the vector store ("stuff" = concatenate
# all retrieved docs into one prompt).
# FIX: `as_retriever(k=5)` does not configure the retriever -- the document
# count must be passed under `search_kwargs` -- so the original silently used
# the library default instead of 5.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(search_kwargs={"k": 5})
)
from langchain.agents import Tool
#Defining the list of tool objects to be used by LangChain.
tools = [
    Tool(
        name='Medical_KB',
        # qa.run executes the RetrievalQA chain and returns its answer string.
        func=qa.run,
        description=(
            'use this tool when answering medical knowledge queries to get '
            'more information about the topic'
        )
    )
]
from langchain.agents import create_react_agent
from langchain import hub
# Pull the canonical ReAct-with-chat-history prompt from the LangChain hub.
prompt = hub.pull("hwchase17/react-chat")
# ReAct agent: the LLM decides when to call the Medical_KB tool.
agent = create_react_agent(
    tools=tools,
    llm=llm,
    prompt=prompt,
)
# Create an agent executor by passing in the agent and tools
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent,
    tools=tools,
    verbose=True,
    memory=conversational_memory,
    max_iterations=30,          # hard cap on reasoning/tool steps per query
    max_execution_time=600,     # wall-clock budget in seconds
    #early_stopping_method='generate',
    handle_parsing_errors=True  # retry instead of crashing on malformed LLM output
    )
# Function for continuing the conversation
import streamlit as st
# Function for continuing the conversation
def continue_conversation(input, history):
    """Run one agent turn and record the exchange in the running history.

    The newest exchange is pushed to the *front* of `history` (latest
    conversation first), mutating the list in place; the list is also
    returned alongside the agent's reply.
    """
    result = agent_executor.invoke({"input": input})
    reply = result['output']
    # Insert patient first, then doctor, so the doctor's reply ends up at
    # index 0 -- same final ordering as inserting each at position 0 in turn.
    for entry in ({"role": "Patient", "message": input},
                  {"role": "Doctor", "message": reply}):
        history.insert(0, entry)
    return reply, history
# Streamlit UI
def main():
    """Render the Streamlit chat UI and drive one conversation turn per rerun."""
    # FIX: the original page icon was mojibake ("πŸ‘¨β€βš•οΈ" -- UTF-8 bytes
    # decoded as cp1252); restore the intended doctor emoji.
    st.set_page_config(page_title="Medical Chatbot", page_icon="👨‍⚕️")
    st.title("Medical Chatbot")

    # Initialize the conversation history once per session.
    if 'history' not in st.session_state:
        st.session_state.history = []

    # Sidebar for memory display
    with st.sidebar:
        st.header("Conversation History")
        st.write("This section contains the conversation history.")

    # User input text box
    user_input = st.text_input("Ask a question:", key="input", placeholder="Describe your symptoms or medical questions ?")

    # FIX: process the input *before* rendering the history.  The original
    # rendered the chat first, so the newest exchange only became visible on
    # the following rerun.
    if user_input:
        _, updated_history = continue_conversation(user_input, st.session_state.history)
        st.session_state.history = updated_history

    # Display the chat history with the latest conversation at the top.
    chat_container = st.container()
    for chat in st.session_state.history:
        if chat['role'] == 'Patient':
            chat_container.markdown(f"**Patient:** {chat['message']}")
        else:
            chat_container.markdown(f"**Doctor:** {chat['message']}")

    # Display memory of past conversation in an expandable section.
    with st.expander("Memory", expanded=True):
        for chat in st.session_state.history:
            st.write(f"**{chat['role']}:** {chat['message']}")

if __name__ == "__main__":
    main()