# ask-candid/tools/question_reformulation.py
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser


def reformulate_question_using_history(state, llm):
    """
    Transform the latest user input into a standalone question, enriched with
    details from previous messages in the conversation.

    Args:
        state (dict): The current graph state; ``state["messages"]`` holds the chat history.
        llm: The LLM used to reformulate the question.

    Returns:
        dict: The updated state with the re-phrased question and the original user_input for the UI.
    """
    print("---REFORMULATE THE USER INPUT---")
    messages = state["messages"]
    question = messages[-1].content
    if len(messages) > 1:
        contextualize_q_system_prompt = """Given a chat history and the latest user input \
which might reference context in the chat history, formulate a standalone input \
which can be understood without the chat history.
Chat history:
\n ------- \n
{chat_history}
\n ------- \n
User input:
\n ------- \n
{question}
\n ------- \n
Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is.
"""
        # Pass the user input through a template variable rather than embedding the
        # raw text, so braces in the input cannot break prompt formatting.
        contextualize_q_prompt = ChatPromptTemplate.from_messages([
            ("system", contextualize_q_system_prompt),
            ("human", "{question}"),
        ])
        rag_chain = contextualize_q_prompt | llm | StrOutputParser()
        new_question = rag_chain.invoke({"chat_history": messages, "question": question})
        print(f"user asked: '{question}', agent reformulated the question based on the chat history: {new_question}")
        return {"messages": [new_question], "user_input": question}
    return {"messages": [question], "user_input": question}