Update document_chat.py
document_chat.py  CHANGED  (+3, -4)
@@ -8,7 +8,7 @@ from langchain.memory import ConversationBufferMemory
 from langchain.llms import HuggingFaceHub
 from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
-from langchain.chains.combine_documents import
+from langchain.chains.combine_documents import StuffDocumentsChain  # Corrected import
 
 # Constants
 CHROMA_DB_PATH = "chroma_db"
@@ -50,8 +50,8 @@ def process_query_with_memory(query, chat_history=[]):
     question_generator_template = "Generate a question based on the user's request: {query}"
     question_generator = LLMChain(llm=llm, prompt=PromptTemplate(template=question_generator_template, input_variables=["query"]))
 
-    #
-    combine_docs_chain =
+    # Use StuffDocumentsChain to combine the retrieved documents
+    combine_docs_chain = StuffDocumentsChain(llm=llm)  # Corrected use of StuffDocumentsChain
 
     # Create a ConversationalRetrievalChain with the loaded model and retriever
     qa_chain = ConversationalRetrievalChain(
@@ -65,4 +65,3 @@ def process_query_with_memory(query, chat_history=[]):
     # Run the query with the current chat history and return the response
     response = qa_chain.run({"question": query, "chat_history": chat_history})
     return response
-
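Note on the merged code: StuffDocumentsChain(llm=llm) will likely still fail validation in the classic (pre-LCEL) LangChain API, because StuffDocumentsChain is driven by an LLMChain passed as llm_chain, together with a document_variable_name naming the prompt variable that receives the stuffed documents; ConversationalRetrievalChain in turn expects a retriever alongside question_generator and combine_docs_chain. Below is a minimal sketch of that wiring, assuming the llm, question_generator, and Chroma-backed retriever defined elsewhere in document_chat.py; the qa_prompt text is illustrative, not taken from the commit.

from langchain.chains import ConversationalRetrievalChain, LLMChain, StuffDocumentsChain
from langchain.prompts import PromptTemplate

# Prompt that receives the retrieved documents (as "context") and the question.
qa_prompt = PromptTemplate(
    template="Answer using only the context below.\n\n{context}\n\nQuestion: {question}\nAnswer:",
    input_variables=["context", "question"],
)

# StuffDocumentsChain wraps an LLMChain; document_variable_name must match
# the prompt variable the concatenated documents are "stuffed" into.
combine_docs_chain = StuffDocumentsChain(
    llm_chain=LLMChain(llm=llm, prompt=qa_prompt),
    document_variable_name="context",
)

qa_chain = ConversationalRetrievalChain(
    retriever=retriever,                    # assumed: Chroma vector store retriever from this file
    question_generator=question_generator,  # the LLMChain from the hunk above
    combine_docs_chain=combine_docs_chain,
)

response = qa_chain.run({"question": query, "chat_history": chat_history})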