Spaces:
Sleeping
Sleeping
Update document_chat.py
Browse files- document_chat.py +18 -7
document_chat.py
CHANGED
@@ -6,7 +6,10 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
|
|
6 |
from langchain.chains import ConversationalRetrievalChain
|
7 |
from langchain.memory import ConversationBufferMemory
|
8 |
from langchain.llms import HuggingFaceHub
|
9 |
-
|
|
|
|
|
|
|
10 |
# Constants
|
11 |
CHROMA_DB_PATH = "chroma_db"
|
12 |
SENTENCE_TRANSFORMER_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
|
@@ -32,14 +35,22 @@ def ingest_pdf(pdf_path):
|
|
32 |
vector_store.add_documents(split_docs)
|
33 |
vector_store.persist()
|
34 |
|
35 |
-
def process_query_with_memory(query,
|
36 |
"""Processes user queries while maintaining conversational memory."""
|
37 |
retriever = vector_store.as_retriever()
|
38 |
|
39 |
-
#
|
40 |
-
|
41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
retriever=retriever,
|
43 |
-
memory=
|
|
|
44 |
)
|
45 |
-
|
|
|
|
6 |
from langchain.chains import ConversationalRetrievalChain
|
7 |
from langchain.memory import ConversationBufferMemory
|
8 |
from langchain.llms import HuggingFaceHub
|
9 |
+
from langchain.chains import ConversationalRetrievalChain
|
10 |
+
from langchain.chains.question_answering import load_qa_chain
|
11 |
+
from langchain.llms import HuggingFaceHub
|
12 |
+
from langchain.memory import ConversationBufferMemory
|
13 |
# Constants
|
14 |
CHROMA_DB_PATH = "chroma_db"
|
15 |
SENTENCE_TRANSFORMER_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
|
|
|
35 |
vector_store.add_documents(split_docs)
|
36 |
vector_store.persist()
|
37 |
|
38 |
def process_query_with_memory(query, chat_memory):
    """Answer a user query against the vector store while keeping chat memory.

    Args:
        query: The user's question as a string.
        chat_memory: A ConversationBufferMemory instance (or None). When
            provided, the chain reads and updates the conversation history
            from it automatically between calls.

    Returns:
        The LLM's answer string produced by the retrieval chain.
    """
    retriever = vector_store.as_retriever()

    # Initialize LLM
    llm = HuggingFaceHub(repo_id=LLM_MODEL, model_kwargs={"max_new_tokens": 500})

    # Build the conversational retrieval chain. from_llm() constructs the
    # combine-docs (QA) chain internally; passing a pre-built chain via
    # combine_docs_chain= raises "got multiple values for keyword argument",
    # so the "stuff" strategy is requested through chain_type instead.
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=chat_memory,
        chain_type="stuff",
    )

    # When memory is attached, the chain supplies chat_history itself and
    # rejects it as an extra input key; only pass it for the memory-less case.
    # (The original read chat_memory.memory, which ConversationBufferMemory
    # does not define — it would have raised AttributeError.)
    if chat_memory is not None:
        return conversation_chain.run({"question": query})
    return conversation_chain.run({"question": query, "chat_history": []})