Anirudh1993 committed
Commit 16e58b7 · verified · 1 Parent(s): 209b0a4

Update document_chat.py

Files changed (1)
1. document_chat.py +13 -1
document_chat.py CHANGED
@@ -6,6 +6,9 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferMemory
 from langchain.llms import HuggingFaceHub
+from langchain.prompts import PromptTemplate
+from langchain.chains import LLMChain
+from langchain.chains.combine_documents import DocumentCompressor
 
 # Constants
 CHROMA_DB_PATH = "chroma_db"
@@ -43,11 +46,20 @@ def process_query_with_memory(query, chat_history=[]):
     # Load the LLM model from Hugging Face
     llm = HuggingFaceHub(repo_id=LLM_Model, model_kwargs={"max_new_tokens": 500})
 
+    # Create a PromptTemplate for the question generator
+    question_generator_template = "Generate a question based on the user's request: {query}"
+    question_generator = LLMChain(llm=llm, prompt=PromptTemplate(template=question_generator_template, input_variables=["query"]))
+
+    # Document combiner (example: just concatenate documents)
+    combine_docs_chain = DocumentCompressor(combine_fn=lambda docs: " ".join([doc.page_content for doc in docs]))
+
     # Create a ConversationalRetrievalChain with the loaded model and retriever
     qa_chain = ConversationalRetrievalChain(
         llm=llm,
         retriever=retriever,
-        memory=memory
+        memory=memory,
+        question_generator=question_generator,
+        combine_docs_chain=combine_docs_chain
     )
 
     # Run the query with the current chat history and return the response
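
Note on the snippet as committed: to my knowledge, langchain.chains.combine_documents does not export a DocumentCompressor class, ConversationalRetrievalChain does not accept a bare llm keyword when constructed directly, and the chain's question generator is expected to use a prompt with "question" and "chat_history" input variables rather than "query". Below is a minimal sketch of the documented construction path for the same chain in legacy LangChain, assuming LLM_Model, retriever, memory, and query are defined elsewhere in document_chat.py as the diff context suggests:

from langchain.llms import HuggingFaceHub
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT

# Assumed to be defined elsewhere in the file: LLM_Model, retriever, memory, query.
llm = HuggingFaceHub(repo_id=LLM_Model, model_kwargs={"max_new_tokens": 500})

# The question generator condenses the follow-up question plus the chat history
# into a standalone question; CONDENSE_QUESTION_PROMPT takes the "question" and
# "chat_history" input variables.
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)

# "stuff" combine chain: concatenates the retrieved documents into one prompt.
combine_docs_chain = load_qa_chain(llm, chain_type="stuff")

qa_chain = ConversationalRetrievalChain(
    retriever=retriever,
    question_generator=question_generator,
    combine_docs_chain=combine_docs_chain,
    memory=memory,
)

# With memory attached, the chain tracks chat history itself:
response = qa_chain({"question": query})

The shorthand ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory) builds the same question generator and stuff chain internally.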