Anirudh1993 committed
Commit 6e4cda5 · verified · 1 Parent(s): c5844af

Update document_chat.py

Files changed (1):
  1. document_chat.py  +5 -5
document_chat.py CHANGED
@@ -28,7 +28,7 @@ def ingest_pdf(pdf_path):
     documents = loader.load()
 
     # Split text into smaller chunks
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
     split_docs = text_splitter.split_documents(documents)
 
     # Re-initialize vector store to ensure persistence
@@ -37,7 +37,7 @@ def ingest_pdf(pdf_path):
 
 def process_query_with_memory(query, chat_memory):
     """Processes user queries while maintaining conversational memory."""
-    retriever = vector_store.as_retriever()
+    retriever = vector_store.as_retriever(search_kwargs={"k": 3})  # Limit retrieved chunks
 
     # Initialize LLM
     llm = HuggingFaceHub(repo_id=LLM_MODEL, model_kwargs={"max_new_tokens": 500})
@@ -49,7 +49,7 @@ def process_query_with_memory(query, chat_memory):
         memory=chat_memory
     )
 
-    # Fix: Properly load chat history
-    chat_history = chat_memory.load_memory_variables({}).get("chat_history", [])
+    # Fix: Properly filter chat history to avoid repetition
+    chat_history = list(set(chat_memory.load_memory_variables({}).get("chat_history", [])))
 
-    return conversation_chain.run({"question": query, "chat_history": chat_history})
+    return conversation_chain.run({"question": query, "chat_history": chat_history})
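
On the splitter change: halving chunk_overlap from 100 to 50 means consecutive chunks share less repeated text, so the index carries fewer near-duplicate passages. A toy illustration of the new setting, using the same class as the file (the sample text is illustrative, not from this repo):

    from langchain.text_splitter import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    chunks = splitter.split_text("lorem ipsum " * 300)  # ~3600 characters of toy text
    # Consecutive chunks now share at most ~50 characters instead of ~100.
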
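On the retriever change: search_kwargs={"k": 3} caps how many chunks the vector store returns per query, which, together with the smaller overlap, limits duplicated context reaching the LLM. A minimal usage sketch, assuming this file's module-level vector_store (the query string is illustrative):

    retriever = vector_store.as_retriever(search_kwargs={"k": 3})
    docs = retriever.get_relevant_documents("What is this document about?")
    print(len(docs))  # at most 3 chunks
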
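One caveat on the new deduplication line: set() discards ordering and requires hashable elements, and LangChain chat-history entries are not guaranteed to be hashable when the memory returns message objects. A sketch of an order-preserving alternative, assuming the memory is configured with return_messages=True; the dedupe_history helper and its (type, content) key are illustrative, not part of this commit:

    def dedupe_history(messages):
        # Keep the first occurrence of each message, preserving conversation order.
        seen = set()
        unique = []
        for msg in messages:
            # Key on message type and text, since the objects themselves may be unhashable.
            key = (type(msg).__name__, msg.content)
            if key not in seen:
                seen.add(key)
                unique.append(msg)
        return unique

    chat_history = dedupe_history(chat_memory.load_memory_variables({}).get("chat_history", []))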