jarif committed on
Commit 3220381 · verified · 1 Parent(s): be6c976

Update ingest.py

Files changed (1):
  ingest.py +47 -87
ingest.py CHANGED
@@ -1,6 +1,6 @@
 import os
 import logging
-from langchain_community.document_loaders import PyPDFLoader
+from langchain_community.document_loaders import PDFMinerLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
@@ -9,66 +9,6 @@ from langchain_community.vectorstores import FAISS
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-def load_documents(docs_dir):
-    """
-    Load documents from a directory.
-    :param docs_dir: Directory containing PDF documents.
-    :return: List of loaded documents.
-    """
-    documents = []
-    for root, dirs, files in os.walk(docs_dir):
-        for file in files:
-            if file.endswith(".pdf"):
-                file_path = os.path.join(root, file)
-                logger.info(f"Loading document: {file_path}")
-                try:
-                    loader = PyPDFLoader(file_path)
-                    loaded_docs = loader.load()
-                    if loaded_docs:
-                        documents.extend(loaded_docs)
-                        logger.info(f"Loaded {len(loaded_docs)} pages from {file_path}.")
-                    else:
-                        logger.warning(f"No content extracted from {file_path}. Possibly encrypted or empty.")
-                except Exception as e:
-                    logger.error(f"Error loading {file_path}: {e}")
-    return documents
-
-def split_text(documents):
-    """
-    Split documents into text chunks.
-    :param documents: List of documents to be split.
-    :return: List of text chunks.
-    """
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
-    texts = text_splitter.split_documents(documents)
-
-    if not texts:
-        logger.error("No text chunks were created. Check the text splitting process.")
-        return None
-
-    logger.info(f"Created {len(texts)} text chunks.")
-    for i, text in enumerate(texts[:5]):  # Sample first 5 chunks
-        logger.debug(f"Sample chunk {i}: {text[:100]}...")  # Print first 100 characters
-
-    return texts
-
-def create_embeddings():
-    """
-    Create embeddings using a HuggingFace model.
-    :return: HuggingFaceEmbeddings object.
-    """
-    model_name = "sentence-transformers/all-MiniLM-L6-v2"
-    embeddings = HuggingFaceEmbeddings(model_name=model_name)
-
-    try:
-        sample_embedding = embeddings.embed_query("sample text")
-        logger.debug(f"Sample embedding: {sample_embedding[:5]}... (truncated for brevity)")
-    except Exception as e:
-        logger.error(f"Error generating sample embedding: {e}")
-        return None
-
-    return embeddings
-
 def create_faiss_index(texts, embeddings):
     """
     Create a FAISS index from text chunks and embeddings.
@@ -79,7 +19,7 @@ def create_faiss_index(texts, embeddings):
     try:
         db = FAISS.from_documents(texts, embeddings)
         logger.info(f"Created FAISS index with {len(texts)} vectors")
-        # Directly check the FAISS index size
+        # Check the FAISS index size
         if len(db.index) > 0:
             logger.info(f"FAISS index contains {len(db.index)} vectors.")
         else:
@@ -98,42 +38,62 @@ def save_faiss_index(db, index_path):
     """
     try:
         db.save_local(index_path)
+        # Check the file size
+        index_file_path = os.path.join(index_path, "index.faiss")
+        file_size = os.path.getsize(index_file_path)
         logger.info(f"FAISS index saved to {index_path}")
+        logger.info(f"Index file size: {file_size} bytes")
+        if file_size == 0:
+            logger.error(f"Index file '{index_file_path}' is empty.")
     except Exception as e:
         logger.error(f"Failed to save FAISS index to {index_path}: {e}")
 
-def main():
-    docs_dir = "docs"  # Adjust to your document directory
-    index_path = "faiss_index"
+def create_faiss_index_from_pdfs():
+    documents = []
+    docs_dir = "docs"
 
-    logger.info("Starting document processing...")
+    if not os.path.exists(docs_dir):
+        logger.error(f"The directory '{docs_dir}' does not exist.")
+        return
+
+    for root, dirs, files in os.walk(docs_dir):
+        for file in files:
+            if file.endswith(".pdf"):
+                file_path = os.path.join(root, file)
+                logger.info(f"Loading document: {file_path}")
+                try:
+                    loader = PDFMinerLoader(file_path)
+                    documents.extend(loader.load())
+                except Exception as e:
+                    logger.error(f"Error loading {file_path}: {e}")
 
-    # Load documents
-    documents = load_documents(docs_dir)
     if not documents:
-        logger.error("No documents were loaded. Exiting.")
+        logger.error("No documents were loaded. Check the 'docs' directory and file paths.")
        return
 
-    # Split text into chunks
-    texts = split_text(documents)
-    if texts is None:
-        logger.error("Text splitting failed. Exiting.")
-        return
+    logger.info(f"Loaded {len(documents)} documents.")
 
-    # Create embeddings
-    embeddings = create_embeddings()
-    if embeddings is None:
-        logger.error("Embeddings creation failed. Exiting.")
-        return
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
+    texts = text_splitter.split_documents(documents)
 
-    # Create FAISS index
-    db = create_faiss_index(texts, embeddings)
-    if db is None:
-        logger.error("FAISS index creation failed. Exiting.")
+    if not texts:
+        logger.error("No text chunks were created. Check the text splitting process.")
         return
 
-    # Save FAISS index
-    save_faiss_index(db, index_path)
+    logger.info(f"Created {len(texts)} text chunks.")
+
+    try:
+        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+    except Exception as e:
+        logger.error(f"Failed to initialize embeddings: {e}")
+        return
 
-if __name__ == '__main__':
-    main()
+    db = create_faiss_index(texts, embeddings)
+    if db:
+        index_dir = "faiss_index"
+        if not os.path.exists(index_dir):
+            os.makedirs(index_dir)
+        save_faiss_index(db, index_dir)
+
+if __name__ == "__main__":
+    create_faiss_index_from_pdfs()
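For context, a minimal sketch (not part of this commit) of loading the saved index back for retrieval, assuming the same embedding model used at ingest time. The "faiss_index" path and model name mirror the values in the diff; the query string is illustrative, and newer langchain_community releases require an explicit opt-in to deserialize a locally pickled store:

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Re-create the embedding wrapper with the same model used in ingest.py.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Load the index that save_faiss_index wrote to the faiss_index directory.
db = FAISS.load_local(
    "faiss_index",
    embeddings,
    allow_dangerous_deserialization=True,  # opt-in required by newer releases
)

# Retrieve the three chunks closest to a query.
for doc in db.similarity_search("example query", k=3):
    print(doc.page_content[:200])

One caveat worth flagging: create_faiss_index still calls len(db.index) on the raw faiss index, which may raise a TypeError depending on the faiss build; db.index.ntotal is the documented way to read the vector count.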