jarif committed on
Commit
64e8ee9
·
verified ·
1 Parent(s): 356cc10

Update ingest.py

Browse files
Files changed (1) hide show
  1. ingest.py +67 -57
ingest.py CHANGED
@@ -2,26 +2,15 @@ import os
2
  import logging
3
  from langchain.document_loaders import PyPDFLoader
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
- from langchain_community.embeddings import HuggingFaceEmbeddings
6
- from langchain_community.vectorstores import FAISS
7
 
8
- # Set up logging
9
- logging.basicConfig(level=logging.DEBUG)
10
  logger = logging.getLogger(__name__)
11
 
12
- def create_faiss_index():
13
  documents = []
14
- docs_dir = "docs"
15
-
16
- if not os.path.exists(docs_dir):
17
- logger.error(f"The directory '{docs_dir}' does not exist.")
18
- return
19
-
20
- if not os.listdir(docs_dir):
21
- logger.error(f"The directory '{docs_dir}' is empty.")
22
- return
23
-
24
- # Load documents
25
  for root, dirs, files in os.walk(docs_dir):
26
  for file in files:
27
  if file.endswith(".pdf"):
@@ -34,71 +23,92 @@ def create_faiss_index():
34
  documents.extend(loaded_docs)
35
  logger.info(f"Loaded {len(loaded_docs)} pages from {file_path}.")
36
  else:
37
- logger.warning(f"No content extracted from {file_path}.")
38
  except Exception as e:
39
  logger.error(f"Error loading {file_path}: {e}")
 
40
 
41
- if not documents:
42
- logger.error("No documents were loaded. Check the 'docs' directory and file paths.")
43
- return
44
-
45
- logger.info(f"Total loaded documents: {len(documents)}")
46
-
47
- # Split text into chunks
48
  text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
49
  texts = text_splitter.split_documents(documents)
50
 
51
  if not texts:
52
  logger.error("No text chunks were created. Check the text splitting process.")
53
- return
54
 
55
  logger.info(f"Created {len(texts)} text chunks.")
56
-
57
- # Check a sample of text chunks
58
- for i, text in enumerate(texts[:5]):
59
- logger.debug(f"Sample chunk {i}: {text[:100]}...")
60
-
61
- # Create embeddings
62
- try:
63
- embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
64
- except Exception as e:
65
- logger.error(f"Failed to initialize embeddings: {e}")
66
- return
67
-
68
- # Verify embeddings are valid by generating one
69
  try:
70
  sample_embedding = embeddings.embed_query("sample text")
71
  logger.debug(f"Sample embedding: {sample_embedding[:5]}... (truncated for brevity)")
72
  except Exception as e:
73
  logger.error(f"Error generating sample embedding: {e}")
74
- return
75
 
76
- # Create FAISS index
 
 
77
  try:
78
  db = FAISS.from_documents(texts, embeddings)
79
  logger.info(f"Created FAISS index with {len(texts)} vectors")
 
 
 
 
 
80
  except Exception as e:
81
  logger.error(f"Failed to create FAISS index: {e}")
82
- return
83
 
84
- # Save FAISS index locally
85
- index_dir = "faiss_index"
86
- if not os.path.exists(index_dir):
87
- os.makedirs(index_dir)
88
 
 
89
  try:
90
- db.save_local(index_dir)
91
- index_path = os.path.join(index_dir, "index.faiss")
92
- if os.path.exists(index_path):
93
- logger.info(f"FAISS index file exists. Size: {os.path.getsize(index_path)} bytes.")
94
- if os.path.getsize(index_path) > 0:
95
- logger.info(f"FAISS index successfully saved to {index_path}")
96
- else:
97
- logger.error(f"FAISS index file '{index_path}' is empty.")
98
- else:
99
- logger.error(f"FAISS index file '{index_path}' does not exist after save attempt.")
100
  except Exception as e:
101
- logger.error(f"Failed to save FAISS index: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
  if __name__ == "__main__":
104
- create_faiss_index()
 
2
  import logging
3
  from langchain.document_loaders import PyPDFLoader
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
+ from langchain.embeddings import HuggingFaceEmbeddings
6
+ from langchain.vectorstores import FAISS
7
 
8
# Setup logging
# INFO keeps normal runs readable; the per-chunk and per-embedding samples in
# this module are logged at DEBUG and stay hidden unless the level is lowered.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
11
 
12
+ def load_documents(docs_dir):
13
  documents = []
 
 
 
 
 
 
 
 
 
 
 
14
  for root, dirs, files in os.walk(docs_dir):
15
  for file in files:
16
  if file.endswith(".pdf"):
 
23
  documents.extend(loaded_docs)
24
  logger.info(f"Loaded {len(loaded_docs)} pages from {file_path}.")
25
  else:
26
+ logger.warning(f"No content extracted from {file_path}. Possibly encrypted or empty.")
27
  except Exception as e:
28
  logger.error(f"Error loading {file_path}: {e}")
29
+ return documents
30
 
31
def split_text(documents):
    """Split loaded documents into overlapping text chunks.

    Args:
        documents: List of langchain ``Document`` objects to split.

    Returns:
        The list of chunked ``Document`` objects, or ``None`` when the
        splitter produced no chunks.
    """
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = text_splitter.split_documents(documents)

    if not texts:
        logger.error("No text chunks were created. Check the text splitting process.")
        return None

    logger.info(f"Created {len(texts)} text chunks.")
    for i, text in enumerate(texts[:5]):  # Sample first 5 chunks
        # Bug fix: each item is a Document, which is not subscriptable --
        # `text[:100]` raised TypeError even with DEBUG logging disabled,
        # because f-string arguments are evaluated eagerly. Preview the
        # chunk's text content instead (first 100 characters).
        preview = getattr(text, "page_content", str(text))[:100]
        logger.debug(f"Sample chunk {i}: {preview}...")

    return texts
44
+
45
def create_embeddings():
    """Initialise the HuggingFace sentence-transformer embedding model.

    Returns:
        A ``HuggingFaceEmbeddings`` instance, or ``None`` when
        initialisation or the sanity-check embedding fails.
    """
    model_name = "sentence-transformers/all-MiniLM-L6-v2"

    # Robustness: model download/initialisation can fail (missing cache, no
    # network). Report and return None -- matching every other failure path
    # in this module -- instead of crashing the pipeline.
    try:
        embeddings = HuggingFaceEmbeddings(model_name=model_name)
    except Exception as e:
        logger.error(f"Failed to initialize embeddings: {e}")
        return None

    # Verify the model actually produces vectors before handing it to FAISS.
    try:
        sample_embedding = embeddings.embed_query("sample text")
        logger.debug(f"Sample embedding: {sample_embedding[:5]}... (truncated for brevity)")
    except Exception as e:
        logger.error(f"Error generating sample embedding: {e}")
        return None

    return embeddings
57
+
58
def create_faiss_index(texts, embeddings):
    """Build a FAISS vector store from the chunked documents.

    Args:
        texts: Chunked ``Document`` objects to index.
        embeddings: Embedding function used to vectorise the chunks.

    Returns:
        The FAISS vector store, or ``None`` on failure.
    """
    try:
        db = FAISS.from_documents(texts, embeddings)
        logger.info(f"Created FAISS index with {len(texts)} vectors")
        # Bug fix: a raw faiss index object has no __len__, so
        # `len(db.index)` raised TypeError. `ntotal` is the documented
        # vector count on a faiss Index.
        if db.index.ntotal > 0:
            logger.info(f"FAISS index contains {db.index.ntotal} vectors.")
        else:
            logger.error("FAISS index contains 0 vectors after creation. Check the data and embeddings.")
    except Exception as e:
        logger.error(f"Failed to create FAISS index: {e}")
        return None

    return db
 
 
 
72
 
73
def save_faiss_index(db, index_path):
    """Persist the FAISS vector store to local disk.

    Args:
        db: The FAISS vector store to save.
        index_path: Directory path the index is written to.

    Failures are logged rather than raised so the caller need not wrap
    this in its own handler.
    """
    try:
        db.save_local(index_path)
        logger.info(f"FAISS index saved to {index_path}")
    except Exception as e:
        logger.error(f"Failed to save FAISS index to {index_path}: {e}")
79
+
80
def main():
    """Entry point: ingest PDFs from the docs directory into a FAISS index.

    Pipeline: load documents -> split into chunks -> build embeddings ->
    create FAISS index -> save it locally. Each stage signals failure with
    a falsy/None result; the pipeline aborts with a stage-specific log
    message so the operator can see exactly where ingestion stopped.
    """
    docs_dir = "docs"  # Adjust to your document directory
    index_path = "faiss_index"

    logger.info("Starting document processing...")

    documents = load_documents(docs_dir)
    if not documents:
        logger.error("No documents were loaded. Exiting.")
        return

    texts = split_text(documents)
    if texts is None:
        logger.error("Text splitting failed. Exiting.")
        return

    embeddings = create_embeddings()
    if embeddings is None:
        logger.error("Embeddings creation failed. Exiting.")
        return

    db = create_faiss_index(texts, embeddings)
    if db is None:
        logger.error("FAISS index creation failed. Exiting.")
        return

    save_faiss_index(db, index_path)
112
 
113
# Script entry point: run the ingestion pipeline when executed directly.
if __name__ == "__main__":
    main()