jarif committed
Commit 0bf85d0 · verified · 1 Parent(s): db19c60

Update ingest.py

Files changed (1): ingest.py +7 -14
ingest.py CHANGED
@@ -1,9 +1,9 @@
 import os
 import logging
 import faiss
-from langchain_community.document_loaders import PDFMinerLoader
+from langchain.document_loaders import PDFMinerLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import FAISS

 # Configure logging
@@ -11,15 +11,16 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

 def create_faiss_index():
+    """
+    Create a FAISS index from documents in the 'docs' directory.
+    """
     documents = []
-    docs_dir = "docs"  # Directory where PDF files are stored
+    docs_dir = "docs"

-    # Check if the 'docs' directory exists
     if not os.path.exists(docs_dir):
         logger.error(f"The directory '{docs_dir}' does not exist.")
         return

-    # Walk through the 'docs' directory and load PDF files
     for root, dirs, files in os.walk(docs_dir):
         for file in files:
             if file.endswith(".pdf"):
@@ -36,25 +37,20 @@ def create_faiss_index():
             except Exception as e:
                 logger.error(f"Error loading {file_path}: {e}")

-    # Check if any documents were loaded
     if not documents:
         logger.error("No documents were loaded. Check the 'docs' directory and file paths.")
         return

     logger.info(f"Loaded {len(documents)} documents.")

-    # Split documents into text chunks
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
     texts = text_splitter.split_documents(documents)
     logger.info(f"Created {len(texts)} text chunks.")
-
-    # Check if text chunks were created
     if not texts:
         logger.error("No text chunks created. Check the text splitting process.")
         return

     try:
-        # Initialize embeddings using HuggingFace models
         embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
         logger.info("Embeddings initialized successfully.")
     except Exception as e:
@@ -62,10 +58,7 @@ def create_faiss_index():
         return

     try:
-        # Create a FAISS index and save it
-        index = faiss.IndexFlatL2(embeddings.embedding_size)
-        vector_store = FAISS.from_documents(texts, embeddings, index)
-        vector_store.save_local("faiss_index")
+        faiss_index = FAISS.from_documents(texts, embeddings, index_path="faiss_index/index.faiss")
         logger.info(f"Created FAISS index with {len(texts)} vectors.")
     except Exception as e:
         logger.error(f"Failed to create FAISS index: {e}")