jarif committed on
Commit
b2b16b5
·
verified ·
1 Parent(s): 4616152

Update ingest.py

Browse files
Files changed (1) hide show
  1. ingest.py +70 -64
ingest.py CHANGED
@@ -1,64 +1,70 @@
1
- import os
2
- import logging
3
- from langchain.document_loaders import PDFMinerLoader
4
- from langchain.text_splitter import RecursiveCharacterTextSplitter
5
- from langchain.embeddings import HuggingFaceEmbeddings
6
- from langchain.vectorstores import Chroma
7
-
8
- # Configure logging
9
- logging.basicConfig(level=logging.INFO)
10
- logger = logging.getLogger(__name__)
11
-
12
- def create_vector_store():
13
- documents = []
14
- docs_dir = "docs"
15
- if not os.path.exists(docs_dir):
16
- logger.error(f"The directory '{docs_dir}' does not exist.")
17
- return
18
-
19
- for root, dirs, files in os.walk(docs_dir):
20
- for file in files:
21
- if file.endswith(".pdf"):
22
- file_path = os.path.join(root, file)
23
- logger.info(f"Loading document: {file_path}")
24
- try:
25
- loader = PDFMinerLoader(file_path)
26
- loaded_docs = loader.load()
27
- if loaded_docs:
28
- logger.info(f"Loaded {len(loaded_docs)} documents from {file_path}")
29
- documents.extend(loaded_docs)
30
- else:
31
- logger.warning(f"No documents loaded from {file_path}")
32
- except Exception as e:
33
- logger.error(f"Error loading {file_path}: {e}")
34
-
35
- if not documents:
36
- logger.error("No documents were loaded. Check the 'docs' directory and file paths.")
37
- return
38
-
39
- logger.info(f"Loaded {len(documents)} documents.")
40
-
41
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
42
- texts = text_splitter.split_documents(documents)
43
- logger.info(f"Created {len(texts)} text chunks.")
44
-
45
- if not texts:
46
- logger.error("No text chunks created. Check the text splitting process.")
47
- return
48
-
49
- try:
50
- embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
51
- logger.info("Embeddings initialized successfully.")
52
- except Exception as e:
53
- logger.error(f"Failed to initialize embeddings: {e}")
54
- return
55
-
56
- try:
57
- vector_store = Chroma.from_documents(texts, embeddings, persist_directory="./chroma_db")
58
- vector_store.persist()
59
- logger.info(f"Created Chroma vector store with {len(texts)} vectors.")
60
- except Exception as e:
61
- logger.error(f"Failed to create Chroma vector store: {e}")
62
-
63
- if __name__ == "__main__":
64
- create_vector_store()
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ from langchain.document_loaders import PDFMinerLoader
4
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
5
+ from langchain.embeddings import HuggingFaceEmbeddings
6
+ from langchain.vectorstores import Chroma
7
+
8
+ # Configure logging
9
+ logging.basicConfig(level=logging.INFO)
10
+ logger = logging.getLogger(__name__)
11
+
12
def _load_pdf_documents(docs_dir):
    """Recursively load every PDF under *docs_dir* with PDFMinerLoader.

    Files that fail to load are logged and skipped so one corrupt PDF
    cannot abort the whole ingest run.  Returns a (possibly empty) list
    of langchain Document objects.
    """
    documents = []
    for root, _dirs, files in os.walk(docs_dir):
        for name in files:
            # Case-insensitive match so '.PDF' / '.Pdf' files are not skipped.
            if not name.lower().endswith(".pdf"):
                continue
            file_path = os.path.join(root, name)
            logger.info("Loading document: %s", file_path)
            try:
                # Keep the try body minimal: only the load can raise here.
                loaded_docs = PDFMinerLoader(file_path).load()
            except Exception as e:
                logger.error("Error loading %s: %s", file_path, e)
                continue
            if loaded_docs:
                logger.info("Loaded %d documents from %s", len(loaded_docs), file_path)
                documents.extend(loaded_docs)
            else:
                logger.warning("No documents loaded from %s", file_path)
    return documents


def create_vector_store():
    """Build and persist a Chroma vector store from the PDFs under ./docs.

    Pipeline: load PDFs recursively from the ``docs`` directory, split
    them into overlapping text chunks, embed the chunks with a
    sentence-transformers model, and persist the resulting index to
    ``./chroma_db``.

    Returns:
        None.  Progress and failures are reported via ``logger``; the
        function returns early (without raising) on any fatal condition.
    """
    persist_directory = "./chroma_db"
    # exist_ok=True avoids the race between an existence check and creation.
    if not os.path.isdir(persist_directory):
        os.makedirs(persist_directory, exist_ok=True)
        logger.info("Created directory '%s'", persist_directory)

    docs_dir = "docs"
    if not os.path.isdir(docs_dir):
        logger.error(f"The directory '{docs_dir}' does not exist.")
        return

    documents = _load_pdf_documents(docs_dir)
    if not documents:
        logger.error("No documents were loaded. Check the 'docs' directory and file paths.")
        return
    logger.info("Loaded %d documents.", len(documents))

    # Chunk sizes chosen for short-context embedding models; 50-char overlap
    # preserves continuity across chunk boundaries.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = text_splitter.split_documents(documents)
    logger.info("Created %d text chunks.", len(texts))
    if not texts:
        logger.error("No text chunks created. Check the text splitting process.")
        return

    try:
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        logger.info("Embeddings initialized successfully.")
    except Exception as e:
        logger.error(f"Failed to initialize embeddings: {e}")
        return

    try:
        vector_store = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory)
        vector_store.persist()
        logger.info("Created Chroma vector store with %d vectors.", len(texts))
    except Exception as e:
        logger.error(f"Failed to create Chroma vector store: {e}")
68
# Script entry point: build (or rebuild) the Chroma vector store from ./docs.
if __name__ == "__main__":
    create_vector_store()