File size: 2,644 Bytes
575af7a 6cdbf89 575af7a 6cdbf89 575af7a 6cdbf89 575af7a 6cdbf89 0624502 6cdbf89 575af7a 6cdbf89 575af7a 6cdbf89 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 |
import os
import logging
import faiss
import numpy as np
from langchain_community.document_loaders import PDFMinerLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
# Configure root logging once at import time; INFO so progress messages show.
logging.basicConfig(level=logging.INFO)
# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)
def create_faiss_index():
    """Build a FAISS L2 index from every PDF under the local ``docs`` directory.

    Walks ``docs/`` recursively, loads each ``*.pdf`` with PDFMinerLoader,
    splits the pages into 500-character chunks (50 overlap), embeds the chunks
    with sentence-transformers/all-MiniLM-L6-v2, and writes the resulting
    index to ``faiss_index/index.faiss``. All failures are logged and cause an
    early ``return`` (the function never raises to the caller).

    Returns:
        None. Side effects: creates ``faiss_index/`` and writes the index file.
    """
    docs_dir = "docs"
    if not os.path.exists(docs_dir):
        logger.error(f"The directory '{docs_dir}' does not exist.")
        return

    documents = []
    for root, dirs, files in os.walk(docs_dir):
        for file in files:
            if file.endswith(".pdf"):
                file_path = os.path.join(root, file)
                logger.info(f"Loading document: {file_path}")
                try:
                    loader = PDFMinerLoader(file_path)
                    loaded_docs = loader.load()
                    if loaded_docs:
                        logger.info(f"Loaded {len(loaded_docs)} documents from {file_path}")
                        documents.extend(loaded_docs)
                    else:
                        logger.warning(f"No documents loaded from {file_path}")
                except Exception as e:
                    # One unreadable PDF should not abort the whole index build.
                    logger.error(f"Error loading {file_path}: {e}")

    if not documents:
        logger.error("No documents were loaded. Check the 'docs' directory and file paths.")
        return
    logger.info(f"Loaded {len(documents)} documents.")

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = text_splitter.split_documents(documents)
    logger.info(f"Created {len(texts)} text chunks.")
    if not texts:
        logger.error("No text chunks created. Check the text splitting process.")
        return

    try:
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        logger.info("Embeddings initialized successfully.")
    except Exception as e:
        logger.error(f"Failed to initialize embeddings: {e}")
        return

    try:
        # BUG FIX: the original called embeddings.embed(text) on Document
        # objects — HuggingFaceEmbeddings has no `embed` method; the API is
        # embed_documents(list[str]), and split_documents() returns Documents
        # whose text lives in .page_content.
        vectors = embeddings.embed_documents([t.page_content for t in texts])
        # BUG FIX: faiss.IndexFlatL2.add requires float32; np.array over
        # Python floats would produce float64 and make add() raise.
        embedding_vectors = np.asarray(vectors, dtype="float32")
    except Exception as e:
        logger.error(f"Failed to embed text chunks: {e}")
        return

    dimension = embedding_vectors.shape[1]
    try:
        faiss_index = faiss.IndexFlatL2(dimension)
        faiss_index.add(embedding_vectors)
        os.makedirs("faiss_index", exist_ok=True)  # Create directory if it doesn't exist
        faiss.write_index(faiss_index, "faiss_index/index.faiss")
        logger.info(f"Created FAISS index with {len(texts)} vectors.")
    except Exception as e:
        logger.error(f"Failed to create FAISS index: {e}")
        return
# Script entry point: build the index only when run directly, not on import.
if __name__ == "__main__":
    create_faiss_index()
|