Update document_processor_image_test.py
document_processor_image_test.py  +66 -66  CHANGED

  9     from langchain_community.embeddings import HuggingFaceEmbeddings
 10     from langchain_community.vectorstores import FAISS
 11
 12  +  # PyMuPDF library
 13     try:
 14         import fitz  # PyMuPDF
 15         PYMUPDF_AVAILABLE = True
 16  +      print("✅ PyMuPDF library available")
 17     except ImportError:
 18         PYMUPDF_AVAILABLE = False
 19  +      print("⚠️ PyMuPDF library is not installed. Install with: pip install PyMuPDF")
 20
 21  +  # PDF processing utilities
 22     import pytesseract
 23     from PIL import Image
 24     from pdf2image import convert_from_path
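
The guarded import above only sets a module-level flag; none of the hunks shown here consume it. A minimal sketch of how such a flag is typically used, with page_count_or_none as a hypothetical helper that degrades gracefully when PyMuPDF is missing:

    try:
        import fitz  # PyMuPDF
        PYMUPDF_AVAILABLE = True
    except ImportError:
        PYMUPDF_AVAILABLE = False

    def page_count_or_none(pdf_path):
        # Hypothetical helper: return None instead of raising when the optional dependency is absent.
        if not PYMUPDF_AVAILABLE:
            return None
        with fitz.open(pdf_path) as doc:
            return doc.page_count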

 26     from pymupdf4llm import LlamaMarkdownReader
 27
 28     # --------------------------------
 29  +  # Log Output
 30     # --------------------------------
 31
 32     def log(msg):
 33         print(f"[{time.strftime('%H:%M:%S')}] {msg}")
 34
 35     # --------------------------------
 36  +  # Text Cleaning Function
 37     # --------------------------------
 38
 39     def clean_text(text):

 41
 42     def apply_corrections(text):
 43         corrections = {
 44  +          'ΒΊΒ©': 'info', 'Γ': 'of', 'Β½': 'operation', 'Γ': '', 'Β©': '',
 45             'Γ’β¬β’': "'", 'Γ’β¬Ε': '"', 'Γ’β¬': '"'
 46         }
 47         for k, v in corrections.items():
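
apply_corrections is a plain substring-replacement pass over a table of known mis-encoded sequences (the keys themselves appear garbled in this view). A self-contained sketch of the same idea, assuming the targets are the usual UTF-8-as-cp1252 quote artifacts; fix_mojibake is an illustrative name, not the author's function:

    def fix_mojibake(text, corrections=None):
        # Replace each known mis-encoded sequence with its intended character.
        corrections = corrections or {'â€™': "'", 'â€œ': '"', 'â€': '"'}
        for bad, good in corrections.items():
            text = text.replace(bad, good)
        return text

    # fix_mojibake('donâ€™t')  ->  "don't"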

 49         return text
 50
 51     # --------------------------------
 52  +  # HWPX Processing (Section-wise Processing Only)
 53     # --------------------------------
 54
 55     def load_hwpx(file_path):
 56  +      """Loading HWPX file (using XML parsing method only)"""
 57         import zipfile
 58         import xml.etree.ElementTree as ET
 59         import chardet
 60
 61  +      log(f"📥 Starting HWPX section-wise processing: {file_path}")
 62         start = time.time()
 63         documents = []
 64

 67             file_list = zip_ref.namelist()
 68             section_files = [f for f in file_list
 69                              if f.startswith('Contents/section') and f.endswith('.xml')]
 70  +          section_files.sort()  # Sort by section0.xml, section1.xml order
 71
 72  +          log(f"📄 Found section files: {len(section_files)}")
 73
 74             for section_idx, section_file in enumerate(section_files):
 75                 with zip_ref.open(section_file) as xml_file:

 83                     tree = ET.ElementTree(ET.fromstring(text))
 84                     root = tree.getroot()
 85
 86  +                  # Find text without namespace
 87                     t_elements = [elem for elem in root.iter() if elem.tag.endswith('}t') or elem.tag == 't']
 88                     body_text = ""
 89                     for elem in t_elements:
 90                         if elem.text:
 91                             body_text += clean_text(elem.text) + " "
 92
 93  +                  # Set page metadata to empty
 94                     page_value = ""
 95
 96                     if body_text.strip():
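
For context on the HWPX handling above: an .hwpx file is a ZIP archive whose body text lives in Contents/section*.xml, and the loader matches t elements by tag suffix so it works whether or not a namespace prefix is present. A standalone sketch of that idea (function and file names are illustrative):

    import zipfile
    import xml.etree.ElementTree as ET

    def hwpx_text_sketch(path):
        texts = []
        with zipfile.ZipFile(path, 'r') as zf:
            sections = sorted(n for n in zf.namelist()
                              if n.startswith('Contents/section') and n.endswith('.xml'))
            for name in sections:
                root = ET.fromstring(zf.read(name))
                # Tags usually look like '{namespace}t', so match on the suffix only.
                for elem in root.iter():
                    if (elem.tag == 't' or elem.tag.endswith('}t')) and elem.text:
                        texts.append(elem.text.strip())
        return ' '.join(texts)

    # hwpx_text_sketch('sample.hwpx')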

104                             "total_sections": len(section_files)
105                         }
106                     ))
107  +                  log(f"✅ Section text extraction complete (chars: {len(body_text)})")
108
109  +                  # Find tables
110                     table_elements = [elem for elem in root.iter() if elem.tag.endswith('}table') or elem.tag == 'table']
111                     if table_elements:
112                         table_text = ""

136                             "total_sections": len(section_files)
137                         }
138                     ))
139  +                  log(f"📊 Table extraction complete")
140
141  +                  # Find images
142                     if [elem for elem in root.iter() if elem.tag.endswith('}picture') or elem.tag == 'picture']:
143                         documents.append(Document(
144  +                          page_content="[Image included]",
145                             metadata={
146                                 "source": file_path,
147                                 "filename": os.path.basename(file_path),

150                                 "total_sections": len(section_files)
151                             }
152                         ))
153  +                      log(f"🖼️ Image found")
154
155     except Exception as e:
156  +      log(f"❌ HWPX processing error: {e}")
157
158     duration = time.time() - start
159
160  +  # Print summary of document information
161     if documents:
162  +      log(f"📊 Number of extracted documents: {len(documents)}")
163
164  +  log(f"✅ HWPX processing complete: {file_path} ⏱️ {duration:.2f}s, total {len(documents)} documents")
165     return documents
166
167  # --------------------------------
168  + # PDF Processing Functions (same as before)
169  # --------------------------------
170
171  def run_ocr_on_image(image: Image.Image, lang='kor+eng'):

182                 page_ocr_data[page_num] = text.strip()
183         return page_ocr_data
184     except Exception as e:
185  +      print(f"❌ Image OCR failed: {e}")
186         return {}
187
188  def extract_tables_with_pdfplumber(pdf_path):
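
extract_images_with_ocr, whose error path appears above, follows the standard pdf2image plus pytesseract pairing: render each PDF page to a PIL image, then OCR it with Korean and English language data. A minimal sketch under that assumption (the poppler and tesseract binaries, plus the kor traineddata, must be installed; dpi is a tunable guess):

    import pytesseract
    from pdf2image import convert_from_path

    def ocr_pages_sketch(pdf_path, lang='kor+eng', dpi=200):
        page_text = {}
        # convert_from_path renders one PIL image per page, in page order.
        for page_num, image in enumerate(convert_from_path(pdf_path, dpi=dpi), start=1):
            text = pytesseract.image_to_string(image, lang=lang)
            if text.strip():
                page_text[page_num] = text.strip()
        return page_text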

203                 page_table_data[page_num] = table_text.strip()
204         return page_table_data
205     except Exception as e:
206  +      print(f"❌ Table extraction failed: {e}")
207         return {}
208
209  def extract_body_text_with_pages(pdf_path):
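
The pdfplumber helper whose tail appears above maps page numbers to the text of any tables found on that page. A compact sketch of the extract_tables() loop it is presumably built on; joining cells with ' | ' is an assumption, not something visible in the diff:

    import pdfplumber

    def tables_by_page_sketch(pdf_path):
        page_tables = {}
        with pdfplumber.open(pdf_path) as pdf:
            for page_num, page in enumerate(pdf.pages, start=1):
                rows = []
                for table in page.extract_tables():
                    for row in table:
                        # Cells may be None; keep only populated ones.
                        rows.append(' | '.join(cell for cell in row if cell))
                if rows:
                    page_tables[page_num] = '\n'.join(rows)
        return page_tables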

239             start = end - 100
240
241     except Exception as e:
242  +      print(f"❌ Body extraction failed: {e}")
243
244     return page_body_data
245
246  def load_pdf_with_metadata(pdf_path):
247  +   """Extracts page-specific information from a PDF file"""
248  +   log(f"📄 Starting PDF page-wise processing: {pdf_path}")
249      start = time.time()
250
251  +   # First, check the actual number of pages using PyPDFLoader
252      try:
253          from langchain_community.document_loaders import PyPDFLoader
254          loader = PyPDFLoader(pdf_path)
255          pdf_pages = loader.load()
256          actual_total_pages = len(pdf_pages)
257  +       log(f"📄 Actual page count as verified by PyPDFLoader: {actual_total_pages}")
258      except Exception as e:
259  +       log(f"❌ PyPDFLoader page count verification failed: {e}")
260          actual_total_pages = 1
261
262      try:
263          page_tables = extract_tables_with_pdfplumber(pdf_path)
264      except Exception as e:
265          page_tables = {}
266  +       print(f"❌ Table extraction failed: {e}")
267
268      try:
269          page_ocr = extract_images_with_ocr(pdf_path)
270      except Exception as e:
271          page_ocr = {}
272  +       print(f"❌ Image OCR failed: {e}")
273
274      try:
275          page_body = extract_body_text_with_pages(pdf_path)
276      except Exception as e:
277          page_body = {}
278  +       print(f"❌ Body extraction failed: {e}")
279
280      duration = time.time() - start
281  +   log(f"✅ PDF page-wise processing complete: {pdf_path} ⏱️ {duration:.2f}s")
282
283  +   # Set the total number of pages based on the actual number of pages
284      all_pages = set(page_tables.keys()) | set(page_ocr.keys()) | set(page_body.keys())
285      if all_pages:
286          max_extracted_page = max(all_pages)
287  +       # Use the greater of the actual and extracted page numbers
288          total_pages = max(actual_total_pages, max_extracted_page)
289      else:
290          total_pages = actual_total_pages
291
292  +   log(f"📄 Final total page count set to: {total_pages}")
293
294      docs = []
295
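
The page-count logic above trusts PyPDFLoader's count but never reports fewer pages than the highest page number any extractor actually returned. The same rule in isolation, as a small sketch:

    def reconcile_page_count(actual_total_pages, *page_dicts):
        # Take the larger of the loader's count and the highest extracted page number.
        extracted = set().union(*(d.keys() for d in page_dicts)) if page_dicts else set()
        return max(actual_total_pages, max(extracted)) if extracted else actual_total_pages

    # reconcile_page_count(3, {1: 'table'}, {5: 'ocr'})  ->  5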

305                     "total_pages": total_pages
306                 }
307             ))
308  +          log(f"📊 Page {page_num}: Table extraction complete")
309
310         if page_num in page_body and page_body[page_num].strip():
311             docs.append(Document(

318                     "total_pages": total_pages
319                 }
320             ))
321  +          log(f"📄 Page {page_num}: Body extraction complete")
322
323         if page_num in page_ocr and page_ocr[page_num].strip():
324             docs.append(Document(

331                     "total_pages": total_pages
332                 }
333             ))
334  +          log(f"🖼️ Page {page_num}: OCR extraction complete")
335
336     if not docs:
337         docs.append(Document(
338  +          page_content="[Content extraction failed]",
339             metadata={
340                 "source": pdf_path,
341                 "filename": os.path.basename(pdf_path),

345             }
346         ))
347
348  +   # Print summary of page information
349      if docs:
350          page_numbers = [doc.metadata.get('page', 0) for doc in docs if doc.metadata.get('page')]
351          if page_numbers:
352  +           log(f"📄 Extracted page range: {min(page_numbers)} ~ {max(page_numbers)}")
353
354  +   log(f"📄 PDF documents with extracted pages: {len(docs)} documents (total {total_pages} pages)")
355      return docs
356
357  # --------------------------------
358  + # Document Loading and Splitting
359  # --------------------------------
360
361  def load_documents(folder_path):
362      documents = []
363
364      for file in glob.glob(os.path.join(folder_path, "*.hwpx")):
365  +       log(f"📄 HWPX file found: {file}")
366          docs = load_hwpx(file)
367          documents.extend(docs)
368
369      for file in glob.glob(os.path.join(folder_path, "*.pdf")):
370  +       log(f"📄 PDF file found: {file}")
371          documents.extend(load_pdf_with_metadata(file))
372
373  +   log(f"📦 Document loading complete! Total documents: {len(documents)}")
374      return documents
375
376  def split_documents(documents, chunk_size=800, chunk_overlap=100):
377  +   log("🔪 Starting chunk splitting")
378      splitter = RecursiveCharacterTextSplitter(
379          chunk_size=chunk_size,
380          chunk_overlap=chunk_overlap,
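
split_documents continues past this hunk; its tail, with enriched_chunk and the chunk_index metadata, appears in the next hunk below. A sketch of the overall pattern, with the provenance header format and the splitter import path as assumptions:

    from langchain_core.documents import Document
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    def split_into_chunks(documents, chunk_size=800, chunk_overlap=100):
        splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        chunks = []
        for doc in documents:
            for i, piece in enumerate(splitter.split_text(doc.page_content)):
                # Prepend lightweight provenance so each chunk is self-describing (assumed format).
                header = f"[{doc.metadata.get('filename', '')} | page {doc.metadata.get('page', '?')}]"
                chunks.append(Document(page_content=f"{header}\n{piece}",
                                       metadata={**doc.metadata, "chunk_index": i}))
        return chunks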

389                  page_content=enriched_chunk,
390                  metadata={**doc.metadata, "chunk_index": i}
391              ))
392  +   log(f"✅ Chunk splitting complete: Created {len(chunks)} chunks")
393      return chunks
394
395  # --------------------------------
396  + # Main Execution
397  # --------------------------------
398
399  if __name__ == "__main__":
400      folder = "dataset_test"
401  +   log("🚀 PyMuPDF-based document processing started")
402      docs = load_documents(folder)
403  +   log("📦 Document loading complete")
404
405  +   # Page information check
406  +   log("📄 Page information summary:")
407      page_info = {}
408      for doc in docs:
409          source = doc.metadata.get('source', 'unknown')

417
418      for source, info in page_info.items():
419          max_page = max(info['pages']) if info['pages'] and isinstance(max(info['pages']), int) else 'unknown'
420  +       log(f"  📄 {os.path.basename(source)}: {max_page} pages, type: {info['types']}")
421
422      chunks = split_documents(docs)
423  +   log("📡 E5-Large-Instruct embedding preparation")
424      embedding_model = HuggingFaceEmbeddings(
425          model_name="intfloat/e5-large-v2",
426          model_kwargs={"device": "cuda"}
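
A practical note on the embedding choice: the log line says E5-Large-Instruct while the model id is intfloat/e5-large-v2, and E5 models are trained with "query: " and "passage: " prefixes. A hedged sketch of adding those prefixes around the same HuggingFaceEmbeddings wrapper (encode_kwargs normalization is optional and not part of this diff):

    from langchain_community.embeddings import HuggingFaceEmbeddings

    embedding_model = HuggingFaceEmbeddings(
        model_name="intfloat/e5-large-v2",
        model_kwargs={"device": "cuda"},
        encode_kwargs={"normalize_embeddings": True},  # cosine-style similarity
    )

    # E5-style prefixes; the texts here are placeholders.
    doc_vectors = embedding_model.embed_documents(["passage: example chunk text"])
    query_vector = embedding_model.embed_query("query: example question")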

429      vectorstore = FAISS.from_documents(chunks, embedding_model)
430      vectorstore.save_local("vector_db")
431
432  +   log(f"📊 Total number of documents: {len(docs)}")
433  +   log(f"📊 Total number of chunks: {len(chunks)}")
434  +   log("✅ FAISS save complete: vector_db")
435
436  +   # Sample output with page information
437  +   log("\n📄 Sample including actual page information:")
438      for i, chunk in enumerate(chunks[:5]):
439          meta = chunk.metadata
440  +       log(f"  Chunk {i+1}: {meta.get('type')} | Page {meta.get('page')} | {os.path.basename(meta.get('source', 'unknown'))}")
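
Once vector_db is saved, the retrieval side would reload the index with the same embedding model before searching. A sketch of that counterpart (allow_dangerous_deserialization is required by recent langchain versions when loading a local FAISS pickle; the query text is illustrative):

    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    embedding_model = HuggingFaceEmbeddings(model_name="intfloat/e5-large-v2",
                                            model_kwargs={"device": "cuda"})
    vectorstore = FAISS.load_local("vector_db", embedding_model,
                                   allow_dangerous_deserialization=True)

    for doc in vectorstore.similarity_search("query: document processing pipeline", k=3):
        print(doc.metadata.get("page"), doc.page_content[:80])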