Alignment-Lab-AI committed
Commit 2b11880 · verified · 1 Parent(s): b9ab648

Upload scrpt27.py with huggingface_hub

Files changed (1):
  scrpt27.py +93 -97
scrpt27.py CHANGED
@@ -20,6 +20,7 @@ from magic_pdf.filter.pdf_classify_by_type import classify
 import fitz  # PyMuPDF
 import time
 import signal
+import traceback
 
 # Set up logging
 logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -28,6 +29,7 @@ logger = logging.getLogger(__name__)
 # Minimum batch size
 MIN_BATCH_SIZE = 1
 
+
 def parse_arguments():
     parser = argparse.ArgumentParser(description="Process multiple PDFs using Magic PDF")
     parser.add_argument("--input", default="input", help="Input folder containing PDF files")
@@ -39,31 +41,35 @@ def parse_arguments():
     parser.add_argument("--initial-batch-size", type=int, default=1, help="Initial batch size for processing")
     return parser.parse_args()
 
+
 def load_config(config_path):
     with open(config_path, 'r') as f:
         return json.load(f)
 
+
 def get_available_memory(gpu_id):
     return torch.cuda.get_device_properties(gpu_id).total_memory - torch.cuda.memory_allocated(gpu_id)
 
+
 def extract_images(pdf_path, output_folder):
     doc = fitz.open(pdf_path)
     pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
     images_folder = os.path.join(output_folder, 'images')
     os.makedirs(images_folder, exist_ok=True)
-
+
     for page_num, page in enumerate(doc):
         for img_index, img in enumerate(page.get_images(full=True)):
             xref = img[0]
             base_image = doc.extract_image(xref)
             image_bytes = base_image["image"]
             image_ext = base_image["ext"]
-            image_filename = f'{pdf_name}_{page_num+1:03d}_{img_index+1:03d}.{image_ext}'
+            image_filename = f'{pdf_name}_{page_num + 1:03d}_{img_index + 1:03d}.{image_ext}'
             image_path = os.path.join(images_folder, image_filename)
             with open(image_path, "wb") as image_file:
                 image_file.write(image_bytes)
     doc.close()
 
+
 class MagicModel:
     def __init__(self, config):
         self.config = config
@@ -73,7 +79,8 @@ class MagicModel:
         with open(log_file_path, 'a') as log_file:
             log_file.write(f"Entering process_pdf\n")
             log_file.write(f" parse_type: {parse_type}, (expected: str)\n")
-            log_file.write(f" layout_info (length: {len(layout_info)}), (expected: list of dicts): {layout_info}\n")
+            log_file.write(
+                f" layout_info (length: {len(layout_info)}), (expected: list of dicts): {layout_info}\n")
         for page_index, page_info in enumerate(layout_info):
             try:
                 with open(log_file_path, 'a') as log_file:
@@ -106,6 +113,7 @@ class MagicModel:
             log_file.write(f"Exiting process_page\n")
         return result
 
+
 def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path):
     start_time = time.time()
     pdf_name = os.path.splitext(os.path.basename(input_file))[0]
@@ -134,17 +142,18 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
         torch.set_default_device('cpu')
 
         pdf_data = read_file(input_file, 'rb')
-
+
         # Perform PDF metadata scan
         metadata = pdf_meta_scan(pdf_data)
         with open(log_file_path, 'a') as log_file:
             log_file.write(f"Processing PDF: {input_file}\n")
             log_file.write(f"Metadata (expected: dict): {json.dumps(metadata, indent=2)}\n")
-
+
         # Check if metadata indicates the PDF should be dropped
         if metadata.get("_need_drop", False):
             with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Dropping PDF {input_file}: {metadata.get('_drop_reason', 'Unknown reason')}\n")
+                log_file.write(
+                    f"Dropping PDF {input_file}: {metadata.get('_drop_reason', 'Unknown reason')}\n")
             return input_file, "Dropped", None
 
         # Check if all required fields are present in metadata
@@ -153,7 +162,7 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
         for field in required_fields:
             if field not in metadata:
                 raise ValueError(f"Required field '{field}' not found in metadata for {input_file}")
-
+
         # Extract required fields for classify function
         total_page = metadata['total_page']
         page_width = metadata['page_width_pts']
@@ -172,26 +181,22 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
             log_file.write(f" img_sz_list (expected: list of lists): {img_sz_list[:5]}...\n")
             log_file.write(f" text_len_list (expected: list of ints): {text_len_list[:5]}...\n")
             log_file.write(f" img_num_list (expected: list of ints): {img_num_list[:5]}...\n")
-            log_file.write(f" text_layout_list (expected: list of strs): {text_layout_list[:5]}...\n")
+            log_file.write(
+                f" text_layout_list (expected: list of strs): {text_layout_list[:5]}...\n")
             log_file.write(f" invalid_chars (expected: bool): {invalid_chars}\n")
 
         # Classify PDF
-        try:
-            is_text_pdf, classification_results = classify(
-                total_page, page_width, page_height, img_sz_list[:total_page],
-                text_len_list[:total_page], img_num_list[:total_page],
-                text_layout_list[:len(text_layout_list)], invalid_chars
-            )
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Classification Results:\n")
-                log_file.write(f" is_text_pdf (expected: bool): {is_text_pdf}\n")
-                log_file.write(f" classification_results (expected: dict): {classification_results}\n")
+        is_text_pdf, classification_results = classify(
+            total_page, page_width, page_height, img_sz_list[:total_page],
+            text_len_list[:total_page], img_num_list[:total_page],
+            text_layout_list[:len(text_layout_list)], invalid_chars
+        )
+        with open(log_file_path, 'a') as log_file:
+            log_file.write(f"Classification Results:\n")
+            log_file.write(f" is_text_pdf (expected: bool): {is_text_pdf}\n")
+            log_file.write(
+                f" classification_results (expected: dict): {classification_results}\n")
 
-        except Exception as e:
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Error in classify function for {input_file}: {str(e)}\n")
-            return input_file, f"Classification Error: {str(e)}", None
-
         image_writer = DiskReaderWriter(output_subfolder)
         with open(log_file_path, 'a') as log_file:
             log_file.write(f"Image writer initialized: {image_writer}\n")
@@ -203,7 +208,7 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
         unipipe = UNIPipe(pdf_data, jso_useful_key, image_writer)
         with open(log_file_path, 'a') as log_file:
             log_file.write(f"UNIPipe initialized: {unipipe}\n")
-
+
         parse_type = unipipe.pipe_classify()
         with open(log_file_path, 'a') as log_file:
             log_file.write(f"pipe_classify result (expected: str): {parse_type}\n")
@@ -212,81 +217,58 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
         with open(log_file_path, 'a') as log_file:
             log_file.write(f"Detailed pipe_analyze Inputs for {input_file}:\n")
             log_file.write(f" parse_type (expected: str): {parse_type}\n")
-        try:
-            layout_info = unipipe.pipe_analyze()
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"pipe_analyze Results (expected: list of dicts, length: {len(layout_info)}): {layout_info}\n")
-        except Exception as e:
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Error in pipe_analyze for {input_file}: {str(e)}\n")
-            return input_file, f"pipe_analyze Error: {str(e)}", None
+        layout_info = unipipe.pipe_analyze()
+        with open(log_file_path, 'a') as log_file:
+            log_file.write(
+                f"pipe_analyze Results (expected: list of dicts, length: {len(layout_info)}): {layout_info}\n")
 
         # Use OCR if it's not classified as a text PDF
         if not is_text_pdf:
             parse_type = 'ocr'
         with open(log_file_path, 'a') as log_file:
-            log_file.write(f"parse_type after OCR check (expected: str): {parse_type}\n")
-
+            log_file.write(
+                f"parse_type after OCR check (expected: str): {parse_type}\n")
+
         # Process the PDF using the model
-        try:
-            parse_result = model.process_pdf(pdf_data, parse_type, layout_info, log_file_path)
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Model process_pdf result (expected: dict): {parse_result}\n")
-        except Exception as e:
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Error in model processing for {input_file}: {str(e)}\n")
-            return input_file, f"Model Processing Error: {str(e)}", None
+        parse_result = model.process_pdf(pdf_data, parse_type, layout_info, log_file_path)
+        with open(log_file_path, 'a') as log_file:
+            log_file.write(f"Model process_pdf result (expected: dict): {parse_result}\n")
 
-        try:
-            markdown_content = unipipe.pipe_mk_markdown(parse_result)
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"pipe_mk_markdown result (expected: str, length: {len(markdown_content)}): {markdown_content}\n")
-        except Exception as e:
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Error in pipe_mk_markdown for {input_file}: {str(e)}\n")
-                log_file.write(f" parse_result (expected: dict): {parse_result}\n")
-            return input_file, f"pipe_mk_markdown Error: {str(e)}", None
+        markdown_content = unipipe.pipe_mk_markdown(parse_result)
+        with open(log_file_path, 'a') as log_file:
+            log_file.write(
+                f"pipe_mk_markdown result (expected: str, length: {len(markdown_content)}): {markdown_content}\n")
+
+        uni_format = unipipe.pipe_mk_uni_format(parse_result)
+        with open(log_file_path, 'a') as log_file:
+            log_file.write(f"pipe_mk_uni_format result (expected: dict): {uni_format}\n")
 
-        try:
-            uni_format = unipipe.pipe_mk_uni_format(parse_result)
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"pipe_mk_uni_format result (expected: dict): {uni_format}\n")
-        except Exception as e:
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Error in pipe_mk_uni_format for {input_file}: {str(e)}\n")
-                log_file.write(f" parse_result (expected: dict): {parse_result}\n")
-            return input_file, f"pipe_mk_uni_format Error: {str(e)}", None
-
         # Write markdown content
         with open(os.path.join(output_subfolder, f'{pdf_name}.md'), 'w', encoding='utf-8') as f:
             f.write(markdown_content)
-
+
         # Write middle.json
         with open(os.path.join(output_subfolder, 'middle.json'), 'w', encoding='utf-8') as f:
             json.dump(parse_result, f, ensure_ascii=False, indent=2)
-
+
         # Write model.json
        with open(os.path.join(output_subfolder, 'model.json'), 'w', encoding='utf-8') as f:
            json.dump(uni_format, f, ensure_ascii=False, indent=2)
-
+
         # Copy original PDF
         shutil.copy(input_file, os.path.join(output_subfolder, f'{pdf_name}.pdf'))
-
+
         # Generate layout.pdf and spans.pdf
-        try:
-            do_parse(input_file, parse_type, output_subfolder, draw_bbox=True)
-        except Exception as e:
-            with open(log_file_path, 'a') as log_file:
-                log_file.write(f"Error in do_parse for {input_file}: {str(e)}\n")
-            return input_file, f"do_parse Error: {str(e)}", None
-
+        do_parse(input_file, parse_type, output_subfolder, draw_bbox=True)
+
         # Extract images
         extract_images(input_file, output_subfolder)
-
+
         processing_time = time.time() - start_time
         with open(log_file_path, 'a') as log_file:
-            log_file.write(f"Successfully processed {input_file} on GPU {gpu_id} in {processing_time:.2f} seconds\n")
-
+            log_file.write(
+                f"Successfully processed {input_file} on GPU {gpu_id} in {processing_time:.2f} seconds\n")
+
         # Prepare result for JSONL output
         result = {
             "file_name": pdf_name,
@@ -296,7 +278,7 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
             "classification": classification_results,
             "is_text_pdf": is_text_pdf
         }
-
+
         return input_file, "Success", result
 
     except ValueError as ve:
@@ -310,15 +292,22 @@ def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_b
         return input_file, "Timeout", None
 
     except Exception as e:
-        with open(log_file_path, 'a') as log_file:
-            log_file.write(f"Error occurred: {str(e)}\n")
-        return input_file, f"Error: {str(e)}", None
+        # Save full traceback to a file
+        traceback_file = os.path.join(output_folder, 'traceback.txt')
+        with open(traceback_file, 'w') as f:
+            f.write(traceback.format_exc())
+
+        # Print error message and traceback location to CLI
+        print(f"Error occurred: {e}")
+        print(f"Full traceback saved to: {traceback_file}")
+        exit(1)  # Terminate the script
 
     finally:
         signal.alarm(0)  # Cancel the alarm
         if gpu_id >= 0:
             torch.cuda.empty_cache()
 
+
 def process_pdf_batch(batch, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path):
     results = []
     for pdf_file in batch:
@@ -326,6 +315,7 @@ def process_pdf_batch(batch, output_folder, gpu_id, config, timeout, use_bf16, m
         results.append(result)
     return results
 
+
 def write_to_jsonl(results, output_file):
     with open(output_file, 'a') as f:
         for result in results:
@@ -333,6 +323,7 @@ def write_to_jsonl(results, output_file):
             json.dump(result[2], f)
             f.write('\n')
 
+
 def get_gpu_memory_usage(gpu_id):
     if gpu_id < 0:
         return 0, 0  # CPU mode
@@ -340,18 +331,19 @@ def get_gpu_memory_usage(gpu_id):
     allocated_memory = torch.cuda.memory_allocated(gpu_id)
     return allocated_memory, total_memory
 
+
 def main():
     mp.set_start_method('spawn', force=True)
-
+
     args = parse_arguments()
     config = load_config(args.config)
-
+
     input_folder = args.input
     output_folder = args.output
     os.makedirs(output_folder, exist_ok=True)
-
+
     pdf_files = [os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.endswith('.pdf')]
-
+
     num_gpus = torch.cuda.device_count()
     if num_gpus == 0:
         print("No GPUs available. Using CPU.")
@@ -359,20 +351,20 @@ def main():
         gpu_ids = [-1]
     else:
         gpu_ids = list(range(num_gpus))
-
+
     num_workers = args.max_workers or min(num_gpus, os.cpu_count())
-
+
     main_jsonl = os.path.join(output_folder, 'processing_results.jsonl')
     temp_jsonl = os.path.join(output_folder, 'temp_results.jsonl')
     log_file_path = os.path.join(output_folder, 'processing_log.txt')
-
+
     # Enable deterministic mode
     torch.backends.cudnn.deterministic = True
     torch.backends.cudnn.benchmark = False
-
+
     # Load the model
     model = MagicModel(config)
-
+
     results = []
     with ProcessPoolExecutor(max_workers=num_workers) as executor:
         for gpu_id in gpu_ids:
@@ -382,16 +374,18 @@ def main():
             while pdf_index < len(pdf_files):
                 batch = pdf_files[pdf_index:pdf_index + batch_size]
                 try:
-                    future = executor.submit(process_pdf_batch, batch, output_folder, gpu_id, config, args.timeout, args.use_bf16, model, log_file_path)
+                    future = executor.submit(process_pdf_batch, batch, output_folder, gpu_id, config, args.timeout,
+                                             args.use_bf16, model, log_file_path)
                     batch_results = future.result()
                     results.extend(batch_results)
                     for result in batch_results:
                         write_to_jsonl([result], temp_jsonl)
-
+
                     # Print VRAM usage
                     allocated, total = get_gpu_memory_usage(gpu_id)
                     with open(log_file_path, 'a') as log_file:
-                        log_file.write(f"GPU {gpu_id} - Batch size: {batch_size}, VRAM usage: {allocated/1024**3:.2f}GB / {total/1024**3:.2f}GB\n")
+                        log_file.write(
+                            f"GPU {gpu_id} - Batch size: {batch_size}, VRAM usage: {allocated / 1024 ** 3:.2f}GB / {total / 1024 ** 3:.2f}GB\n")
                     # If successful and OOM hasn't occurred yet, increase batch size
                     if not oom_occurred:
                         batch_size += 1
@@ -404,24 +398,25 @@ def main():
                         log_file.write(f"OOM error occurred. Reducing batch size to {batch_size}\n")
                     torch.cuda.empty_cache()
                     continue
-
+
                 # After processing each batch, move temp JSONL to main JSONL
                 if os.path.exists(temp_jsonl):
                     with open(temp_jsonl, 'r') as temp, open(main_jsonl, 'a') as main:
                         shutil.copyfileobj(temp, main)
                     os.remove(temp_jsonl)
-
+
             # Clear GPU cache after each batch
             if gpu_id >= 0:
                 torch.cuda.empty_cache()
-
+
     success_count = sum(1 for _, status, _ in results if status == "Success")
     timeout_count = sum(1 for _, status, _ in results if status == "Timeout")
    error_count = len(results) - success_count - timeout_count
-
+
    with open(log_file_path, 'a') as log_file:
-        log_file.write(f"Processed {len(results)} PDFs. {success_count} succeeded, {timeout_count} timed out, {error_count} failed.\n")
-
+        log_file.write(
+            f"Processed {len(results)} PDFs. {success_count} succeeded, {timeout_count} timed out, {error_count} failed.\n")
+
     with open(os.path.join(output_folder, 'processing_summary.txt'), 'w') as summary:
         summary.write(f"Total PDFs processed: {len(results)}\n")
         summary.write(f"Successful: {success_count}\n")
@@ -431,5 +426,6 @@ def main():
     for pdf, status, _ in [result for result in results if result[1] != "Success"]:
         summary.write(f" - {pdf}: {status}\n")
 
+
 if __name__ == '__main__':
     main()
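Beyond PEP 8 blank lines and wrapping of long log calls, the substantive change in this commit is error handling: the per-stage try/except blocks in process_single_pdf (classify, pipe_analyze, process_pdf, pipe_mk_markdown, pipe_mk_uni_format, do_parse) are removed, and the function's single top-level handler now writes the full traceback to disk via the newly imported traceback module, reports the file's location, and exits. A minimal standalone sketch of that pattern, with a hypothetical risky_stage() standing in for any pipeline step:

import os
import sys
import traceback


def risky_stage():
    # Hypothetical stand-in for any pipeline stage that may raise.
    raise RuntimeError("simulated pipeline failure")


def run(output_folder):
    os.makedirs(output_folder, exist_ok=True)
    try:
        risky_stage()
    except Exception as e:
        # One handler for every stage: persist the complete traceback,
        # then point the operator at it and stop.
        traceback_file = os.path.join(output_folder, 'traceback.txt')
        with open(traceback_file, 'w') as f:
            f.write(traceback.format_exc())
        print(f"Error occurred: {e}")
        print(f"Full traceback saved to: {traceback_file}")
        sys.exit(1)


if __name__ == '__main__':
    run('output')

Two caveats about the committed version: exit(1) inside process_single_pdf runs in a ProcessPoolExecutor worker, so it will typically kill that worker (surfacing in the parent as a broken-pool error from future.result()) rather than returning the usual (file, status, result) tuple, and traceback.txt is opened with 'w', so each failure overwrites the previous one.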
 
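The adaptive batching in main() is unchanged in spirit: each successful batch grows batch_size by one until the first CUDA OOM, after which the size is reduced (never below MIN_BATCH_SIZE), torch.cuda.empty_cache() is called, and the same slice is retried with growth disabled. The hunks above show only the OOM log message, not the exact reduction rule, so the halving below is an assumption; process_batch is a hypothetical callable:

import torch

MIN_BATCH_SIZE = 1


def run_batches(items, process_batch, initial_batch_size=1):
    batch_size = initial_batch_size
    oom_occurred = False
    index = 0
    results = []
    while index < len(items):
        batch = items[index:index + batch_size]
        try:
            results.extend(process_batch(batch))
        except torch.cuda.OutOfMemoryError:
            # Older PyTorch raises a plain RuntimeError("CUDA out of memory ...") instead.
            oom_occurred = True
            batch_size = max(MIN_BATCH_SIZE, batch_size // 2)  # assumed back-off rule
            torch.cuda.empty_cache()
            continue  # retry the same slice with the smaller batch
        index += len(batch)
        if not oom_occurred:
            batch_size += 1  # probe upward until the first OOM
    return results

Probing upward one item at a time keeps the search cheap, and freezing growth after the first OOM avoids oscillating around the memory ceiling.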