File size: 3,051 Bytes
d4ff93a
 
 
 
 
 
4def4a0
d4ff93a
 
 
 
 
 
 
 
 
 
 
7fc9b26
d4ff93a
 
 
7fc9b26
 
 
d4ff93a
7fc9b26
 
d4ff93a
7fc9b26
 
 
 
 
 
 
d4ff93a
 
 
7fc9b26
 
 
 
 
d4ff93a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ad1014c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import base64
import html
import io

import gradio as gr
from pdf2image import convert_from_path
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

# Hugging Face model id for the OCR checkpoint used throughout the app.
_MODEL_ID = "allenai/olmOCR-7B-0225-preview"

# Load the processor/model pair once at import time so every request
# reuses the same weights instead of re-downloading per call.
# NOTE(review): olmOCR is a Qwen2-VL-based vision-language model;
# confirm against the model card that AutoModelForCausalLM wires up the
# vision tower correctly (the card may recommend a specific class).
processor = AutoProcessor.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)

def process_pdf(pdf_file):
    """
    Convert an uploaded PDF into per-page images, OCR each page with the
    globally loaded model, and return an HTML fragment showing every page
    image next to its extracted text, with per-page and copy-all buttons.

    Parameters:
        pdf_file: value from a ``gr.File`` input — either a tempfile
            wrapper exposing ``.name`` (Gradio 3) or a plain filepath
            string (Gradio 4), or ``None`` when nothing was uploaded.

    Returns:
        str: HTML for a ``gr.HTML`` component. Errors are reported
        inline as HTML rather than raised, so the UI never crashes.
    """
    # Guard: nothing uploaded yet.
    if pdf_file is None:
        return "<p>Please upload a PDF file.</p>"

    # gr.File yields an object with .name in Gradio 3 but a plain path
    # string in Gradio 4 — accept both instead of assuming `.name`.
    pdf_path = getattr(pdf_file, "name", pdf_file)

    # Convert PDF to one PIL image per page.
    try:
        pages = convert_from_path(pdf_path)
    except Exception as e:
        # Escape the message so <, > or & in it cannot break (or inject
        # into) the rendered HTML.
        return f"<p>Error converting PDF to images: {html.escape(str(e))}</p>"

    # Accumulate fragments in a list and join once at the end — repeated
    # `str +=` inside the loop is quadratic. (Also avoids a local named
    # `html` shadowing the stdlib module used for escaping.)
    parts = ['<div><button onclick="copyAll()" style="margin-bottom: 10px;">Copy All</button></div><div id="pages">']

    # Process each page
    for i, page in enumerate(pages):
        # Embed the page image as a base64 data URI so the returned
        # HTML is fully self-contained.
        buffered = io.BytesIO()
        page.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()
        img_data = f"data:image/png;base64,{img_str}"

        # Extract text from the page using the OCR model.
        try:
            inputs = processor(text="Extract the text from this image.", images=page, return_tensors="pt")
            # Fix: generate() defaults to ~20 new tokens, which would
            # truncate the OCR output for any real page of text.
            outputs = model.generate(**inputs, max_new_tokens=1024)
            text = processor.decode(outputs[0], skip_special_tokens=True)
        except Exception as e:
            text = f"Error extracting text: {str(e)}"

        # Fix: escape the OCR/error text before embedding it — a raw
        # "</textarea>", "<" or "&" would otherwise corrupt the markup
        # (and is an HTML-injection vector for hostile PDFs).
        safe_text = html.escape(text)

        textarea_id = f"text{i+1}"
        parts.append(f'''
 <div class="page" style="margin-bottom: 20px; border-bottom: 1px solid #ccc; padding-bottom: 20px;">
 <h3>Page {i+1}</h3>
 <div style="display: flex; align-items: flex-start;">
 <img src="{img_data}" alt="Page {i+1}" style="max-width: 300px; margin-right: 20px;">
 <div style="flex-grow: 1;">
 <textarea id="{textarea_id}" rows="10" style="width: 100%;">{safe_text}</textarea>
 <button onclick="copyText('{textarea_id}')" style="margin-top: 5px;">Copy</button>
 </div>
 </div>
 </div>
 ''')

    # Close the pages div and add JavaScript for the copy buttons.
    # NOTE(review): many Gradio versions sanitize <script> out of
    # gr.HTML output, which would leave these buttons inert — verify in
    # the target Gradio version (a gr.Textbox + gr.Button fallback may
    # be needed).
    parts.append('</div>')
    parts.append('''
 <script>
 function copyText(id) {
 var text = document.getElementById(id);
 text.select();
 document.execCommand("copy");
 }
 function copyAll() {
 var texts = document.querySelectorAll("#pages textarea");
 var allText = Array.from(texts).map(t => t.value).join("\\n\\n");
 navigator.clipboard.writeText(allText);
 }
 </script>
 ''')
    return "".join(parts)

# Define the Gradio interface: a file picker plus a button that feeds
# the uploaded PDF through process_pdf and renders the returned HTML.
with gr.Blocks(title="PDF Text Extractor") as demo:
    gr.Markdown("# PDF Text Extractor")
    gr.Markdown("Upload a PDF file and click 'Extract Text' to see each page's image and extracted text.")
    with gr.Row():
        pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
        submit_btn = gr.Button("Extract Text")
    output_html = gr.HTML()
    submit_btn.click(fn=process_pdf, inputs=pdf_input, outputs=output_html)

# Fix: launch only when executed as a script, so the module can be
# imported (tests, HF Spaces tooling) without starting a web server.
if __name__ == "__main__":
    demo.launch()