Commit · 28b0132
Parent(s): d36bbc5
initial commit
- .dockerignore +2 -0
- .gitattributes +6 -0
- .gitignore +9 -0
- Dockerfile +25 -0
- Ubuntu-Regular.ttf +3 -0
- app.py +101 -0
- cool-background.png +3 -0
- custom/kz_latest.pt +3 -0
- nohup.out +21 -0
- packages.txt +4 -0
- requirements.txt +10 -0
- samples/kz_curved.jpeg +3 -0
- samples/kz_curved_difficult.jpeg +3 -0
- samples/kz_straigt_1.PNG +3 -0
- samples/kz_straigt_2.PNG +3 -0
- samples/kz_straigt_3.PNG +3 -0
- samples/kz_yassaui1.jpg +3 -0
- samples/kz_yassaui2.jpg +3 -0
- samples/ru_curved1.jpg +3 -0
- samples/ru_curved2.jpg +3 -0
- utils.py +575 -0
.dockerignore
ADDED
@@ -0,0 +1,2 @@
+.venv/
+samples/
.gitattributes
CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.ttf filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.PNG filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,9 @@
+.venv/
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+*.pyw
+*.pyz
+*.pywz
+venv/
Dockerfile
ADDED
@@ -0,0 +1,25 @@
+FROM python:3.10-slim
+
+# Set the working directory
+WORKDIR /app
+
+# Install system dependencies
+COPY packages.txt /app/packages.txt
+RUN apt-get update \
+    && xargs -a /app/packages.txt apt-get install -y \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Python dependencies
+RUN pip install --no-cache-dir python-doctr[torch]
+COPY requirements.txt /app/requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application files
+COPY . /app
+
+# Expose Streamlit default port
+EXPOSE 8501
+
+# Run Streamlit app
+CMD ["streamlit", "run", "app.py"]
Ubuntu-Regular.ttf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3128df86a31805618436d0ae5651ba4285d0c9de0a39057d025f64ee33bceb64
+size 351884
app.py
ADDED
@@ -0,0 +1,101 @@
+import streamlit as st
+from doctr.io import DocumentFile
+from reportlab.pdfbase import pdfmetrics
+from reportlab.pdfbase.ttfonts import TTFont
+from PIL import Image
+import pytesseract
+
+import utils
+
+# Register a Unicode-compatible font
+fontname = "Ubuntu"
+fontpath = "./Ubuntu-Regular.ttf"
+reco_arch = "kz_latest.pt"
+pdfmetrics.registerFont(TTFont(fontname, fontpath))
+
+
+use_pytesseract = True
+def main():
+    """Build the Streamlit interface."""
+    # Wide mode - must be set first!
+    st.set_page_config(layout="wide")
+
+    # Hide Streamlit elements and set background
+    hide_st_style = """
+        <style>
+        #MainMenu {visibility: hidden;}
+        footer {visibility: hidden;}
+        header {visibility: hidden;}
+
+        </style>
+    """
+    st.markdown(hide_st_style, unsafe_allow_html=True)
+
+    # Interface design
+    st.title("Қазақша жазылған құжаттардың OCR")
+
+    # Move file upload to top
+    uploaded_file = st.file_uploader(
+        "Файлдарды жүктеңіз", type=["pdf", "png", "jpeg", "jpg"]
+    )
+
+    # New line
+    st.write("\n")
+
+    # Set up the columns
+    cols = st.columns((1, 1))
+    cols[0].subheader("Бастапқы бет")
+    cols[1].subheader("Мәтіннің біріктірілген нұсқасы")
+
+    if uploaded_file is not None:
+        if uploaded_file.name.endswith(".pdf"):
+            doc = DocumentFile.from_pdf(uploaded_file.read())
+        else:
+            doc = DocumentFile.from_images(uploaded_file.read())
+        page_idx = (
+            st.selectbox("Бетті таңдау", [idx + 1 for idx in range(len(doc))])
+            - 1
+        )
+        page = doc[page_idx]
+        cols[0].image(page)
+
+        with st.spinner("Модельді жүктеу..."):
+            predictor = utils.get_ocr_predictor(
+                reco_arch=reco_arch,
+            )
+
+        with st.spinner("Талдау..."):
+            out = predictor([page])
+            page_export = out.pages[0].export()
+
+        (coordinates, _, _) = utils.page_to_coordinates(page_export)
+
+        # Run the image through the model
+        '''
+        boxes_with_labels = utils.draw_boxes_with_labels(
+            page, coordinates, font_path="./Ubuntu-Regular.ttf"
+        )
+        cols[1].image(boxes_with_labels)
+        '''
+        # Display the merged text
+        final_text = utils.ocr_to_txt(coordinates)
+        cols[1].text_area("Мәтіннің біріктірілген нұсқасы:", final_text, height=500)
+
+        # Run pytesseract as a second pass if the flag is enabled
+        if use_pytesseract:
+            if uploaded_file.name.lower().endswith(('.png', '.jpg', '.jpeg')):
+                image = Image.open(uploaded_file)
+                ocr_text = pytesseract.image_to_string(image, lang="kaz+eng+rus")
+
+                # Create a collapsible block for OCR results
+                with st.expander("OCR нәтижесі (pytesseract)"):
+                    st.text_area("Тексеру нәтижесі:", ocr_text, height=300)
+            else:
+                st.warning("OCR тек суреттер үшін қол жетімді.")
+
+
+if __name__ == "__main__":
+    main()
cool-background.png
ADDED
Git LFS Details
custom/kz_latest.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ae55be255163e84fbfb1598d47ddbebcb5699b72bdc3f94e4e5d6b34f27c955
+size 95657854
nohup.out
ADDED
@@ -0,0 +1,21 @@
+
+Collecting usage statistics. To deactivate, set browser.gatherUsageStats to false.
+
+
+You can now view your Streamlit app in your browser.
+
+Local URL: http://localhost:8501
+Network URL: http://192.168.10.30:8501
+External URL: http://89.250.84.146:8501
+
+2025-02-11 11:57:22.523 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 11:58:14.941 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:01:39.450 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:08:21.887 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:15:45.396 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:26:43.882 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:32:15.389 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:33:50.702 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:35:27.654 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+2025-02-11 12:40:13.172 Examining the path of torch.classes raised: Tried to instantiate class '__path__._path', but it does not exist! Ensure that it is registered via torch::class_
+Stopping...
packages.txt
ADDED
@@ -0,0 +1,4 @@
+tesseract-ocr
+tesseract-ocr-kaz
+tesseract-ocr-rus
+tesseract-ocr-eng
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+python-doctr[torch]
+streamlit==1.39.0
+matplotlib==3.9.0
+numpy==1.26.4
+pandas==2.2.2
+reportlab==4.2.5
+pillow==10.3.0
+scikit-learn==1.5.2
+opencv-python-headless==4.10.0.84
+pytesseract==0.3.13
samples/kz_curved.jpeg
ADDED
Git LFS Details
samples/kz_curved_difficult.jpeg
ADDED
Git LFS Details
samples/kz_straigt_1.PNG
ADDED
Git LFS Details
samples/kz_straigt_2.PNG
ADDED
Git LFS Details
samples/kz_straigt_3.PNG
ADDED
Git LFS Details
samples/kz_yassaui1.jpg
ADDED
Git LFS Details
samples/kz_yassaui2.jpg
ADDED
Git LFS Details
samples/ru_curved1.jpg
ADDED
Git LFS Details
samples/ru_curved2.jpg
ADDED
Git LFS Details
utils.py
ADDED
@@ -0,0 +1,575 @@
+import logging
+import string
+from collections import defaultdict
+from typing import Any, List, Union
+
+import cv2
+import numpy as np
+import torch
+from doctr.io.elements import Document
+from doctr.models import parseq
+from doctr.models._utils import get_language
+from doctr.models.detection.predictor import DetectionPredictor
+from doctr.models.detection.zoo import detection_predictor
+from doctr.models.predictor.base import _OCRPredictor
+from doctr.models.recognition.predictor import RecognitionPredictor
+from doctr.models.recognition.zoo import recognition_predictor
+from doctr.utils.geometry import detach_scores
+from PIL import Image, ImageDraw, ImageFont
+from sklearn.cluster import DBSCAN
+from sklearn.preprocessing import StandardScaler
+from torch import nn
+
+confidence_threshold = 0.75
+
+reco_arch = "printed_v19.pt"
+det_arch = "fast_base"
+
+# Recognition vocabulary
+afterword_symbols = "!?.,:;"
+numbers = "0123456789"
+other_symbols = string.punctuation + "«»…£€¥¢฿₸₽№°—"
+space_symbol = " "
+kazakh_letters = "АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯЁабвгдежзийклмнопрстуфхцчшщъыьэюяёӘҒҚҢӨҰҮІҺәғқңөұүіһ"
+english_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+all_letters = kazakh_letters + english_letters
+all_symbols = numbers + other_symbols + space_symbol + all_letters
+
+
+def get_ocr_predictor(
+    det_arch: str = det_arch,
+    reco_arch: str = reco_arch,
+    pretrained=True,
+    pretrained_backbone: bool = True,
+    assume_straight_pages: bool = False,
+    preserve_aspect_ratio: bool = True,
+    symmetric_pad: bool = True,
+    det_bs: int = 2,
+    reco_bs: int = 128,
+    detect_orientation: bool = False,
+    straighten_pages: bool = False,
+    detect_language: bool = False,
+    bin_thresh: float = 0.3,
+    box_thresh: float = 0.3,
+):
+    device = "cpu"
+    if torch.backends.mps.is_available():
+        device = "mps"
+    elif torch.cuda.is_available():
+        device = "cuda"
+    else:
+        device = "cpu"
+    logging.info(f"Using device: {device}")
+
+    device = torch.device(device)
+
+    # Initialize predictor
+    logging.info(f"Initializing predictor with device: {device}")
+    reco_model = parseq(pretrained=False, pretrained_backbone=False, vocab=all_symbols)
+    reco_model.to(device)
+    reco_params = torch.load(f"./custom/{reco_arch}", map_location=device)
+    reco_model.load_state_dict(reco_params)
+
+    det_predictor = detection_predictor(
+        det_arch,
+        pretrained=pretrained,
+        pretrained_backbone=pretrained_backbone,
+        batch_size=det_bs,
+        assume_straight_pages=assume_straight_pages,
+        preserve_aspect_ratio=preserve_aspect_ratio,
+        symmetric_pad=symmetric_pad,
+    )
+
+    # Recognition
+    reco_predictor = recognition_predictor(
+        reco_model,
+        pretrained=pretrained,
+        pretrained_backbone=pretrained_backbone,
+        batch_size=reco_bs,
+    )
+
+    predictor = OCRPredictor(
+        det_predictor,
+        reco_predictor,
+        assume_straight_pages=assume_straight_pages,
+        preserve_aspect_ratio=preserve_aspect_ratio,
+        symmetric_pad=symmetric_pad,
+        detect_orientation=detect_orientation,
+        straighten_pages=straighten_pages,
+        detect_language=detect_language,
+    )
+
+    predictor.det_predictor.model.postprocessor.bin_thresh = bin_thresh
+    predictor.det_predictor.model.postprocessor.box_thresh = box_thresh
+    predictor.add_hook(CustomHook())
+
+    return predictor
+
+
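Usage sketch (not part of the commit): a minimal way to drive the predictor end to end, mirroring how app.py calls it and assuming the bundled custom/kz_latest.pt weights and a sample image are present:

    from doctr.io import DocumentFile
    import utils

    predictor = utils.get_ocr_predictor(reco_arch="kz_latest.pt")
    page = DocumentFile.from_images("samples/kz_curved.jpeg")[0]
    out = predictor([page])
    coordinates, _, _ = utils.page_to_coordinates(out.pages[0].export())
    print(utils.ocr_to_txt(coordinates))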
+class OCRPredictor(nn.Module, _OCRPredictor):
+    """Implements an object able to localize and identify text elements in a set of documents
+
+    Args:
+    ----
+        det_predictor: detection module
+        reco_predictor: recognition module
+        assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
+            without rotated textual elements.
+        straighten_pages: if True, estimates the page general orientation based on the median line orientation.
+            Then, rotates page before passing it to the deep learning modules. The final predictions will be remapped
+            accordingly. Doing so will improve performance for documents with page-uniform rotations.
+        detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
+            page. Doing so will slightly deteriorate the overall latency.
+        detect_language: if True, the language prediction will be added to the predictions for each
+            page. Doing so will slightly deteriorate the overall latency.
+        **kwargs: keyword args of `DocumentBuilder`
+    """
+
+    def __init__(
+        self,
+        det_predictor: DetectionPredictor,
+        reco_predictor: RecognitionPredictor,
+        assume_straight_pages: bool = True,
+        straighten_pages: bool = False,
+        preserve_aspect_ratio: bool = True,
+        symmetric_pad: bool = True,
+        detect_orientation: bool = False,
+        detect_language: bool = False,
+        **kwargs: Any,
+    ) -> None:
+        nn.Module.__init__(self)
+        self.det_predictor = det_predictor.eval()  # type: ignore[attr-defined]
+        self.reco_predictor = reco_predictor.eval()  # type: ignore[attr-defined]
+        _OCRPredictor.__init__(
+            self,
+            assume_straight_pages,
+            straighten_pages,
+            preserve_aspect_ratio,
+            symmetric_pad,
+            detect_orientation,
+            **kwargs,
+        )
+        self.detect_orientation = detect_orientation
+        self.detect_language = detect_language
+
+    @torch.inference_mode()
+    def forward(
+        self,
+        pages: List[Union[np.ndarray, torch.Tensor]],
+        **kwargs: Any,
+    ) -> Document:
+        # Dimension check
+        if any(page.ndim != 3 for page in pages):
+            raise ValueError(
+                "incorrect input shape: all pages are expected to be multi-channel 2D images."
+            )
+
+        origin_page_shapes = [
+            page.shape[:2] if isinstance(page, np.ndarray) else page.shape[-2:]
+            for page in pages
+        ]
+
+        # Localize text elements
+        loc_preds, out_maps = self.det_predictor(pages, return_maps=True, **kwargs)
+
+        # Detect document rotation and rotate pages
+        seg_maps = [
+            np.where(
+                out_map > getattr(self.det_predictor.model.postprocessor, "bin_thresh"),
+                255,
+                0,
+            ).astype(np.uint8)
+            for out_map in out_maps
+        ]
+        if self.detect_orientation:
+            general_pages_orientations, origin_pages_orientations = self._get_orientations(pages, seg_maps)  # type: ignore[arg-type]
+            orientations = [
+                {"value": orientation_page, "confidence": None}
+                for orientation_page in origin_pages_orientations
+            ]
+        else:
+            orientations = None
+            general_pages_orientations = None
+            origin_pages_orientations = None
+        if self.straighten_pages:
+            pages = self._straighten_pages(pages, seg_maps, general_pages_orientations, origin_pages_orientations)  # type: ignore
+            # Forward again to get predictions on straight pages
+            loc_preds = self.det_predictor(pages, **kwargs)
+
+        assert all(
+            len(loc_pred) == 1 for loc_pred in loc_preds
+        ), "Detection Model in ocr_predictor should output only one class"
+
+        loc_preds = [list(loc_pred.values())[0] for loc_pred in loc_preds]
+        # Detach objectness scores from loc_preds
+        loc_preds, objectness_scores = detach_scores(loc_preds)
+        # Check whether crop mode should be switched to channels first
+        channels_last = len(pages) == 0 or isinstance(pages[0], np.ndarray)
+
+        # Apply hooks to loc_preds if any
+        for hook in self.hooks:
+            loc_preds = hook(loc_preds)
+
+        # Crop images
+        crops, loc_preds = self._prepare_crops(
+            pages,  # type: ignore[arg-type]
+            loc_preds,
+            channels_last=channels_last,
+            assume_straight_pages=self.assume_straight_pages,
+        )
+        # Rectify crop orientation and get crop orientation predictions
+        crop_orientations: Any = []
+        # save crops to ./crops
+        # os.makedirs("./crops", exist_ok=True)
+        # for i, crop in enumerate(crops[0]):
+        #     Image.fromarray(crop).save(f"./crops/{i}.png")
+
+        # if not self.assume_straight_pages:
+        #     crops, loc_preds, _crop_orientations = self._rectify_crops(crops, loc_preds)
+        #     crop_orientations = [
+        #         {"value": orientation[0], "confidence": orientation[1]} for orientation in _crop_orientations
+        #     ]
+
+        # Identify character sequences
+        word_preds = self.reco_predictor(
+            [crop for page_crops in crops for crop in page_crops], **kwargs
+        )
+        if not crop_orientations:
+            crop_orientations = [{"value": 0, "confidence": None} for _ in word_preds]
+
+        boxes, text_preds, crop_orientations = self._process_predictions(
+            loc_preds, word_preds, crop_orientations
+        )
+
+        if self.detect_language:
+            languages = [
+                get_language(" ".join([item[0] for item in text_pred]))
+                for text_pred in text_preds
+            ]
+            languages_dict = [
+                {"value": lang[0], "confidence": lang[1]} for lang in languages
+            ]
+        else:
+            languages_dict = None
+
+        out = self.doc_builder(
+            pages,  # type: ignore[arg-type]
+            boxes,
+            objectness_scores,
+            text_preds,
+            origin_page_shapes,  # type: ignore[arg-type]
+            crop_orientations,
+            orientations,
+            languages_dict,
+        )
+        return out
+
+
+class CustomHook:
+    def __call__(self, loc_preds):
+        # Manipulate the location predictions here
+        # 1. The output structure needs to be the same as the input location predictions
+        # 2. Be aware that the coordinates are relative and need to be between 0 and 1
+
+        # return np.array([self.order_bbox_points(point) for loc_pred in loc_preds for point in loc_pred])
+        # iterate over each page and each box
+        answer = []
+        for page_idx, page_boxes in enumerate(loc_preds):
+            bboxes = []
+            for box_idx, box in enumerate(page_boxes):
+                box = self.order_bbox_points(box)
+                bboxes.append(box)
+            answer.append(bboxes)
+        return np.array(answer)
+
+    def order_bbox_points(self, points):
+        """
+        Orders a list of four (x, y) points in the following order:
+        top-left, top-right, bottom-right, bottom-left.
+
+        Args:
+            points (list of tuples): List of four (x, y) tuples.
+
+        Returns:
+            list of tuples: Ordered list of four (x, y) tuples.
+        """
+        if len(points) != 4:
+            raise ValueError(
+                "Exactly four points are required to define a quadrilateral."
+            )
+
+        # Convert points to NumPy array for easier manipulation
+        pts = np.array(points)
+
+        # Compute the sum and difference of the points
+        sum_pts = pts.sum(axis=1)
+        diff_pts = np.diff(pts, axis=1).flatten()
+
+        # Initialize ordered points list
+        ordered = [None] * 4
+
+        # Top-Left point has the smallest sum
+        ordered[0] = tuple(pts[np.argmin(sum_pts)])
+
+        # Bottom-Right point has the largest sum
+        ordered[2] = tuple(pts[np.argmax(sum_pts)])
+
+        # Top-Right point has the smallest difference
+        ordered[1] = tuple(pts[np.argmin(diff_pts)])
+
+        # Bottom-Left point has the largest difference
+        ordered[3] = tuple(pts[np.argmax(diff_pts)])
+
+        return ordered
+
+
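Worked example (not part of the commit): the sum/difference heuristic above sorts a shuffled quadrilateral into TL, TR, BR, BL order:

    hook = CustomHook()
    quad = [(100, 12), (8, 48), (10, 10), (98, 50)]  # shuffled corners
    print(hook.order_bbox_points(quad))
    # -> [(10, 10), (100, 12), (98, 50), (8, 48)], i.e. TL, TR, BR, BL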
+def geometry_to_coordinates(geometry, img_width, img_height):
+    if len(geometry) == 2:
+        (x0_rel, y0_rel), (x1_rel, y1_rel) = geometry
+        x0 = int(x0_rel * img_width)
+        y0 = int(y0_rel * img_height)
+        x1 = int(x1_rel * img_width)
+        y1 = int(y1_rel * img_height)
+        # Bounding box with four corners
+        all_four = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
+        return all_four
+    else:
+        # Bounding box with four corners
+        all_four = [[int(x * img_width), int(y * img_height)] for x, y in geometry]
+        return all_four
+
+
+def page_to_coordinates(page_export):
+    coordinates = []
+    img_height, img_width = page_export["dimensions"]
+    for block in page_export["blocks"]:
+        for line in block["lines"]:
+            for word in line["words"]:
+                if (
+                    word["confidence"] < confidence_threshold
+                    and len(word["value"].strip()) > 1
+                ):
+                    logging.warning(
+                        f"Skipping word with low confidence: {word['value']} confidence {word['confidence']}"
+                    )
+                    continue
+                all_four = geometry_to_coordinates(
+                    word["geometry"], img_width, img_height
+                )
+                coordinates.append((all_four, word["value"], word["confidence"]))
+
+    return (coordinates, img_width, img_height)
+
+
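Illustration (not part of the commit): a minimal sketch, with made-up values, of the doctr page export this function consumes:

    page_export = {
        "dimensions": (1024, 768),  # (height, width)
        "blocks": [{"lines": [{"words": [{
            "value": "Сәлем",
            "confidence": 0.98,
            "geometry": ((0.10, 0.20), (0.30, 0.25)),  # relative (x, y) corners
        }]}]}],
    }
    coordinates, w, h = page_to_coordinates(page_export)
    # coordinates == [([[76, 204], [230, 204], [230, 256], [76, 256]], "Сәлем", 0.98)]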
+def draw_boxes_with_labels(image, coordinates, font_path):
+    """Draw the detected boxes and small word labels on top of the source image.
+
+    Args:
+        image: Source image (numpy array).
+        coordinates: Output of page_to_coordinates().
+        font_path: Path to a TrueType font file.
+
+    Returns:
+        The image with boxes and labels drawn on it.
+    """
+
+    # Convert the image to PIL format
+    img_with_boxes = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    img_pil = Image.fromarray(img_with_boxes)
+    draw = ImageDraw.Draw(img_pil)
+
+    for coords, word, score in coordinates:
+        # Polygon
+        coords = [(x, y) for x, y in coords]
+        text_x, text_y = (
+            min(coords, key=lambda x: x[0])[0],
+            min(coords, key=lambda x: x[1])[1],
+        )
+        draw.polygon(coords, outline=(0, 255, 0, 125), width=1)
+        font = ImageFont.truetype(font_path, 10)
+        draw.text((text_x, max(text_y - 10, 0)), word, font=font, fill=(255, 0, 0))
+
+    # Convert the image back to OpenCV format
+    img_with_boxes = cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)
+
+    # Return the image
+    return img_with_boxes
+
+
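Usage sketch (not part of the commit), mirroring the overlay block that app.py keeps commented out; the output file name is hypothetical:

    import cv2
    img = cv2.imread("samples/kz_curved.jpeg")
    vis = draw_boxes_with_labels(img, coordinates, font_path="./Ubuntu-Regular.ttf")
    cv2.imwrite("boxes_preview.png", vis)  # hypothetical output name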
+def generate_line_points(bbox, num_points=10):
+    """
+    Generates multiple points along the line connecting the left and right centers of a bounding box.
+
+    Parameters:
+    - bbox: List of four points [[x0, y0], [x1, y1], [x2, y2], [x3, y3]]
+            in the order: TopLeft, TopRight, BottomRight, BottomLeft.
+    - num_points: Number of points to generate along the line.
+
+    Returns:
+    - List of (x, y) tuples.
+    """
+    # Calculate left center (midpoint of TopLeft and BottomLeft)
+    left_center_x = (bbox[0][0] + bbox[3][0]) / 2
+    left_center_y = (bbox[0][1] + bbox[3][1]) / 2
+
+    # Calculate right center (midpoint of TopRight and BottomRight)
+    right_center_x = (bbox[1][0] + bbox[2][0]) / 2
+    right_center_y = (bbox[1][1] + bbox[2][1]) / 2
+
+    # Generate linearly spaced points between left center and right center
+    x_values = np.linspace(left_center_x, right_center_x, num_points)
+    y_values = np.linspace(left_center_y, right_center_y, num_points)
+
+    points = list(zip(x_values, y_values))
+    return points
+
+
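Worked example (not part of the commit): for a slightly rotated word box, the generated points trace the box's midline:

    bbox = [[0, 0], [100, 10], [100, 30], [0, 20]]  # TL, TR, BR, BL
    print(generate_line_points(bbox, num_points=3))
    # left centre (0, 10), right centre (100, 20)
    # -> [(0.0, 10.0), (50.0, 15.0), (100.0, 20.0)]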
+def ocr_to_txt(coordinates):
+    """
+    Converts OCR output to structured text with lines, using multiple points along connecting lines.
+    Inserts empty lines when there's significant vertical spacing between lines.
+
+    Parameters:
+    - coordinates: List of tuples containing bounding box coordinates, word value, and score.
+                   Each tuple is (([[x0, y0], [x1, y1], [x2, y2], [x3, y3]]), word, score)
+
+    Returns:
+    - The merged text as a single string.
+    """
+    # Step 1: Compute multiple points for each word
+    all_points = []
+    words = []
+    scaler = StandardScaler()
+    points_per_word = 25  # Number of points to generate per word
+
+    for bbox, word, score in coordinates:
+        points = generate_line_points(bbox, num_points=points_per_word)
+        all_points.extend(points)
+        words.append(
+            {
+                "bbox": bbox,
+                "word": word,
+                "score": score,
+                "points": points,  # Store the multiple points
+            }
+        )
+
+    # Step 2: Scale the points
+    scaled_points = scaler.fit_transform(all_points)
+    scaled_points = [(c[0] / 5, c[1]) for c in scaled_points]
+    scaled_points = np.array(scaled_points)
+
+    # Step 3: Cluster points using DBSCAN
+    # Parameters for DBSCAN can be tuned based on the specific OCR output
+    # eps determines the maximum distance between two samples for them to be considered as in the same neighborhood
+    # min_samples is kept low so that the dense chains of per-word points cluster into whole lines
+    db = DBSCAN(min_samples=2, eps=0.05).fit(scaled_points)  # eps might need adjustment
+    labels = db.labels_
+
+    # Map each point to its cluster label
+    point_labels = labels.tolist()
+
+    # Step 4: Assign words to clusters based on their points
+    label_to_words = defaultdict(list)
+    current_point = 0  # To keep track of which point belongs to which word
+
+    for word in words:
+        word_labels = point_labels[current_point : current_point + points_per_word]
+        current_point += points_per_word
+
+        # Count the frequency of each label in the word's points
+        label_counts = defaultdict(int)
+        for lbl in word_labels:
+            label_counts[lbl] += 1
+
+        # Assign the word to the most frequent label
+        # If multiple labels have the same highest count, choose the smallest label (ignoring -1 for noise)
+        if label_counts:
+            # Exclude noise label (-1) when possible
+            filtered_labels = {k: v for k, v in label_counts.items() if k != -1}
+            if filtered_labels:
+                assigned_label = max(filtered_labels, key=filtered_labels.get)
+            else:
+                assigned_label = -1  # Assign to noise
+            label_to_words[assigned_label].append(word)
+
+    # Remove noise cluster if present
+    if -1 in label_to_words:
+        print(
+            f"Warning: {len(label_to_words[-1])} words assigned to noise cluster and will be ignored."
+        )
+        del label_to_words[-1]
+
+    # Step 5: Sort words within each line
+    sorted_lines = []
+    line_heights = []  # To store heights of each line for median calculation
+    line_y_bounds = []  # To store min and max y for each line
+
+    for label, line_words in label_to_words.items():
+        # Sort words based on their leftmost x-coordinate
+        line_words_sorted = sorted(
+            line_words, key=lambda w: min(point[0] for point in w["points"])
+        )
+        sorted_lines.append(line_words_sorted)
+
+        # Compute y-bounds for the line
+        y_values = []
+        for word in line_words_sorted:
+            y_coords = [point[1] for point in word["bbox"]]
+            y_min = min(y_coords)
+            y_max = max(y_coords)
+            y_values.append([y_min, y_max])
+        y_values = np.array(y_values)
+        # Compute the median top and bottom y-coordinates for the line
+        line_min_y_median = np.median(y_values[:, 0])
+        line_max_y_median = np.median(y_values[:, 1])
+        line_heights.append(line_max_y_median - line_min_y_median)
+        line_y_bounds.append((line_min_y_median, line_max_y_median))
+
+    # Step 6: Sort lines from top to bottom based on the median y-coordinate of their words
+    sorted_lines, line_heights, line_y_bounds = zip(
+        *sorted(
+            zip(sorted_lines, line_heights, line_y_bounds),
+            key=lambda item: np.median(
+                [np.mean([p[1] for p in w["bbox"]]) for w in item[0]]
+            ),
+        )
+    )
+
+    sorted_lines = list(sorted_lines)
+    line_heights = list(line_heights)
+    line_y_bounds = list(line_y_bounds)
+
+    # Step 7: Assemble the sorted lines into text, with empty lines where necessary
+    output_text = ""
+    previous_line_median_y = None  # To track the median y of the previous line
+
+    for idx, line in enumerate(sorted_lines):
+        # Compute the current line's y-bounds
+        current_line_min_y_median = line_y_bounds[idx][0]
+        current_line_max_y_median = line_y_bounds[idx][1]
+        current_line_median_height = line_heights[idx]
+        current_line_median_y = (
+            current_line_min_y_median + current_line_max_y_median
+        ) / 2
+
+        if previous_line_median_y is not None:
+            # Compute vertical distance between lines
+            vertical_distance = current_line_median_y - previous_line_median_y
+            median_height = (
+                current_line_median_height + previous_line_median_height
+            ) / 2
+
+            # If the vertical distance is greater than twice the median height, insert an empty line
+            if vertical_distance > median_height * 2:
+                output_text += "\n"  # Insert empty line
+
+        # Write the current line's text
+        line_text = " ".join([w["word"] for w in line])
+        output_text += line_text + "\n"
+
+        # Update the previous-line trackers for the next iteration
+        previous_line_median_y = current_line_median_y
+        previous_line_median_height = current_line_median_height
+
+    return output_text
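Toy demo (not part of the commit): two words on one baseline and two more 40 px lower come back as two text lines, and the wide vertical gap also triggers the empty-line insertion; the exact grouping depends on the DBSCAN eps, so treat the output as indicative:

    coords = [
        ([[0, 0], [50, 0], [50, 10], [0, 10]], "Hello", 0.99),
        ([[60, 0], [120, 0], [120, 10], [60, 10]], "world", 0.98),
        ([[0, 40], [80, 40], [80, 50], [0, 50]], "Second", 0.97),
        ([[90, 40], [150, 40], [150, 50], [90, 50]], "line", 0.96),
    ]
    print(ocr_to_txt(coords))
    # Hello world
    #
    # Second line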