import numpy as np
import pytest
import torch
from doctr.models.preprocessor import PreProcessor
@pytest.mark.parametrize(
"batch_size, output_size, input_tensor, expected_batches, expected_value",
[
[2, (128, 128), np.full((3, 256, 128, 3), 255, dtype=np.uint8), 1, 0.5], # numpy uint8
[2, (128, 128), np.ones((3, 256, 128, 3), dtype=np.float32), 1, 0.5], # numpy fp32
[2, (128, 128), torch.full((3, 3, 256, 128), 255, dtype=torch.uint8), 1, 0.5], # torch uint8
[2, (128, 128), torch.ones((3, 3, 256, 128), dtype=torch.float32), 1, 0.5], # torch fp32
[2, (128, 128), torch.ones((3, 3, 256, 128), dtype=torch.float16), 1, 0.5], # torch fp16
[2, (128, 128), [np.full((256, 128, 3), 255, dtype=np.uint8)] * 3, 2, 0.5], # list of numpy uint8
[2, (128, 128), [np.ones((256, 128, 3), dtype=np.float32)] * 3, 2, 0.5], # list of numpy fp32
[2, (128, 128), [torch.full((3, 256, 128), 255, dtype=torch.uint8)] * 3, 2, 0.5], # list of torch uint8
[2, (128, 128), [torch.ones((3, 256, 128), dtype=torch.float32)] * 3, 2, 0.5], # list of torch fp32
[2, (128, 128), [torch.ones((3, 256, 128), dtype=torch.float16)] * 3, 2, 0.5], # list of torch fp16
],
)
def test_preprocessor(batch_size, output_size, input_tensor, expected_batches, expected_value):
processor = PreProcessor(output_size, batch_size)
# Invalid input type
with pytest.raises(TypeError):
processor(42)
# 4D check
with pytest.raises(AssertionError):
processor(np.full((256, 128, 3), 255, dtype=np.uint8))
with pytest.raises(TypeError):
processor(np.full((1, 256, 128, 3), 255, dtype=np.int32))
# 3D check
with pytest.raises(AssertionError):
processor([np.full((3, 256, 128, 3), 255, dtype=np.uint8)])
with pytest.raises(TypeError):
processor([np.full((256, 128, 3), 255, dtype=np.int32)])
with torch.no_grad():
out = processor(input_tensor)
assert isinstance(out, list) and len(out) == expected_batches
assert all(isinstance(b, torch.Tensor) for b in out)
assert all(b.dtype == torch.float32 for b in out)
assert all(b.shape[-2:] == output_size for b in out)
assert all(torch.all(b == expected_value) for b in out)
assert len(repr(processor).split("\n")) == 4
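# Illustrative usage sketch (not part of the test suite): the PreProcessor exercised
# above batches heterogeneous page inputs into fixed-size float32 tensors. The call
# pattern below mirrors the list-of-numpy case asserted in test_preprocessor.
def _example_preprocessor_usage():
    processor = PreProcessor(output_size=(128, 128), batch_size=2)
    pages = [np.full((256, 128, 3), 255, dtype=np.uint8)] * 3  # three HWC uint8 pages
    return processor(pages)  # two batches of float32 tensors resized to (128, 128)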
|
import os
import pytest
from torch import nn
from doctr.models.utils import conv_sequence_pt, load_pretrained_params
def test_load_pretrained_params(tmpdir_factory):
model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4))
# Retrieve this URL
url = "https://github.com/mindee/doctr/releases/download/v0.2.1/tmp_checkpoint-6f0ce0e6.pt"
# Temp cache dir
cache_dir = tmpdir_factory.mktemp("cache")
# Pass an incorrect hash
with pytest.raises(ValueError):
load_pretrained_params(model, url, "mywronghash", cache_dir=str(cache_dir))
# Let it resolve the hash from the file name
load_pretrained_params(model, url, cache_dir=str(cache_dir))
# Check that the file was downloaded & the archive extracted
assert os.path.exists(cache_dir.join("models").join(url.rpartition("/")[-1].split("&")[0]))
# Check ignore keys
load_pretrained_params(model, url, cache_dir=str(cache_dir), ignore_keys=["2.weight"])
# Non-matching keys
model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 1))
with pytest.raises(ValueError):
load_pretrained_params(model, url, cache_dir=str(cache_dir), ignore_keys=["2.weight"])
def test_conv_sequence():
assert len(conv_sequence_pt(3, 8, kernel_size=3)) == 1
assert len(conv_sequence_pt(3, 8, True, kernel_size=3)) == 2
assert len(conv_sequence_pt(3, 8, False, True, kernel_size=3)) == 2
assert len(conv_sequence_pt(3, 8, True, True, kernel_size=3)) == 3
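# Illustrative sketch (not a test): conv_sequence_pt returns a list of layers (a conv,
# optionally followed by normalization and activation, per the length checks above), so
# wrapping it in nn.Sequential is a natural usage. That wrapping is an assumption here,
# not something the tests assert.
def _example_conv_block():
    return nn.Sequential(*conv_sequence_pt(3, 8, True, True, kernel_size=3))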
|
import numpy as np
import pytest
from torch import nn
from doctr import models
from doctr.io import Document, DocumentFile
from doctr.io.elements import KIEDocument
from doctr.models import detection, recognition
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.kie_predictor import KIEPredictor
from doctr.models.predictor import OCRPredictor
from doctr.models.preprocessor import PreProcessor
from doctr.models.recognition.predictor import RecognitionPredictor
@pytest.mark.parametrize(
"assume_straight_pages, straighten_pages",
[
[True, False],
[False, False],
[True, True],
],
)
def test_ocrpredictor(mock_pdf, mock_vocab, assume_straight_pages, straighten_pages):
det_bsize = 4
det_predictor = DetectionPredictor(
PreProcessor(output_size=(512, 512), batch_size=det_bsize),
detection.db_mobilenet_v3_large(
pretrained=False,
pretrained_backbone=False,
assume_straight_pages=assume_straight_pages,
),
)
assert not det_predictor.model.training
reco_bsize = 32
reco_predictor = RecognitionPredictor(
PreProcessor(output_size=(32, 128), batch_size=reco_bsize, preserve_aspect_ratio=True),
recognition.crnn_vgg16_bn(pretrained=False, pretrained_backbone=False, vocab=mock_vocab),
)
assert not reco_predictor.model.training
doc = DocumentFile.from_pdf(mock_pdf)
predictor = OCRPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=assume_straight_pages,
straighten_pages=straighten_pages,
detect_orientation=True,
detect_language=True,
)
if assume_straight_pages:
assert predictor.crop_orientation_predictor is None
else:
assert isinstance(predictor.crop_orientation_predictor, nn.Module)
out = predictor(doc)
assert isinstance(out, Document)
assert len(out.pages) == 2
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
orientation = 0
assert out.pages[0].orientation["value"] == orientation
@pytest.mark.parametrize(
"assume_straight_pages, straighten_pages",
[
[True, False],
[False, False],
[True, True],
],
)
def test_kiepredictor(mock_pdf, mock_vocab, assume_straight_pages, straighten_pages):
det_bsize = 4
det_predictor = DetectionPredictor(
PreProcessor(output_size=(512, 512), batch_size=det_bsize),
detection.db_mobilenet_v3_large(
pretrained=False,
pretrained_backbone=False,
assume_straight_pages=assume_straight_pages,
),
)
assert not det_predictor.model.training
reco_bsize = 32
reco_predictor = RecognitionPredictor(
PreProcessor(output_size=(32, 128), batch_size=reco_bsize, preserve_aspect_ratio=True),
recognition.crnn_vgg16_bn(pretrained=False, pretrained_backbone=False, vocab=mock_vocab),
)
assert not reco_predictor.model.training
doc = DocumentFile.from_pdf(mock_pdf)
predictor = KIEPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=assume_straight_pages,
straighten_pages=straighten_pages,
detect_orientation=True,
detect_language=True,
)
if assume_straight_pages:
assert predictor.crop_orientation_predictor is None
else:
assert isinstance(predictor.crop_orientation_predictor, nn.Module)
out = predictor(doc)
assert isinstance(out, Document)
assert len(out.pages) == 2
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
orientation = 0
assert out.pages[0].orientation["value"] == orientation
def _test_predictor(predictor):
# Output checks
assert isinstance(predictor, OCRPredictor)
doc = [np.zeros((512, 512, 3), dtype=np.uint8)]
out = predictor(doc)
# Document
assert isinstance(out, Document)
# The input doc has 1 page
assert len(out.pages) == 1
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
def _test_kiepredictor(predictor):
# Output checks
assert isinstance(predictor, KIEPredictor)
doc = [np.zeros((512, 512, 3), dtype=np.uint8)]
out = predictor(doc)
# Document
assert isinstance(out, KIEDocument)
# The input doc has 1 page
assert len(out.pages) == 1
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
@pytest.mark.parametrize(
"det_arch, reco_arch",
[
["db_mobilenet_v3_large", "crnn_mobilenet_v3_large"],
],
)
def test_zoo_models(det_arch, reco_arch):
# Model
predictor = models.ocr_predictor(det_arch, reco_arch, pretrained=True)
_test_predictor(predictor)
# passing model instance directly
det_model = detection.__dict__[det_arch](pretrained=True)
reco_model = recognition.__dict__[reco_arch](pretrained=True)
predictor = models.ocr_predictor(det_model, reco_model)
_test_predictor(predictor)
# passing recognition model as detection model
with pytest.raises(ValueError):
models.ocr_predictor(det_arch=reco_model, pretrained=True)
# passing detection model as recognition model
with pytest.raises(ValueError):
models.ocr_predictor(reco_arch=det_model, pretrained=True)
# KIE predictor
predictor = models.kie_predictor(det_arch, reco_arch, pretrained=True)
_test_kiepredictor(predictor)
# passing model instance directly
det_model = detection.__dict__[det_arch](pretrained=True)
reco_model = recognition.__dict__[reco_arch](pretrained=True)
predictor = models.kie_predictor(det_model, reco_model)
_test_kiepredictor(predictor)
# passing recognition model as detection model
with pytest.raises(ValueError):
models.kie_predictor(det_arch=reco_model, pretrained=True)
# passing detection model as recognition model
with pytest.raises(ValueError):
models.kie_predictor(reco_arch=det_model, pretrained=True)
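# Illustrative usage sketch (not a test): end-to-end OCR on an in-memory page, mirroring
# the calls made by _test_predictor above. The architecture names are the ones
# parametrized in test_zoo_models.
def _example_ocr_usage():
    predictor = models.ocr_predictor("db_mobilenet_v3_large", "crnn_mobilenet_v3_large", pretrained=True)
    page = np.zeros((512, 512, 3), dtype=np.uint8)  # HWC uint8 page
    return predictor([page]).pages[0]  # the returned Document has a single page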
|
import os
import tempfile
import onnxruntime
import pytest
import torch
from doctr.models import recognition
from doctr.models.recognition.crnn.pytorch import CTCPostProcessor
from doctr.models.recognition.master.pytorch import MASTERPostProcessor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.models.recognition.sar.pytorch import SARPostProcessor
from doctr.models.recognition.vitstr.pytorch import ViTSTRPostProcessor
from doctr.models.utils import export_model_to_onnx
@pytest.mark.parametrize(
"arch_name, input_shape, pretrained",
[
["crnn_vgg16_bn", (3, 32, 128), True],
["crnn_mobilenet_v3_small", (3, 32, 128), True],
["crnn_mobilenet_v3_large", (3, 32, 128), True],
["sar_resnet31", (3, 32, 128), False],
["master", (3, 32, 128), False],
["vitstr_small", (3, 32, 128), False],
["vitstr_base", (3, 32, 128), False],
],
)
def test_recognition_models(arch_name, input_shape, pretrained, mock_vocab):
batch_size = 4
model = recognition.__dict__[arch_name](vocab=mock_vocab, pretrained=pretrained, input_shape=input_shape).eval()
assert isinstance(model, torch.nn.Module)
input_tensor = torch.rand((batch_size, *input_shape))
target = ["i", "am", "a", "jedi"]
if torch.cuda.is_available():
model.cuda()
input_tensor = input_tensor.cuda()
out = model(input_tensor, target, return_model_output=True, return_preds=True)
assert isinstance(out, dict)
assert len(out) == 3
assert isinstance(out["preds"], list)
assert len(out["preds"]) == batch_size
assert all(isinstance(word, str) and isinstance(conf, float) and 0 <= conf <= 1 for word, conf in out["preds"])
assert isinstance(out["out_map"], torch.Tensor)
assert out["out_map"].dtype == torch.float32
assert isinstance(out["loss"], torch.Tensor)
# the model in train mode requires targets
with pytest.raises(ValueError):
model.train()
model(input_tensor, None)
@pytest.mark.parametrize(
"post_processor, input_shape",
[
[CTCPostProcessor, [2, 119, 30]],
[SARPostProcessor, [2, 119, 30]],
[ViTSTRPostProcessor, [2, 119, 30]],
[MASTERPostProcessor, [2, 119, 30]],
],
)
def test_reco_postprocessors(post_processor, input_shape, mock_vocab):
processor = post_processor(mock_vocab)
decoded = processor(torch.rand(*input_shape))
assert isinstance(decoded, list)
assert all(isinstance(word, str) and isinstance(conf, float) and 0 <= conf <= 1 for word, conf in decoded)
assert len(decoded) == input_shape[0]
assert all(char in mock_vocab for word, _ in decoded for char in word)
# Repr
assert repr(processor) == f"{post_processor.__name__}(vocab_size={len(mock_vocab)})"
@pytest.mark.parametrize(
"arch_name",
[
"crnn_vgg16_bn",
"crnn_mobilenet_v3_small",
"crnn_mobilenet_v3_large",
"sar_resnet31",
"master",
"vitstr_small",
"vitstr_base",
],
)
def test_recognition_zoo(arch_name):
batch_size = 2
# Model
predictor = recognition.zoo.recognition_predictor(arch_name, pretrained=False)
predictor.model.eval()
# object check
assert isinstance(predictor, RecognitionPredictor)
input_tensor = torch.rand((batch_size, 3, 128, 128))
if torch.cuda.is_available():
predictor.model.cuda()
input_tensor = input_tensor.cuda()
with torch.no_grad():
out = predictor(input_tensor)
out = predictor(input_tensor)
assert isinstance(out, list) and len(out) == batch_size
assert all(isinstance(word, str) and isinstance(conf, float) for word, conf in out)
@pytest.mark.skipif(os.getenv("SLOW", "0") == "0", reason="slow test")
@pytest.mark.parametrize(
"arch_name, input_shape",
[
["crnn_vgg16_bn", (3, 32, 128)],
["crnn_mobilenet_v3_small", (3, 32, 128)],
["crnn_mobilenet_v3_large", (3, 32, 128)],
["sar_resnet31", (3, 32, 128)],
["master", (3, 32, 128)],
["vitstr_small", (3, 32, 128)], # testing one vitstr version is enough
],
)
def test_models_onnx_export(arch_name, input_shape):
# Model
batch_size = 2
model = recognition.__dict__[arch_name](pretrained=True, exportable=True).eval()
dummy_input = torch.rand((batch_size, *input_shape), dtype=torch.float32)
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path = export_model_to_onnx(model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input)
assert os.path.exists(model_path)
# Inference
ort_session = onnxruntime.InferenceSession(
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
assert isinstance(ort_outs, list) and len(ort_outs) == 1
assert ort_outs[0].shape[0] == batch_size
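# Illustrative sketch (not a test): running the recognition predictor on a batch of word
# crops, as test_recognition_zoo does above. Each prediction is a (word, confidence) pair.
def _example_recognition_usage():
    predictor = recognition.zoo.recognition_predictor("crnn_vgg16_bn", pretrained=False)
    crops = torch.rand((2, 3, 128, 128))
    with torch.no_grad():
        return predictor(crops)  # list of (word, confidence) tuples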
|
import math
import numpy as np
import pytest
import torch
from doctr.transforms import (
ChannelShuffle,
ColorInversion,
GaussianNoise,
RandomCrop,
RandomHorizontalFlip,
RandomRotate,
RandomShadow,
Resize,
)
from doctr.transforms.functional import crop_detection, rotate_sample
def test_resize():
output_size = (32, 32)
transfo = Resize(output_size)
input_t = torch.ones((3, 64, 64), dtype=torch.float32)
out = transfo(input_t)
assert torch.all(out == 1)
assert out.shape[-2:] == output_size
assert repr(transfo) == f"Resize(output_size={output_size}, interpolation='bilinear')"
transfo = Resize(output_size, preserve_aspect_ratio=True)
input_t = torch.ones((3, 32, 64), dtype=torch.float32)
out = transfo(input_t)
assert out.shape[-2:] == output_size
assert not torch.all(out == 1)
# Asymmetric padding
assert torch.all(out[:, -1] == 0) and torch.all(out[:, 0] == 1)
# Symmetric padding
transfo = Resize(output_size, preserve_aspect_ratio=True, symmetric_pad=True)
assert repr(transfo) == (
f"Resize(output_size={output_size}, interpolation='bilinear', "
f"preserve_aspect_ratio=True, symmetric_pad=True)"
)
out = transfo(input_t)
assert out.shape[-2:] == output_size
# symmetric padding
assert torch.all(out[:, -1] == 0) and torch.all(out[:, 0] == 0)
# Inverse aspect ratio
input_t = torch.ones((3, 64, 32), dtype=torch.float32)
out = transfo(input_t)
assert not torch.all(out == 1)
assert out.shape[-2:] == output_size
# Same aspect ratio
output_size = (32, 128)
transfo = Resize(output_size, preserve_aspect_ratio=True)
out = transfo(torch.ones((3, 16, 64), dtype=torch.float32))
assert out.shape[-2:] == output_size
# FP16
input_t = torch.ones((3, 64, 64), dtype=torch.float16)
out = transfo(input_t)
assert out.dtype == torch.float16
@pytest.mark.parametrize(
"rgb_min",
[
0.2,
0.4,
0.6,
],
)
def test_invert_colorize(rgb_min):
transfo = ColorInversion(min_val=rgb_min)
input_t = torch.ones((8, 3, 32, 32), dtype=torch.float32)
out = transfo(input_t)
assert torch.all(out <= 1 - rgb_min + 1e-4)
assert torch.all(out >= 0)
input_t = torch.full((8, 3, 32, 32), 255, dtype=torch.uint8)
out = transfo(input_t)
assert torch.all(out <= int(math.ceil(255 * (1 - rgb_min + 1e-4))))
assert torch.all(out >= 0)
# FP16
input_t = torch.ones((8, 3, 32, 32), dtype=torch.float16)
out = transfo(input_t)
assert out.dtype == torch.float16
def test_rotate_sample():
img = torch.ones((3, 200, 100), dtype=torch.float32)
boxes = np.array([0, 0, 100, 200])[None, ...]
polys = np.stack((boxes[..., [0, 1]], boxes[..., [2, 1]], boxes[..., [2, 3]], boxes[..., [0, 3]]), axis=1)
rel_boxes = np.array([0, 0, 1, 1], dtype=np.float32)[None, ...]
rel_polys = np.stack(
(rel_boxes[..., [0, 1]], rel_boxes[..., [2, 1]], rel_boxes[..., [2, 3]], rel_boxes[..., [0, 3]]), axis=1
)
# No angle
rotated_img, rotated_geoms = rotate_sample(img, boxes, 0, False)
assert torch.all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, boxes, 0, True)
assert torch.all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 0, False)
assert torch.all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 0, True)
assert torch.all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
# No expansion
expected_img = torch.zeros((3, 200, 100), dtype=torch.float32)
expected_img[:, 50:150] = 1
expected_polys = np.array([[0, 0.75], [0, 0.25], [1, 0.25], [1, 0.75]])[None, ...]
rotated_img, rotated_geoms = rotate_sample(img, boxes, 90, False)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 90, False)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_boxes, 90, False)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_polys, 90, False)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
# Expansion
expected_img = torch.ones((3, 100, 200), dtype=torch.float32)
expected_polys = np.array([[0, 1], [0, 0], [1, 0], [1, 1]], dtype=np.float32)[None, ...]
rotated_img, rotated_geoms = rotate_sample(img, boxes, 90, True)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 90, True)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_boxes, 90, True)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_polys, 90, True)
assert torch.all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
with pytest.raises(AssertionError):
rotate_sample(img, boxes[None, ...], 90, False)
def test_random_rotate():
rotator = RandomRotate(max_angle=10.0, expand=False)
input_t = torch.ones((3, 50, 50), dtype=torch.float32)
boxes = np.array([[15, 20, 35, 30]])
r_img, r_boxes = rotator(input_t, boxes)
assert r_img.shape == input_t.shape
rotator = RandomRotate(max_angle=10.0, expand=True)
r_img, r_boxes = rotator(input_t, boxes)
assert r_img.shape != input_t.shape
# FP16 (only on GPU)
if torch.cuda.is_available():
input_t = torch.ones((3, 50, 50), dtype=torch.float16).cuda()
r_img, _ = rotator(input_t, boxes)
assert r_img.dtype == torch.float16
def test_crop_detection():
img = torch.ones((3, 50, 50), dtype=torch.float32)
abs_boxes = np.array(
[
[15, 20, 35, 30],
[5, 10, 10, 20],
]
)
crop_box = (12 / 50, 23 / 50, 50 / 50, 50 / 50)
c_img, c_boxes = crop_detection(img, abs_boxes, crop_box)
assert c_img.shape == (3, 26, 37)
assert c_boxes.shape == (1, 4)
assert np.all(c_boxes == np.array([15 - 12, 0, 35 - 12, 30 - 23])[None, ...])
rel_boxes = np.array(
[
[0.3, 0.4, 0.7, 0.6],
[0.1, 0.2, 0.2, 0.4],
]
)
crop_box = (0.24, 0.46, 1.0, 1.0)
c_img, c_boxes = crop_detection(img, rel_boxes, crop_box)
assert c_img.shape == (3, 26, 37)
assert c_boxes.shape == (1, 4)
assert np.abs(c_boxes - np.array([0.06 / 0.76, 0.0, 0.46 / 0.76, 0.14 / 0.54])[None, ...]).mean() < 1e-7
# FP16
img = torch.ones((3, 50, 50), dtype=torch.float16)
c_img, _ = crop_detection(img, abs_boxes, crop_box)
assert c_img.dtype == torch.float16
with pytest.raises(AssertionError):
crop_detection(img, abs_boxes, (2, 6, 24, 56))
def test_random_crop():
cropper = RandomCrop(scale=(0.5, 1.0), ratio=(0.75, 1.33))
input_t = torch.ones((3, 50, 50), dtype=torch.float32)
boxes = np.array([[15, 20, 35, 30]])
img, target = cropper(input_t, dict(boxes=boxes))
# Check the scale
assert img.shape[-1] * img.shape[-2] >= 0.4 * input_t.shape[-1] * input_t.shape[-2]
# Check aspect ratio
assert 0.65 <= img.shape[-2] / img.shape[-1] <= 1.5
# Check the target
assert np.all(target["boxes"] >= 0)
assert np.all(target["boxes"][:, [0, 2]] <= img.shape[-1]) and np.all(target["boxes"][:, [1, 3]] <= img.shape[-2])
@pytest.mark.parametrize(
"input_dtype, input_size",
[
[torch.float32, (3, 32, 32)],
[torch.uint8, (3, 32, 32)],
],
)
def test_channel_shuffle(input_dtype, input_size):
transfo = ChannelShuffle()
input_t = torch.rand(input_size, dtype=torch.float32)
if input_dtype == torch.uint8:
input_t = (255 * input_t).round()
input_t = input_t.to(dtype=input_dtype)
out = transfo(input_t)
assert isinstance(out, torch.Tensor)
assert out.shape == input_size
assert out.dtype == input_dtype
# Ensure that nothing has changed apart from channel order
if input_dtype == torch.uint8:
assert torch.all(input_t.sum(0) == out.sum(0))
else:
# Float approximation
assert (input_t.sum(0) - out.sum(0)).abs().mean() < 1e-7
@pytest.mark.parametrize(
"input_dtype,input_shape",
[
[torch.float32, (3, 32, 32)],
[torch.uint8, (3, 32, 32)],
],
)
def test_gaussian_noise(input_dtype, input_shape):
transform = GaussianNoise(0.0, 1.0)
input_t = torch.rand(input_shape, dtype=torch.float32)
if input_dtype == torch.uint8:
input_t = (255 * input_t).round()
input_t = input_t.to(dtype=input_dtype)
transformed = transform(input_t)
assert isinstance(transformed, torch.Tensor)
assert transformed.shape == input_shape
assert transformed.dtype == input_dtype
assert torch.any(transformed != input_t)
assert torch.all(transformed >= 0)
if input_dtype == torch.uint8:
assert torch.all(transformed <= 255)
else:
assert torch.all(transformed <= 1.0)
@pytest.mark.parametrize("p", [1, 0])
def test_randomhorizontalflip(p):
# testing for 2 cases, with flip probability 1 and 0.
transform = RandomHorizontalFlip(p)
input_t = torch.ones((3, 32, 32), dtype=torch.float32)
input_t[..., :16] = 0
target = {"boxes": np.array([[0.1, 0.1, 0.3, 0.4]], dtype=np.float32), "labels": np.ones(1, dtype=np.int64)}
transformed, _target = transform(input_t, target)
assert isinstance(transformed, torch.Tensor)
assert transformed.shape == input_t.shape
assert transformed.dtype == input_t.dtype
# integrity check of targets
assert isinstance(_target, dict)
assert all(isinstance(val, np.ndarray) for val in _target.values())
assert _target["boxes"].dtype == np.float32
assert _target["labels"].dtype == np.int64
if p == 1:
assert np.all(_target["boxes"] == np.array([[0.7, 0.1, 0.9, 0.4]], dtype=np.float32))
assert torch.all(transformed.mean((0, 1)) == torch.tensor([1] * 16 + [0] * 16, dtype=torch.float32))
elif p == 0:
assert np.all(_target["boxes"] == np.array([[0.1, 0.1, 0.3, 0.4]], dtype=np.float32))
assert torch.all(transformed.mean((0, 1)) == torch.tensor([0] * 16 + [1] * 16, dtype=torch.float32))
assert np.all(_target["labels"] == np.ones(1, dtype=np.int64))
@pytest.mark.parametrize(
"input_dtype,input_shape",
[
[torch.float32, (3, 32, 32)],
[torch.uint8, (3, 32, 32)],
[torch.float32, (3, 64, 32)],
[torch.uint8, (3, 64, 32)],
],
)
def test_random_shadow(input_dtype, input_shape):
transform = RandomShadow((0.2, 0.8))
input_t = torch.ones(input_shape, dtype=torch.float32)
if input_dtype == torch.uint8:
input_t = (255 * input_t).round()
input_t = input_t.to(dtype=input_dtype)
transformed = transform(input_t)
assert isinstance(transformed, torch.Tensor)
assert transformed.shape == input_shape
assert transformed.dtype == input_dtype
# The shadow will darken the picture
assert input_t.float().mean() >= transformed.float().mean()
assert torch.all(transformed >= 0)
if input_dtype == torch.uint8:
assert torch.all(transformed <= 255)
else:
assert torch.all(transformed <= 1.0)
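# Illustrative sketch (not a test): applying two of the transforms exercised above,
# keeping the same input layouts the tests use (CHW for Resize, NCHW for ColorInversion).
def _example_transforms_usage():
    resized = Resize((32, 32), preserve_aspect_ratio=True, symmetric_pad=True)(torch.ones((3, 64, 64)))
    inverted = ColorInversion(min_val=0.2)(torch.ones((8, 3, 32, 32)))
    return resized, inverted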
|
from doctr.file_utils import is_torch_available
def test_file_utils():
assert is_torch_available()
|
import numpy as np
import pytest
import torch
from doctr.io import decode_img_as_tensor, read_img_as_tensor, tensor_from_numpy
def test_read_img_as_tensor(mock_image_path):
img = read_img_as_tensor(mock_image_path)
assert isinstance(img, torch.Tensor)
assert img.dtype == torch.float32
assert img.shape == (3, 900, 1200)
img = read_img_as_tensor(mock_image_path, dtype=torch.float16)
assert img.dtype == torch.float16
img = read_img_as_tensor(mock_image_path, dtype=torch.uint8)
assert img.dtype == torch.uint8
def test_decode_img_as_tensor(mock_image_stream):
img = decode_img_as_tensor(mock_image_stream)
assert isinstance(img, torch.Tensor)
assert img.dtype == torch.float32
assert img.shape == (3, 900, 1200)
img = decode_img_as_tensor(mock_image_stream, dtype=torch.float16)
assert img.dtype == torch.float16
img = decode_img_as_tensor(mock_image_stream, dtype=torch.uint8)
assert img.dtype == torch.uint8
def test_tensor_from_numpy(mock_image_stream):
with pytest.raises(ValueError):
tensor_from_numpy(np.zeros((256, 256, 3)), torch.int64)
out = tensor_from_numpy(np.zeros((256, 256, 3), dtype=np.uint8))
assert isinstance(out, torch.Tensor)
assert out.dtype == torch.float32
assert out.shape == (3, 256, 256)
out = tensor_from_numpy(np.zeros((256, 256, 3), dtype=np.uint8), dtype=torch.float16)
assert out.dtype == torch.float16
out = tensor_from_numpy(np.zeros((256, 256, 3), dtype=np.uint8), dtype=torch.uint8)
assert out.dtype == torch.uint8
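# Illustrative sketch (not a test): converting an HWC uint8 numpy image into a CHW torch
# tensor with a chosen dtype, mirroring the checks in test_tensor_from_numpy above.
def _example_tensor_conversion():
    img_np = np.zeros((256, 256, 3), dtype=np.uint8)
    return tensor_from_numpy(img_np, dtype=torch.float16)  # (3, 256, 256) float16 tensor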
|
import pytest
import torch
from doctr.models import obj_detection
@pytest.mark.parametrize(
"arch_name, input_shape, pretrained",
[
["fasterrcnn_mobilenet_v3_large_fpn", (3, 512, 512), True],
["fasterrcnn_mobilenet_v3_large_fpn", (3, 512, 512), False],
],
)
def test_detection_models(arch_name, input_shape, pretrained):
batch_size = 2
model = obj_detection.__dict__[arch_name](pretrained=pretrained).eval()
assert isinstance(model, torch.nn.Module)
input_tensor = torch.rand((batch_size, *input_shape))
if torch.cuda.is_available():
model.cuda()
input_tensor = input_tensor.cuda()
out = model(input_tensor)
assert isinstance(out, list) and all(isinstance(det, dict) for det in out)
# Train mode
model = model.train()
target = [
dict(boxes=torch.tensor([[0.5, 0.5, 1, 1]], dtype=torch.float32), labels=torch.tensor((0,), dtype=torch.long)),
dict(boxes=torch.tensor([[0.5, 0.5, 1, 1]], dtype=torch.float32), labels=torch.tensor((0,), dtype=torch.long)),
]
if torch.cuda.is_available():
target = [{k: v.cuda() for k, v in t.items()} for t in target]
out = model(input_tensor, target)
assert isinstance(out, dict) and all(isinstance(v, torch.Tensor) for v in out.values())
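# Illustrative sketch (not a test): inference with the object detection model exercised
# above; in eval mode it returns one dict of detections per input image.
def _example_obj_detection_usage():
    model = obj_detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False).eval()
    with torch.no_grad():
        return model(torch.rand((2, 3, 512, 512)))  # one dict of detections per image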
|
import os
import tempfile
import numpy as np
import onnxruntime
import pytest
import torch
from doctr.file_utils import CLASS_NAME
from doctr.models import detection
from doctr.models.detection._utils import dilate, erode
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.utils import export_model_to_onnx
@pytest.mark.parametrize(
"arch_name, input_shape, output_size, out_prob",
[
["db_resnet34", (3, 512, 512), (1, 512, 512), True],
["db_resnet50", (3, 512, 512), (1, 512, 512), True],
["db_mobilenet_v3_large", (3, 512, 512), (1, 512, 512), True],
["linknet_resnet18", (3, 512, 512), (1, 512, 512), False],
["linknet_resnet34", (3, 512, 512), (1, 512, 512), False],
["linknet_resnet50", (3, 512, 512), (1, 512, 512), False],
],
)
def test_detection_models(arch_name, input_shape, output_size, out_prob):
batch_size = 2
model = detection.__dict__[arch_name](pretrained=False).eval()
assert isinstance(model, torch.nn.Module)
input_tensor = torch.rand((batch_size, *input_shape))
target = [
{CLASS_NAME: np.array([[0.5, 0.5, 1, 1], [0.5, 0.5, 0.8, 0.8]], dtype=np.float32)},
{CLASS_NAME: np.array([[0.5, 0.5, 1, 1], [0.5, 0.5, 0.8, 0.9]], dtype=np.float32)},
]
if torch.cuda.is_available():
model.cuda()
input_tensor = input_tensor.cuda()
out = model(input_tensor, target, return_model_output=True, return_preds=True)
assert isinstance(out, dict)
assert len(out) == 3
# Check proba map
assert out["out_map"].shape == (batch_size, *output_size)
assert out["out_map"].dtype == torch.float32
if out_prob:
assert torch.all((out["out_map"] >= 0) & (out["out_map"] <= 1))
# Check boxes
for boxes_dict in out["preds"]:
for boxes in boxes_dict.values():
assert boxes.shape[1] == 5
assert np.all(boxes[:, :2] < boxes[:, 2:4])
assert np.all(boxes[:, :4] >= 0) and np.all(boxes[:, :4] <= 1)
# Check loss
assert isinstance(out["loss"], torch.Tensor)
# Check the rotated case (same targets)
target = [
{
CLASS_NAME: np.array(
[[[0.5, 0.5], [1, 0.5], [1, 1], [0.5, 1]], [[0.5, 0.5], [0.8, 0.5], [0.8, 0.8], [0.5, 0.8]]],
dtype=np.float32,
)
},
{
CLASS_NAME: np.array(
[[[0.5, 0.5], [1, 0.5], [1, 1], [0.5, 1]], [[0.5, 0.5], [0.8, 0.5], [0.8, 0.9], [0.5, 0.9]]],
dtype=np.float32,
)
},
]
loss = model(input_tensor, target)["loss"]
assert isinstance(loss, torch.Tensor) and ((loss - out["loss"]).abs() / loss).item() < 1e-1
@pytest.mark.parametrize(
"arch_name",
[
"db_resnet34",
"db_resnet50",
"db_mobilenet_v3_large",
"linknet_resnet18",
],
)
def test_detection_zoo(arch_name):
# Model
predictor = detection.zoo.detection_predictor(arch_name, pretrained=False)
predictor.model.eval()
# object check
assert isinstance(predictor, DetectionPredictor)
input_tensor = torch.rand((2, 3, 1024, 1024))
if torch.cuda.is_available():
predictor.model.cuda()
input_tensor = input_tensor.cuda()
with torch.no_grad():
out = predictor(input_tensor)
assert all(isinstance(boxes, dict) for boxes in out)
assert all(isinstance(boxes[CLASS_NAME], np.ndarray) and boxes[CLASS_NAME].shape[1] == 5 for boxes in out)
def test_erode():
x = torch.zeros((1, 1, 3, 3))
x[..., 1, 1] = 1
expected = torch.zeros((1, 1, 3, 3))
out = erode(x, 3)
assert torch.equal(out, expected)
def test_dilate():
x = torch.zeros((1, 1, 3, 3))
x[..., 1, 1] = 1
expected = torch.ones((1, 1, 3, 3))
out = dilate(x, 3)
assert torch.equal(out, expected)
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["db_resnet34", (3, 512, 512), (1, 512, 512)],
["db_resnet50", (3, 512, 512), (1, 512, 512)],
["db_mobilenet_v3_large", (3, 512, 512), (1, 512, 512)],
["linknet_resnet18", (3, 512, 512), (1, 512, 512)],
["linknet_resnet34", (3, 512, 512), (1, 512, 512)],
["linknet_resnet50", (3, 512, 512), (1, 512, 512)],
],
)
def test_models_onnx_export(arch_name, input_shape, output_size):
# Model
batch_size = 2
model = detection.__dict__[arch_name](pretrained=True, exportable=True).eval()
dummy_input = torch.rand((batch_size, *input_shape), dtype=torch.float32)
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path = export_model_to_onnx(model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input)
assert os.path.exists(model_path)
# Inference
ort_session = onnxruntime.InferenceSession(
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
assert isinstance(ort_outs, list) and len(ort_outs) == 1
assert ort_outs[0].shape == (batch_size, *output_size)
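# Illustrative sketch (not a test): running the detection predictor on a batch of pages,
# following test_detection_zoo above. Each page yields a dict of (N, 5) boxes keyed by
# class name.
def _example_detection_usage():
    predictor = detection.zoo.detection_predictor("db_resnet34", pretrained=False)
    pages = torch.rand((2, 3, 1024, 1024))
    with torch.no_grad():
        out = predictor(pages)
    return [boxes[CLASS_NAME] for boxes in out]  # one (N, 5) array per page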
|
import os
import tempfile
import cv2
import numpy as np
import onnxruntime
import pytest
import torch
from doctr.models import classification
from doctr.models.classification.predictor import CropOrientationPredictor
from doctr.models.utils import export_model_to_onnx
def _test_classification(model, input_shape, output_size, batch_size=2):
# Forward
with torch.no_grad():
out = model(torch.rand((batch_size, *input_shape), dtype=torch.float32))
# Output checks
assert isinstance(out, torch.Tensor)
assert out.dtype == torch.float32
assert out.numpy().shape == (batch_size, *output_size)
# Check FP16
if torch.cuda.is_available():
model = model.half().cuda()
with torch.no_grad():
out = model(torch.rand((batch_size, *input_shape), dtype=torch.float16).cuda())
assert out.dtype == torch.float16
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["vgg16_bn_r", (3, 32, 32), (126,)],
["resnet18", (3, 32, 32), (126,)],
["resnet31", (3, 32, 32), (126,)],
["resnet34", (3, 32, 32), (126,)],
["resnet34_wide", (3, 32, 32), (126,)],
["resnet50", (3, 32, 32), (126,)],
["magc_resnet31", (3, 32, 32), (126,)],
["mobilenet_v3_small", (3, 32, 32), (126,)],
["mobilenet_v3_large", (3, 32, 32), (126,)],
["vit_s", (3, 32, 32), (126,)],
["vit_b", (3, 32, 32), (126,)],
],
)
def test_classification_architectures(arch_name, input_shape, output_size):
# Model
model = classification.__dict__[arch_name](pretrained=True).eval()
_test_classification(model, input_shape, output_size)
# Check that pretrained weights can be loaded for everything but the last layer
classification.__dict__[arch_name](pretrained=True, num_classes=10)
@pytest.mark.parametrize(
"arch_name, input_shape",
[
["mobilenet_v3_small_orientation", (3, 128, 128)],
],
)
def test_classification_models(arch_name, input_shape):
batch_size = 8
model = classification.__dict__[arch_name](pretrained=False, input_shape=input_shape).eval()
assert isinstance(model, torch.nn.Module)
input_tensor = torch.rand((batch_size, *input_shape))
if torch.cuda.is_available():
model.cuda()
input_tensor = input_tensor.cuda()
out = model(input_tensor)
assert isinstance(out, torch.Tensor)
assert out.shape == (8, 4)
@pytest.mark.parametrize(
"arch_name",
[
"mobilenet_v3_small_orientation",
],
)
def test_classification_zoo(arch_name):
batch_size = 16
# Model
predictor = classification.zoo.crop_orientation_predictor(arch_name, pretrained=False)
predictor.model.eval()
with pytest.raises(ValueError):
predictor = classification.zoo.crop_orientation_predictor(arch="wrong_model", pretrained=False)
# object check
assert isinstance(predictor, CropOrientationPredictor)
input_tensor = torch.rand((batch_size, 3, 128, 128))
if torch.cuda.is_available():
predictor.model.cuda()
input_tensor = input_tensor.cuda()
with torch.no_grad():
out = predictor(input_tensor)
out = predictor(input_tensor)
assert isinstance(out, list) and len(out) == batch_size
assert all(isinstance(pred, int) for pred in out)
def test_crop_orientation_model(mock_text_box):
text_box_0 = cv2.imread(mock_text_box)
text_box_90 = np.rot90(text_box_0, 1)
text_box_180 = np.rot90(text_box_0, 2)
text_box_270 = np.rot90(text_box_0, 3)
classifier = classification.crop_orientation_predictor("mobilenet_v3_small_orientation", pretrained=True)
assert classifier([text_box_0, text_box_90, text_box_180, text_box_270]) == [0, 1, 2, 3]
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["vgg16_bn_r", (3, 32, 32), (126,)],
["resnet18", (3, 32, 32), (126,)],
["resnet31", (3, 32, 32), (126,)],
["resnet34", (3, 32, 32), (126,)],
["resnet34_wide", (3, 32, 32), (126,)],
["resnet50", (3, 32, 32), (126,)],
["magc_resnet31", (3, 32, 32), (126,)],
["mobilenet_v3_small", (3, 32, 32), (126,)],
["mobilenet_v3_large", (3, 32, 32), (126,)],
["mobilenet_v3_small_orientation", (3, 128, 128), (4,)],
["vit_b", (3, 32, 32), (126,)],
],
)
def test_models_onnx_export(arch_name, input_shape, output_size):
# Model
batch_size = 2
model = classification.__dict__[arch_name](pretrained=True).eval()
dummy_input = torch.rand((batch_size, *input_shape), dtype=torch.float32)
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path = export_model_to_onnx(model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input)
assert os.path.exists(model_path)
# Inference
ort_session = onnxruntime.InferenceSession(
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(["logits"], {"input": dummy_input.numpy()})
assert isinstance(ort_outs, list) and len(ort_outs) == 1
assert ort_outs[0].shape == (batch_size, *output_size)
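# Illustrative sketch (not a test): predicting the orientation of word crops, mirroring
# test_crop_orientation_model above. Predictions are class indices (0, 1, 2, 3 for
# 0, 90, 180 and 270 degrees).
def _example_orientation_usage(crops):
    classifier = classification.crop_orientation_predictor("mobilenet_v3_small_orientation", pretrained=True)
    return classifier(crops)  # one int per crop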
|
import os
from shutil import move
import numpy as np
import pytest
import torch
from torch.utils.data import DataLoader, RandomSampler
from doctr import datasets
from doctr.file_utils import CLASS_NAME
from doctr.transforms import Resize
def _validate_dataset(ds, input_size, batch_size=2, class_indices=False, is_polygons=False):
# Fetch one sample
img, target = ds[0]
assert isinstance(img, torch.Tensor)
assert img.shape == (3, *input_size)
assert img.dtype == torch.float32
assert isinstance(target, dict)
assert isinstance(target["boxes"], np.ndarray) and target["boxes"].dtype == np.float32
if is_polygons:
assert target["boxes"].ndim == 3 and target["boxes"].shape[1:] == (4, 2)
else:
assert target["boxes"].ndim == 2 and target["boxes"].shape[1:] == (4,)
assert np.all(np.logical_and(target["boxes"] <= 1, target["boxes"] >= 0))
if class_indices:
assert isinstance(target["labels"], np.ndarray) and target["labels"].dtype == np.int64
else:
assert isinstance(target["labels"], list) and all(isinstance(s, str) for s in target["labels"])
assert len(target["labels"]) == len(target["boxes"])
# Check batching
loader = DataLoader(
ds,
batch_size=batch_size,
drop_last=True,
sampler=RandomSampler(ds),
num_workers=0,
pin_memory=True,
collate_fn=ds.collate_fn,
)
images, targets = next(iter(loader))
assert isinstance(images, torch.Tensor) and images.shape == (batch_size, 3, *input_size)
assert isinstance(targets, list) and all(isinstance(elt, dict) for elt in targets)
def _validate_dataset_recognition_part(ds, input_size, batch_size=2):
# Fetch one sample
img, label = ds[0]
assert isinstance(img, torch.Tensor)
assert img.shape == (3, *input_size)
assert img.dtype == torch.float32
assert isinstance(label, str)
# Check batching
loader = DataLoader(
ds,
batch_size=batch_size,
drop_last=True,
sampler=RandomSampler(ds),
num_workers=0,
pin_memory=True,
collate_fn=ds.collate_fn,
)
images, labels = next(iter(loader))
assert isinstance(images, torch.Tensor) and images.shape == (batch_size, 3, *input_size)
assert isinstance(labels, list) and all(isinstance(elt, str) for elt in labels)
def test_visiondataset():
url = "https://data.deepai.org/mnist.zip"
with pytest.raises(ValueError):
datasets.datasets.VisionDataset(url, download=False)
dataset = datasets.datasets.VisionDataset(url, download=True, extract_archive=True)
assert len(dataset) == 0
assert repr(dataset) == "VisionDataset()"
def test_detection_dataset(mock_image_folder, mock_detection_label):
input_size = (1024, 1024)
ds = datasets.DetectionDataset(
img_folder=mock_image_folder,
label_path=mock_detection_label,
img_transforms=Resize(input_size),
)
assert len(ds) == 5
img, target_dict = ds[0]
target = target_dict[CLASS_NAME]
assert isinstance(img, torch.Tensor)
assert img.dtype == torch.float32
assert img.shape[-2:] == input_size
# Bounding boxes
assert isinstance(target_dict, dict)
assert isinstance(target, np.ndarray) and target.dtype == np.float32
assert np.all(np.logical_and(target[:, :4] >= 0, target[:, :4] <= 1))
assert target.shape[1] == 4
loader = DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)
images, targets = next(iter(loader))
assert isinstance(images, torch.Tensor) and images.shape == (2, 3, *input_size)
assert isinstance(targets, list) and all(
isinstance(elt, np.ndarray) for target in targets for elt in target.values()
)
# Rotated DS
rotated_ds = datasets.DetectionDataset(
img_folder=mock_image_folder,
label_path=mock_detection_label,
img_transforms=Resize(input_size),
use_polygons=True,
)
_, r_target = rotated_ds[0]
assert r_target[CLASS_NAME].shape[1:] == (4, 2)
# File existence check
img_name, _ = ds.data[0]
move(os.path.join(ds.root, img_name), os.path.join(ds.root, "tmp_file"))
with pytest.raises(FileNotFoundError):
datasets.DetectionDataset(mock_image_folder, mock_detection_label)
move(os.path.join(ds.root, "tmp_file"), os.path.join(ds.root, img_name))
def test_recognition_dataset(mock_image_folder, mock_recognition_label):
input_size = (32, 128)
ds = datasets.RecognitionDataset(
img_folder=mock_image_folder,
labels_path=mock_recognition_label,
img_transforms=Resize(input_size, preserve_aspect_ratio=True),
)
assert len(ds) == 5
image, label = ds[0]
assert isinstance(image, torch.Tensor)
assert image.shape[-2:] == input_size
assert image.dtype == torch.float32
assert isinstance(label, str)
loader = DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)
images, labels = next(iter(loader))
assert isinstance(images, torch.Tensor) and images.shape == (2, 3, *input_size)
assert isinstance(labels, list) and all(isinstance(elt, str) for elt in labels)
# File existence check
img_name, _ = ds.data[0]
move(os.path.join(ds.root, img_name), os.path.join(ds.root, "tmp_file"))
with pytest.raises(FileNotFoundError):
datasets.RecognitionDataset(mock_image_folder, mock_recognition_label)
move(os.path.join(ds.root, "tmp_file"), os.path.join(ds.root, img_name))
@pytest.mark.parametrize(
"use_polygons",
[False, True],
)
def test_ocrdataset(mock_ocrdataset, use_polygons):
input_size = (512, 512)
ds = datasets.OCRDataset(
*mock_ocrdataset,
img_transforms=Resize(input_size),
use_polygons=use_polygons,
)
assert len(ds) == 3
_validate_dataset(ds, input_size, is_polygons=use_polygons)
# File existence check
img_name, _ = ds.data[0]
move(os.path.join(ds.root, img_name), os.path.join(ds.root, "tmp_file"))
with pytest.raises(FileNotFoundError):
datasets.OCRDataset(*mock_ocrdataset)
move(os.path.join(ds.root, "tmp_file"), os.path.join(ds.root, img_name))
def test_charactergenerator():
input_size = (32, 32)
vocab = "abcdef"
ds = datasets.CharacterGenerator(
vocab=vocab,
num_samples=10,
cache_samples=True,
img_transforms=Resize(input_size),
)
assert len(ds) == 10
image, label = ds[0]
assert isinstance(image, torch.Tensor)
assert image.shape[-2:] == input_size
assert image.dtype == torch.float32
assert isinstance(label, int) and label < len(vocab)
loader = DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)
images, targets = next(iter(loader))
assert isinstance(images, torch.Tensor) and images.shape == (2, 3, *input_size)
assert isinstance(targets, torch.Tensor) and targets.shape == (2,)
assert targets.dtype == torch.int64
def test_wordgenerator():
input_size = (32, 128)
wordlen_range = (1, 10)
vocab = "abcdef"
ds = datasets.WordGenerator(
vocab=vocab,
min_chars=wordlen_range[0],
max_chars=wordlen_range[1],
num_samples=10,
cache_samples=True,
img_transforms=Resize(input_size),
)
assert len(ds) == 10
image, target = ds[0]
assert isinstance(image, torch.Tensor)
assert image.shape[-2:] == input_size
assert image.dtype == torch.float32
assert isinstance(target, str) and len(target) >= wordlen_range[0] and len(target) <= wordlen_range[1]
assert all(char in vocab for char in target)
loader = DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)
images, targets = next(iter(loader))
assert isinstance(images, torch.Tensor) and images.shape == (2, 3, *input_size)
assert isinstance(targets, list) and len(targets) == 2 and all(isinstance(t, str) for t in targets)
@pytest.mark.parametrize(
"input_size, num_samples, rotate",
[
[[512, 512], 3, True], # Actual set has 2700 training samples and 300 test samples
[[512, 512], 3, False],
],
)
def test_artefact_detection(input_size, num_samples, rotate, mock_doc_artefacts):
# monkeypatch the path to temporary dataset
datasets.DocArtefacts.URL = mock_doc_artefacts
datasets.DocArtefacts.SHA256 = None
ds = datasets.DocArtefacts(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
cache_dir="/".join(mock_doc_artefacts.split("/")[:-2]),
cache_subdir=mock_doc_artefacts.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"DocArtefacts(train={True})"
_validate_dataset(ds, input_size, class_indices=True, is_polygons=rotate)
# NOTE: the following datasets also support the recognition task
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 626 training samples and 360 test samples
[[512, 512], 3, False, False],
[[32, 128], 15, True, True], # recognition
[[32, 128], 15, False, True],
],
)
def test_sroie(input_size, num_samples, rotate, recognition, mock_sroie_dataset):
# monkeypatch the path to temporary dataset
datasets.SROIE.TRAIN = (mock_sroie_dataset, None)
ds = datasets.SROIE(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_sroie_dataset.split("/")[:-2]),
cache_subdir=mock_sroie_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SROIE(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 5, True, False], # Actual set has 229 train and 233 test samples
[[512, 512], 5, False, False],
[[32, 128], 25, True, True], # recognition
[[32, 128], 25, False, True],
],
)
def test_ic13_dataset(input_size, num_samples, rotate, recognition, mock_ic13):
ds = datasets.IC13(
*mock_ic13,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
)
assert len(ds) == num_samples
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 7149 train and 796 test samples
[[512, 512], 3, False, False],
[[32, 128], 5, True, True], # recognition
[[32, 128], 5, False, True],
],
)
def test_imgur5k_dataset(input_size, num_samples, rotate, recognition, mock_imgur5k):
ds = datasets.IMGUR5K(
*mock_imgur5k,
train=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
)
assert len(ds) == num_samples - 1 # -1 because of the 90/10 train/test split
assert repr(ds) == f"IMGUR5K(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[32, 128], 3, True, False], # Actual set has 33402 training samples and 13068 test samples
[[32, 128], 3, False, False],
[[32, 128], 12, True, True], # recognition
[[32, 128], 12, False, True],
],
)
def test_svhn(input_size, num_samples, rotate, recognition, mock_svhn_dataset):
# monkeypatch the path to temporary dataset
datasets.SVHN.TRAIN = (mock_svhn_dataset, None, "svhn_train.tar")
ds = datasets.SVHN(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_svhn_dataset.split("/")[:-2]),
cache_subdir=mock_svhn_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SVHN(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 149 training samples and 50 test samples
[[512, 512], 3, False, False],
[[32, 128], 9, True, True], # recognition
[[32, 128], 9, False, True],
],
)
def test_funsd(input_size, num_samples, rotate, recognition, mock_funsd_dataset):
# monkeypatch the path to temporary dataset
datasets.FUNSD.URL = mock_funsd_dataset
datasets.FUNSD.SHA256 = None
datasets.FUNSD.FILE_NAME = "funsd.zip"
ds = datasets.FUNSD(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_funsd_dataset.split("/")[:-2]),
cache_subdir=mock_funsd_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"FUNSD(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 800 training samples and 100 test samples
[[512, 512], 3, False, False],
[[32, 128], 9, True, True], # recognition
[[32, 128], 9, False, True],
],
)
def test_cord(input_size, num_samples, rotate, recognition, mock_cord_dataset):
# monkeypatch the path to temporary dataset
datasets.CORD.TRAIN = (mock_cord_dataset, None)
ds = datasets.CORD(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_cord_dataset.split("/")[:-2]),
cache_subdir=mock_cord_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"CORD(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 2, True, False], # Actual set has 772875 training samples and 85875 test samples
[[512, 512], 2, False, False],
[[32, 128], 10, True, True], # recognition
[[32, 128], 10, False, True],
],
)
def test_synthtext(input_size, num_samples, rotate, recognition, mock_synthtext_dataset):
# monkeypatch the path to temporary dataset
datasets.SynthText.URL = mock_synthtext_dataset
datasets.SynthText.SHA256 = None
ds = datasets.SynthText(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_synthtext_dataset.split("/")[:-2]),
cache_subdir=mock_synthtext_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SynthText(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[32, 128], 1, True, False], # Actual set has 2000 training samples and 3000 test samples
[[32, 128], 1, False, False],
[[32, 128], 1, True, True], # recognition
[[32, 128], 1, False, True],
],
)
def test_iiit5k(input_size, num_samples, rotate, recognition, mock_iiit5k_dataset):
# monkeypatch the path to temporary dataset
datasets.IIIT5K.URL = mock_iiit5k_dataset
datasets.IIIT5K.SHA256 = None
ds = datasets.IIIT5K(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_iiit5k_dataset.split("/")[:-2]),
cache_subdir=mock_iiit5k_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"IIIT5K(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size, batch_size=1)
else:
_validate_dataset(ds, input_size, batch_size=1, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 100 training samples and 249 test samples
[[512, 512], 3, False, False],
[[32, 128], 3, True, True], # recognition
[[32, 128], 3, False, True],
],
)
def test_svt(input_size, num_samples, rotate, recognition, mock_svt_dataset):
# monkeypatch the path to temporary dataset
datasets.SVT.URL = mock_svt_dataset
datasets.SVT.SHA256 = None
ds = datasets.SVT(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_svt_dataset.split("/")[:-2]),
cache_subdir=mock_svt_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SVT(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 246 training samples and 249 test samples
[[512, 512], 3, False, False],
[[32, 128], 3, True, True], # recognition
[[32, 128], 3, False, True],
],
)
def test_ic03(input_size, num_samples, rotate, recognition, mock_ic03_dataset):
# monkeypatch the path to temporary dataset
datasets.IC03.TRAIN = (mock_ic03_dataset, None, "ic03_train.zip")
ds = datasets.IC03(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_ic03_dataset.split("/")[:-2]),
cache_subdir=mock_ic03_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"IC03(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
# NOTE: the following datasets are for the recognition task only
def test_mjsynth_dataset(mock_mjsynth_dataset):
input_size = (32, 128)
ds = datasets.MJSynth(
*mock_mjsynth_dataset,
img_transforms=Resize(input_size, preserve_aspect_ratio=True),
)
assert len(ds) == 4 # Actual set has 7581382 train and 1337891 test samples
assert repr(ds) == f"MJSynth(train={True})"
_validate_dataset_recognition_part(ds, input_size)
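# Illustrative sketch (not a test): wiring a DetectionDataset into a DataLoader, following
# the pattern of test_detection_dataset above. The folder and label paths are placeholders.
def _example_detection_loader(img_folder, label_path):
    ds = datasets.DetectionDataset(
        img_folder=img_folder,
        label_path=label_path,
        img_transforms=Resize((1024, 1024)),
    )
    return DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)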
|
import json
import os
import tempfile
import pytest
from doctr import models
from doctr.models.factory import _save_model_and_config_for_hf_hub, from_hub, push_to_hf_hub
def test_push_to_hf_hub():
model = models.classification.resnet18(pretrained=False)
with pytest.raises(ValueError):
# run_config and/or arch must be specified
push_to_hf_hub(model, model_name="test", task="classification")
with pytest.raises(ValueError):
# task must be one of classification, detection, recognition, obj_detection
push_to_hf_hub(model, model_name="test", task="invalid_task", arch="mobilenet_v3_small")
with pytest.raises(ValueError):
# arch not in available architectures for task
push_to_hf_hub(model, model_name="test", task="detection", arch="crnn_mobilenet_v3_large")
@pytest.mark.parametrize(
"arch_name, task_name, dummy_model_id",
[
["vgg16_bn_r", "classification", "Felix92/doctr-dummy-torch-vgg16-bn-r"],
["resnet18", "classification", "Felix92/doctr-dummy-torch-resnet18"],
["resnet31", "classification", "Felix92/doctr-dummy-torch-resnet31"],
["resnet34", "classification", "Felix92/doctr-dummy-torch-resnet34"],
["resnet34_wide", "classification", "Felix92/doctr-dummy-torch-resnet34-wide"],
["resnet50", "classification", "Felix92/doctr-dummy-torch-resnet50"],
["magc_resnet31", "classification", "Felix92/doctr-dummy-torch-magc-resnet31"],
["mobilenet_v3_small", "classification", "Felix92/doctr-dummy-torch-mobilenet-v3-small"],
["mobilenet_v3_large", "classification", "Felix92/doctr-dummy-torch-mobilenet-v3-large"],
["vit_b", "classification", "Felix92/doctr-dummy-torch-vit-b"],
["db_resnet34", "detection", "Felix92/doctr-dummy-torch-db-resnet34"],
["db_resnet50", "detection", "Felix92/doctr-dummy-torch-db-resnet50"],
["db_mobilenet_v3_large", "detection", "Felix92/doctr-dummy-torch-db-mobilenet-v3-large"],
["db_resnet50_rotation", "detection", "Felix92/doctr-dummy-torch-db-resnet50-rotation"],
["linknet_resnet18", "detection", "Felix92/doctr-dummy-torch-linknet-resnet18"],
["linknet_resnet34", "detection", "Felix92/doctr-dummy-torch-linknet-resnet34"],
["linknet_resnet50", "detection", "Felix92/doctr-dummy-torch-linknet-resnet50"],
["crnn_vgg16_bn", "recognition", "Felix92/doctr-dummy-torch-crnn-vgg16-bn"],
["crnn_mobilenet_v3_small", "recognition", "Felix92/doctr-dummy-torch-crnn-mobilenet-v3-small"],
["crnn_mobilenet_v3_large", "recognition", "Felix92/doctr-dummy-torch-crnn-mobilenet-v3-large"],
["sar_resnet31", "recognition", "Felix92/doctr-dummy-torch-sar-resnet31"],
["master", "recognition", "Felix92/doctr-dummy-torch-master"],
["vitstr_small", "recognition", "Felix92/doctr-dummy-torch-vitstr-small"],
[
"fasterrcnn_mobilenet_v3_large_fpn",
"obj_detection",
"Felix92/doctr-dummy-torch-fasterrcnn-mobilenet-v3-large-fpn",
],
],
)
def test_models_huggingface_hub(arch_name, task_name, dummy_model_id, tmpdir):
with tempfile.TemporaryDirectory() as tmp_dir:
model = models.__dict__[task_name].__dict__[arch_name](pretrained=True).eval()
_save_model_and_config_for_hf_hub(model, arch=arch_name, task=task_name, save_dir=tmp_dir)
assert hasattr(model, "cfg")
assert len(os.listdir(tmp_dir)) == 2
assert os.path.exists(tmp_dir + "/pytorch_model.bin")
assert os.path.exists(tmp_dir + "/config.json")
tmp_config = json.load(open(tmp_dir + "/config.json"))
assert arch_name == tmp_config["arch"]
assert task_name == tmp_config["task"]
assert all(key in model.cfg.keys() for key in tmp_config.keys())
# test from hub
hub_model = from_hub(repo_id=dummy_model_id)
assert isinstance(hub_model, type(model))
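# Illustrative sketch (not a test): loading a pretrained checkpoint from the Hugging Face
# hub, as test_models_huggingface_hub does above. The repo id is one of the dummy repos
# parametrized in that test.
def _example_from_hub():
    return from_hub(repo_id="Felix92/doctr-dummy-torch-resnet18")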
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import datetime
sys.path.insert(0, os.path.abspath("../.."))
import doctr
# -- Project information -----------------------------------------------------
master_doc = "index"
project = "docTR"
_copyright_str = f"-{datetime.now().year}" if datetime.now().year > 2021 else ""
copyright = f"2021{_copyright_str}, Mindee"
author = "François-Guillaume Fernandez, Charles Gaillard"
# The full version, including alpha/beta/rc tags
version = doctr.__version__
release = doctr.__version__ + "-git"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.autosectionlabel",
"sphinxemoji.sphinxemoji", # cf. https://sphinxemojicodes.readthedocs.io/en/stable/
"sphinx_copybutton",
"recommonmark",
"sphinx_markdown_tables",
"sphinx_tabs.tabs",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pypdfium2": ("https://pypdfium2.readthedocs.io/en/stable/", None),
}
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "notebooks/*.rst"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "friendly"
pygments_dark_style = "monokai"
highlight_language = "python3"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"sidebar_hide_name": True,
"navigation_with_keys": True,
"light_css_variables": {
"color-sidebar-background": "#082747",
"color-sidebar-background-border": "#082747",
"color-sidebar-caption-text": "white",
"color-sidebar-link-text--top-level": "white",
"color-sidebar-link-text": "white",
"sidebar-caption-font-size": "normal",
"color-sidebar-item-background--hover": " #5dade2",
},
"dark_css_variables": {
"color-sidebar-background": "#1a1c1e",
"color-sidebar-background-border": "#1a1c1e",
"color-sidebar-caption-text": "white",
"color-sidebar-link-text--top-level": "white",
},
}
html_logo = "_static/images/Logo-docTR-white.png"
html_favicon = "_static/images/favicon.ico"
html_title = "docTR documentation"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
def add_ga_javascript(app, pagename, templatename, context, doctree):
# Add googleanalytics id
# ref: https://github.com/orenhecht/googleanalytics/blob/master/sphinxcontrib/googleanalytics.py
metatags = context.get("metatags", "")
metatags += """
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id={0}"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){{dataLayer.push(arguments);}}
gtag('js', new Date());
gtag('config', '{0}');
</script>
""".format(
app.config.googleanalytics_id
)
context["metatags"] = metatags
def setup(app):
app.add_config_value("googleanalytics_id", "G-40DVRMX8T4", "html")
app.add_css_file("css/mindee.css")
app.add_js_file("js/custom.js")
app.connect("html-page-context", add_ga_javascript)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
__version__ = '0.6.1a0'
|
from . import io, datasets, models, transforms, utils
from .file_utils import is_tf_available, is_torch_available
from .version import __version__ # noqa: F401
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Adapted from https://github.com/huggingface/transformers/blob/master/src/transformers/file_utils.py
import importlib.util
import logging
import os
import sys
CLASS_NAME: str = "words"
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__all__ = ["is_tf_available", "is_torch_available", "CLASS_NAME", "copy_tensor"]
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
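# Users can force a backend before importing doctr by setting USE_TORCH=1 or USE_TF=1;
# with the default "AUTO", whichever framework is installed is picked up.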
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
try:
_torch_version = importlib_metadata.version("torch")
logging.info(f"PyTorch version {_torch_version} available.")
except importlib_metadata.PackageNotFoundError:
_torch_available = False
else:
logging.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
_tf_available = importlib.util.find_spec("tensorflow") is not None
if _tf_available:
candidates = (
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"tensorflow-rocm",
"tensorflow-macos",
)
_tf_version = None
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for pkg in candidates:
try:
_tf_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
_tf_available = _tf_version is not None
if _tf_available:
if int(_tf_version.split(".")[0]) < 2: # type: ignore[union-attr]
logging.info(f"TensorFlow found but with version {_tf_version}. DocTR requires version 2 minimum.")
_tf_available = False
else:
logging.info(f"TensorFlow version {_tf_version} available.")
else:
logging.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
if not _torch_available and not _tf_available:
raise ModuleNotFoundError(
"DocTR requires either TensorFlow or PyTorch to be installed. Please ensure one of them"
" is installed and that either USE_TF or USE_TORCH is enabled."
)
def is_torch_available():
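    """Whether PyTorch was detected (and selected as backend) at import time."""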
return _torch_available
def is_tf_available():
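    """Whether TensorFlow was detected (and selected as backend) at import time."""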
return _tf_available
def copy_tensor(x):
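    """Return a detached copy of a tensor using the available backend (tf.identity or Tensor.detach().clone())."""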
if is_tf_available():
import tensorflow as tf
return tf.identity(x)
elif is_torch_available():
return x.detach().clone()
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import json
import os
from pathlib import Path
from typing import Any, List, Tuple
from .datasets import AbstractDataset
__all__ = ["RecognitionDataset"]
class RecognitionDataset(AbstractDataset):
"""Dataset implementation for text recognition tasks
>>> from doctr.datasets import RecognitionDataset
>>> train_set = RecognitionDataset(img_folder="/path/to/images",
>>> labels_path="/path/to/labels.json")
>>> img, target = train_set[0]
Args:
img_folder: path to the images folder
        labels_path: path to the json file containing all labels (character sequences)
**kwargs: keyword arguments from `AbstractDataset`.
"""
def __init__(
self,
img_folder: str,
labels_path: str,
**kwargs: Any,
) -> None:
super().__init__(img_folder, **kwargs)
self.data: List[Tuple[str, str]] = []
with open(labels_path) as f:
labels = json.load(f)
for img_name, label in labels.items():
if not os.path.exists(os.path.join(self.root, img_name)):
raise FileNotFoundError(f"unable to locate {os.path.join(self.root, img_name)}")
self.data.append((img_name, label))
def merge_dataset(self, ds: AbstractDataset) -> None:
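        """Merge another recognition dataset into this one (image paths are made absolute so both roots can coexist)
        Args:
            ds: the dataset to merge into the current one
        """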
# Update data with new root for self
self.data = [(str(Path(self.root).joinpath(img_path)), label) for img_path, label in self.data]
# Define new root
self.root = Path("/")
# Merge with ds data
for img_path, label in ds.data:
self.data.append((str(Path(ds.root).joinpath(img_path)), label))
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import glob
import os
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from PIL import Image
from scipy import io as sio
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["SynthText"]
class SynthText(VisionDataset):
"""SynthText dataset from `"Synthetic Data for Text Localisation in Natural Images"
<https://arxiv.org/abs/1604.06646>`_ | `"repository" <https://github.com/ankush-me/SynthText>`_ |
`"website" <https://www.robots.ox.ac.uk/~vgg/data/scenetext/>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/svt-grid.png&src=0
:align: center
>>> from doctr.datasets import SynthText
>>> train_set = SynthText(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
URL = "https://thor.robots.ox.ac.uk/~vgg/data/scenetext/SynthText.zip"
SHA256 = "28ab030485ec8df3ed612c568dd71fb2793b9afbfa3a9d9c6e792aef33265bf1"
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
self.URL,
None,
file_hash=None,
extract_archive=True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
# Load mat data
tmp_root = os.path.join(self.root, "SynthText") if self.SHA256 else self.root
# define folder to write SynthText recognition dataset
reco_folder_name = "SynthText_recognition_train" if self.train else "SynthText_recognition_test"
reco_folder_name = "Poly_" + reco_folder_name if use_polygons else reco_folder_name
reco_folder_path = os.path.join(tmp_root, reco_folder_name)
reco_images_counter = 0
if recognition_task and os.path.isdir(reco_folder_path):
self._read_from_folder(reco_folder_path)
return
elif recognition_task and not os.path.isdir(reco_folder_path):
os.makedirs(reco_folder_path, exist_ok=False)
mat_data = sio.loadmat(os.path.join(tmp_root, "gt.mat"))
train_samples = int(len(mat_data["imnames"][0]) * 0.9)
set_slice = slice(train_samples) if self.train else slice(train_samples, None)
paths = mat_data["imnames"][0][set_slice]
boxes = mat_data["wordBB"][0][set_slice]
labels = mat_data["txt"][0][set_slice]
del mat_data
for img_path, word_boxes, txt in tqdm(
iterable=zip(paths, boxes, labels), desc="Unpacking SynthText", total=len(paths)
):
# File existence check
if not os.path.exists(os.path.join(tmp_root, img_path[0])):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_path[0])}")
labels = [elt for word in txt.tolist() for elt in word.split()]
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
word_boxes = (
word_boxes.transpose(2, 1, 0)
if word_boxes.ndim == 3
else np.expand_dims(word_boxes.transpose(1, 0), axis=0)
)
if not use_polygons:
# xmin, ymin, xmax, ymax
word_boxes = np.concatenate((word_boxes.min(axis=1), word_boxes.max(axis=1)), axis=1)
if recognition_task:
crops = crop_bboxes_from_image(img_path=os.path.join(tmp_root, img_path[0]), geoms=word_boxes)
for crop, label in zip(crops, labels):
if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
# write data to disk
with open(os.path.join(reco_folder_path, f"{reco_images_counter}.txt"), "w") as f:
f.write(label)
tmp_img = Image.fromarray(crop)
tmp_img.save(os.path.join(reco_folder_path, f"{reco_images_counter}.png"))
reco_images_counter += 1
else:
self.data.append((img_path[0], dict(boxes=np.asarray(word_boxes, dtype=np_dtype), labels=labels)))
if recognition_task:
self._read_from_folder(reco_folder_path)
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
def _read_from_folder(self, path: str) -> None:
for img_path in glob.glob(os.path.join(path, "*.png")):
with open(os.path.join(path, f"{os.path.basename(img_path)[:-4]}.txt"), "r") as f:
self.data.append((img_path, f.read()))
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import json
import os
from typing import Any, Dict, List, Tuple, Type, Union
import numpy as np
from doctr.file_utils import CLASS_NAME
from .datasets import AbstractDataset
from .utils import pre_transform_multiclass
__all__ = ["DetectionDataset"]
class DetectionDataset(AbstractDataset):
"""Implements a text detection dataset
>>> from doctr.datasets import DetectionDataset
>>> train_set = DetectionDataset(img_folder="/path/to/images",
>>> label_path="/path/to/labels.json")
>>> img, target = train_set[0]
Args:
img_folder: folder with all the images of the dataset
label_path: path to the annotations of each image
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
**kwargs: keyword arguments from `AbstractDataset`.
"""
def __init__(
self,
img_folder: str,
label_path: str,
use_polygons: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
img_folder,
pre_transforms=pre_transform_multiclass,
**kwargs,
)
# File existence check
self._class_names: List = []
if not os.path.exists(label_path):
raise FileNotFoundError(f"unable to locate {label_path}")
with open(label_path, "rb") as f:
labels = json.load(f)
self.data: List[Tuple[str, Tuple[np.ndarray, List[str]]]] = []
np_dtype = np.float32
for img_name, label in labels.items():
# File existence check
if not os.path.exists(os.path.join(self.root, img_name)):
raise FileNotFoundError(f"unable to locate {os.path.join(self.root, img_name)}")
geoms, polygons_classes = self.format_polygons(label["polygons"], use_polygons, np_dtype)
self.data.append((img_name, (np.asarray(geoms, dtype=np_dtype), polygons_classes)))
def format_polygons(
self, polygons: Union[List, Dict], use_polygons: bool, np_dtype: Type
) -> Tuple[np.ndarray, List[str]]:
"""format polygons into an array
Args:
polygons: the bounding boxes
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
np_dtype: dtype of array
Returns:
geoms: bounding boxes as np array
polygons_classes: list of classes for each bounding box
"""
if isinstance(polygons, list):
self._class_names += [CLASS_NAME]
polygons_classes = [CLASS_NAME for _ in polygons]
_polygons: np.ndarray = np.asarray(polygons, dtype=np_dtype)
elif isinstance(polygons, dict):
self._class_names += list(polygons.keys())
polygons_classes = [k for k, v in polygons.items() for _ in v]
_polygons = np.concatenate([np.asarray(poly, dtype=np_dtype) for poly in polygons.values() if poly], axis=0)
else:
raise TypeError(f"polygons should be a dictionary or list, it was {type(polygons)}")
geoms = _polygons if use_polygons else np.concatenate((_polygons.min(axis=1), _polygons.max(axis=1)), axis=1)
return geoms, polygons_classes
@property
def class_names(self):
return sorted(list(set(self._class_names)))
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["CORD"]
class CORD(VisionDataset):
"""CORD dataset from `"CORD: A Consolidated Receipt Dataset forPost-OCR Parsing"
<https://openreview.net/pdf?id=SJl3z659UH>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/cord-grid.png&src=0
:align: center
>>> from doctr.datasets import CORD
>>> train_set = CORD(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
TRAIN = (
"https://doctr-static.mindee.com/models?id=v0.1.1/cord_train.zip&src=0",
"45f9dc77f126490f3e52d7cb4f70ef3c57e649ea86d19d862a2757c9c455d7f8",
)
TEST = (
"https://doctr-static.mindee.com/models?id=v0.1.1/cord_test.zip&src=0",
"8c895e3d6f7e1161c5b7245e3723ce15c04d84be89eaa6093949b75a66fb3c58",
)
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
url, sha256 = self.TRAIN if train else self.TEST
super().__init__(
url,
None,
sha256,
True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
# List images
tmp_root = os.path.join(self.root, "image")
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
self.train = train
np_dtype = np.float32
for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking CORD", total=len(os.listdir(tmp_root))):
# File existence check
if not os.path.exists(os.path.join(tmp_root, img_path)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_path)}")
stem = Path(img_path).stem
_targets = []
with open(os.path.join(self.root, "json", f"{stem}.json"), "rb") as f:
label = json.load(f)
for line in label["valid_line"]:
for word in line["words"]:
if len(word["text"]) > 0:
x = word["quad"]["x1"], word["quad"]["x2"], word["quad"]["x3"], word["quad"]["x4"]
y = word["quad"]["y1"], word["quad"]["y2"], word["quad"]["y3"], word["quad"]["y4"]
box: Union[List[float], np.ndarray]
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
box = np.array(
[
[x[0], y[0]],
[x[1], y[1]],
[x[2], y[2]],
[x[3], y[3]],
],
dtype=np_dtype,
)
else:
# Reduce 8 coords to 4 -> xmin, ymin, xmax, ymax
box = [min(x), min(y), max(x), max(y)]
_targets.append((word["text"], box))
text_targets, box_targets = zip(*_targets)
if recognition_task:
crops = crop_bboxes_from_image(
img_path=os.path.join(tmp_root, img_path), geoms=np.asarray(box_targets, dtype=int).clip(min=0)
)
for crop, label in zip(crops, list(text_targets)):
self.data.append((crop, label))
else:
self.data.append(
(img_path, dict(boxes=np.asarray(box_targets, dtype=int).clip(min=0), labels=list(text_targets)))
)
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from typing import Any, List, Tuple
from tqdm import tqdm
from .datasets import AbstractDataset
__all__ = ["MJSynth"]
class MJSynth(AbstractDataset):
"""MJSynth dataset from `"Synthetic Data and Artificial Neural Networks for Natural Scene Text Recognition"
<https://www.robots.ox.ac.uk/~vgg/data/text/>`_.
>>> # NOTE: This is a pure recognition dataset without bounding box labels.
>>> # NOTE: You need to download the dataset.
>>> from doctr.datasets import MJSynth
>>> train_set = MJSynth(img_folder="/path/to/mjsynth/mnt/ramdisk/max/90kDICT32px",
>>> label_path="/path/to/mjsynth/mnt/ramdisk/max/90kDICT32px/imlist.txt",
>>> train=True)
>>> img, target = train_set[0]
>>> test_set = MJSynth(img_folder="/path/to/mjsynth/mnt/ramdisk/max/90kDICT32px",
    >>> label_path="/path/to/mjsynth/mnt/ramdisk/max/90kDICT32px/imlist.txt",
    >>> train=False)
>>> img, target = test_set[0]
Args:
img_folder: folder with all the images of the dataset
label_path: path to the file with the labels
train: whether the subset should be the training one
**kwargs: keyword arguments from `AbstractDataset`.
"""
# filter corrupted or missing images
BLACKLIST = [
"./1881/4/225_Marbling_46673.jpg\n",
"./2069/4/192_whittier_86389.jpg\n",
"./869/4/234_TRIASSIC_80582.jpg\n",
"./173/2/358_BURROWING_10395.jpg\n",
"./913/4/231_randoms_62372.jpg\n",
"./596/2/372_Ump_81662.jpg\n",
"./936/2/375_LOCALITIES_44992.jpg\n",
"./2540/4/246_SQUAMOUS_73902.jpg\n",
"./1332/4/224_TETHERED_78397.jpg\n",
"./627/6/83_PATRIARCHATE_55931.jpg\n",
"./2013/2/370_refract_63890.jpg\n",
"./2911/6/77_heretical_35885.jpg\n",
"./1730/2/361_HEREON_35880.jpg\n",
"./2194/2/334_EFFLORESCENT_24742.jpg\n",
"./2025/2/364_SNORTERS_72304.jpg\n",
"./368/4/232_friar_30876.jpg\n",
"./275/6/96_hackle_34465.jpg\n",
"./384/4/220_bolts_8596.jpg\n",
"./905/4/234_Postscripts_59142.jpg\n",
"./2749/6/101_Chided_13155.jpg\n",
"./495/6/81_MIDYEAR_48332.jpg\n",
"./2852/6/60_TOILSOME_79481.jpg\n",
"./554/2/366_Teleconferences_77948.jpg\n",
"./1696/4/211_Queened_61779.jpg\n",
"./2128/2/369_REDACTED_63458.jpg\n",
"./2557/2/351_DOWN_23492.jpg\n",
"./2489/4/221_snored_72290.jpg\n",
"./1650/2/355_stony_74902.jpg\n",
"./1863/4/223_Diligently_21672.jpg\n",
"./264/2/362_FORETASTE_30276.jpg\n",
"./429/4/208_Mainmasts_46140.jpg\n",
"./1817/2/363_actuating_904.jpg\n",
]
def __init__(
self,
img_folder: str,
label_path: str,
train: bool = True,
**kwargs: Any,
) -> None:
super().__init__(img_folder, **kwargs)
# File existence check
if not os.path.exists(label_path) or not os.path.exists(img_folder):
raise FileNotFoundError(f"unable to locate {label_path if not os.path.exists(label_path) else img_folder}")
self.data: List[Tuple[str, str]] = []
self.train = train
with open(label_path) as f:
img_paths = f.readlines()
train_samples = int(len(img_paths) * 0.9)
set_slice = slice(train_samples) if self.train else slice(train_samples, None)
for path in tqdm(iterable=img_paths[set_slice], desc="Unpacking MJSynth", total=len(img_paths[set_slice])):
if path not in self.BLACKLIST:
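                # The transcription is embedded in the file name, e.g. "./1881/4/225_Marbling_46673.jpg" -> "Marbling"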
label = path.split("_")[1]
img_path = os.path.join(img_folder, path[2:]).strip()
self.data.append((img_path, label))
def extra_repr(self) -> str:
return f"train={self.train}"
|
from doctr.file_utils import is_tf_available
from .generator import *
from .cord import *
from .detection import *
from .doc_artefacts import *
from .funsd import *
from .ic03 import *
from .ic13 import *
from .iiit5k import *
from .imgur5k import *
from .mjsynth import *
from .ocr import *
from .recognition import *
from .sroie import *
from .svhn import *
from .svt import *
from .synthtext import *
from .utils import *
from .vocabs import *
if is_tf_available():
from .loader import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import json
import os
from typing import Any, Dict, List, Tuple
import numpy as np
from .datasets import VisionDataset
__all__ = ["DocArtefacts"]
class DocArtefacts(VisionDataset):
"""Object detection dataset for non-textual elements in documents.
The dataset includes a variety of synthetic document pages with non-textual elements.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/artefacts-grid.png&src=0
:align: center
>>> from doctr.datasets import DocArtefacts
>>> train_set = DocArtefacts(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
**kwargs: keyword arguments from `VisionDataset`.
"""
URL = "https://doctr-static.mindee.com/models?id=v0.4.0/artefact_detection-13fab8ce.zip&src=0"
SHA256 = "13fab8ced7f84583d9dccd0c634f046c3417e62a11fe1dea6efbbaba5052471b"
CLASSES = ["background", "qr_code", "bar_code", "logo", "photo"]
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
**kwargs: Any,
) -> None:
super().__init__(self.URL, None, self.SHA256, True, **kwargs)
self.train = train
# Update root
self.root = os.path.join(self.root, "train" if train else "val")
# List images
tmp_root = os.path.join(self.root, "images")
with open(os.path.join(self.root, "labels.json"), "rb") as f:
labels = json.load(f)
self.data: List[Tuple[str, Dict[str, Any]]] = []
img_list = os.listdir(tmp_root)
if len(labels) != len(img_list):
raise AssertionError("the number of images and labels do not match")
np_dtype = np.float32
for img_name, label in labels.items():
# File existence check
if not os.path.exists(os.path.join(tmp_root, img_name)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_name)}")
# xmin, ymin, xmax, ymax
boxes: np.ndarray = np.asarray([obj["geometry"] for obj in label], dtype=np_dtype)
classes: np.ndarray = np.asarray([self.CLASSES.index(obj["label"]) for obj in label], dtype=np.int64)
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
boxes = np.stack(
[
np.stack([boxes[:, 0], boxes[:, 1]], axis=-1),
np.stack([boxes[:, 2], boxes[:, 1]], axis=-1),
np.stack([boxes[:, 2], boxes[:, 3]], axis=-1),
np.stack([boxes[:, 0], boxes[:, 3]], axis=-1),
],
axis=1,
)
self.data.append((img_name, dict(boxes=boxes, labels=classes)))
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from typing import Any, Dict, List, Tuple, Union
import h5py
import numpy as np
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["SVHN"]
class SVHN(VisionDataset):
"""SVHN dataset from `"The Street View House Numbers (SVHN) Dataset"
<http://ufldl.stanford.edu/housenumbers/>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/svhn-grid.png&src=0
:align: center
>>> from doctr.datasets import SVHN
>>> train_set = SVHN(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
TRAIN = (
"http://ufldl.stanford.edu/housenumbers/train.tar.gz",
"4b17bb33b6cd8f963493168f80143da956f28ec406cc12f8e5745a9f91a51898",
"svhn_train.tar",
)
TEST = (
"http://ufldl.stanford.edu/housenumbers/test.tar.gz",
"57ac9ceb530e4aa85b55d991be8fc49c695b3d71c6f6a88afea86549efde7fb5",
"svhn_test.tar",
)
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
url, sha256, name = self.TRAIN if train else self.TEST
super().__init__(
url,
file_name=name,
file_hash=sha256,
extract_archive=True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
tmp_root = os.path.join(self.root, "train" if train else "test")
# Load mat data (matlab v7.3 - can not be loaded with scipy)
with h5py.File(os.path.join(tmp_root, "digitStruct.mat"), "r") as f:
img_refs = f["digitStruct/name"]
box_refs = f["digitStruct/bbox"]
for img_ref, box_ref in tqdm(iterable=zip(img_refs, box_refs), desc="Unpacking SVHN", total=len(img_refs)):
# convert ascii matrix to string
img_name = "".join(map(chr, f[img_ref[0]][()].flatten()))
# File existence check
if not os.path.exists(os.path.join(tmp_root, img_name)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_name)}")
# Unpack the information
box = f[box_ref[0]]
if box["left"].shape[0] == 1:
box_dict = {k: [int(vals[0][0])] for k, vals in box.items()}
else:
box_dict = {k: [int(f[v[0]][()].item()) for v in vals] for k, vals in box.items()}
# Convert it to the right format
coords: np.ndarray = np.array(
[box_dict["left"], box_dict["top"], box_dict["width"], box_dict["height"]], dtype=np_dtype
).transpose()
label_targets = list(map(str, box_dict["label"]))
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
box_targets: np.ndarray = np.stack(
[
np.stack([coords[:, 0], coords[:, 1]], axis=-1),
np.stack([coords[:, 0] + coords[:, 2], coords[:, 1]], axis=-1),
np.stack([coords[:, 0] + coords[:, 2], coords[:, 1] + coords[:, 3]], axis=-1),
np.stack([coords[:, 0], coords[:, 1] + coords[:, 3]], axis=-1),
],
axis=1,
)
else:
# x, y, width, height -> xmin, ymin, xmax, ymax
box_targets = np.stack(
[
coords[:, 0],
coords[:, 1],
coords[:, 0] + coords[:, 2],
coords[:, 1] + coords[:, 3],
],
axis=-1,
)
if recognition_task:
crops = crop_bboxes_from_image(img_path=os.path.join(tmp_root, img_name), geoms=box_targets)
for crop, label in zip(crops, label_targets):
if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
self.data.append((crop, label))
else:
self.data.append((img_name, dict(boxes=box_targets, labels=label_targets)))
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import scipy.io as sio
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative
__all__ = ["IIIT5K"]
class IIIT5K(VisionDataset):
"""IIIT-5K character-level localization dataset from
`"BMVC 2012 Scene Text Recognition using Higher Order Language Priors"
<https://cdn.iiit.ac.in/cdn/cvit.iiit.ac.in/images/Projects/SceneTextUnderstanding/home/mishraBMVC12.pdf>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/iiit5k-grid.png&src=0
:align: center
>>> # NOTE: this dataset is for character-level localization
>>> from doctr.datasets import IIIT5K
>>> train_set = IIIT5K(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
URL = "https://cvit.iiit.ac.in/images/Projects/SceneTextUnderstanding/IIIT5K-Word_V3.0.tar.gz"
SHA256 = "7872c9efbec457eb23f3368855e7738f72ce10927f52a382deb4966ca0ffa38e"
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
self.URL,
None,
file_hash=self.SHA256,
extract_archive=True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
# Load mat data
tmp_root = os.path.join(self.root, "IIIT5K") if self.SHA256 else self.root
mat_file = "trainCharBound" if self.train else "testCharBound"
mat_data = sio.loadmat(os.path.join(tmp_root, f"{mat_file}.mat"))[mat_file][0]
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
for img_path, label, box_targets in tqdm(iterable=mat_data, desc="Unpacking IIIT5K", total=len(mat_data)):
_raw_path = img_path[0]
_raw_label = label[0]
# File existence check
if not os.path.exists(os.path.join(tmp_root, _raw_path)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, _raw_path)}")
if recognition_task:
self.data.append((_raw_path, _raw_label))
else:
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
box_targets = [
[
[box[0], box[1]],
[box[0] + box[2], box[1]],
[box[0] + box[2], box[1] + box[3]],
[box[0], box[1] + box[3]],
]
for box in box_targets
]
else:
# xmin, ymin, xmax, ymax
box_targets = [[box[0], box[1], box[0] + box[2], box[1] + box[3]] for box in box_targets]
                # labels are cast to a list so that each character maps to its own bounding box
self.data.append(
(_raw_path, dict(boxes=np.asarray(box_targets, dtype=np_dtype), labels=list(_raw_label)))
)
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import glob
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm
from .datasets import AbstractDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["IMGUR5K"]
class IMGUR5K(AbstractDataset):
"""IMGUR5K dataset from `"TextStyleBrush: Transfer of Text Aesthetics from a Single Example"
<https://arxiv.org/abs/2106.08385>`_ |
`repository <https://github.com/facebookresearch/IMGUR5K-Handwriting-Dataset>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/imgur5k-grid.png&src=0
:align: center
:width: 630
:height: 400
>>> # NOTE: You need to download/generate the dataset from the repository.
>>> from doctr.datasets import IMGUR5K
>>> train_set = IMGUR5K(train=True, img_folder="/path/to/IMGUR5K-Handwriting-Dataset/images",
>>> label_path="/path/to/IMGUR5K-Handwriting-Dataset/dataset_info/imgur5k_annotations.json")
>>> img, target = train_set[0]
>>> test_set = IMGUR5K(train=False, img_folder="/path/to/IMGUR5K-Handwriting-Dataset/images",
>>> label_path="/path/to/IMGUR5K-Handwriting-Dataset/dataset_info/imgur5k_annotations.json")
>>> img, target = test_set[0]
Args:
img_folder: folder with all the images of the dataset
label_path: path to the annotations file of the dataset
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `AbstractDataset`.
"""
def __init__(
self,
img_folder: str,
label_path: str,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
)
# File existence check
if not os.path.exists(label_path) or not os.path.exists(img_folder):
raise FileNotFoundError(f"unable to locate {label_path if not os.path.exists(label_path) else img_folder}")
self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any]]]] = []
self.train = train
np_dtype = np.float32
img_names = os.listdir(img_folder)
train_samples = int(len(img_names) * 0.9)
set_slice = slice(train_samples) if self.train else slice(train_samples, None)
# define folder to write IMGUR5K recognition dataset
reco_folder_name = "IMGUR5K_recognition_train" if self.train else "IMGUR5K_recognition_test"
reco_folder_name = "Poly_" + reco_folder_name if use_polygons else reco_folder_name
reco_folder_path = os.path.join(os.path.dirname(self.root), reco_folder_name)
reco_images_counter = 0
if recognition_task and os.path.isdir(reco_folder_path):
self._read_from_folder(reco_folder_path)
return
elif recognition_task and not os.path.isdir(reco_folder_path):
os.makedirs(reco_folder_path, exist_ok=False)
with open(label_path) as f:
annotation_file = json.load(f)
for img_name in tqdm(iterable=img_names[set_slice], desc="Unpacking IMGUR5K", total=len(img_names[set_slice])):
img_path = Path(img_folder, img_name)
img_id = img_name.split(".")[0]
# File existence check
if not os.path.exists(os.path.join(self.root, img_name)):
raise FileNotFoundError(f"unable to locate {os.path.join(self.root, img_name)}")
            # some files have no annotations; these are marked with only a dot in the 'word' key
# ref: https://github.com/facebookresearch/IMGUR5K-Handwriting-Dataset/blob/main/README.md
if img_id not in annotation_file["index_to_ann_map"].keys():
continue
ann_ids = annotation_file["index_to_ann_map"][img_id]
annotations = [annotation_file["ann_id"][a_id] for a_id in ann_ids]
labels = [ann["word"] for ann in annotations if ann["word"] != "."]
# x_center, y_center, width, height, angle
_boxes = [
list(map(float, ann["bounding_box"].strip("[ ]").split(", ")))
for ann in annotations
if ann["word"] != "."
]
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
box_targets = [cv2.boxPoints(((box[0], box[1]), (box[2], box[3]), box[4])) for box in _boxes]
if not use_polygons:
# xmin, ymin, xmax, ymax
box_targets = [np.concatenate((points.min(0), points.max(0)), axis=-1) for points in box_targets]
# filter images without boxes
if len(box_targets) > 0:
if recognition_task:
crops = crop_bboxes_from_image(
img_path=os.path.join(self.root, img_name), geoms=np.asarray(box_targets, dtype=np_dtype)
)
for crop, label in zip(crops, labels):
if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
# write data to disk
with open(os.path.join(reco_folder_path, f"{reco_images_counter}.txt"), "w") as f:
f.write(label)
tmp_img = Image.fromarray(crop)
tmp_img.save(os.path.join(reco_folder_path, f"{reco_images_counter}.png"))
reco_images_counter += 1
else:
self.data.append((img_path, dict(boxes=np.asarray(box_targets, dtype=np_dtype), labels=labels)))
if recognition_task:
self._read_from_folder(reco_folder_path)
def extra_repr(self) -> str:
return f"train={self.train}"
def _read_from_folder(self, path: str) -> None:
for img_path in glob.glob(os.path.join(path, "*.png")):
with open(os.path.join(path, f"{os.path.basename(img_path)[:-4]}.txt"), "r") as f:
self.data.append((img_path, f.read()))
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import csv
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from tqdm import tqdm
from .datasets import AbstractDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["IC13"]
class IC13(AbstractDataset):
"""IC13 dataset from `"ICDAR 2013 Robust Reading Competition" <https://rrc.cvc.uab.es/>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/ic13-grid.png&src=0
:align: center
>>> # NOTE: You need to download both image and label parts from Focused Scene Text challenge Task2.1 2013-2015.
>>> from doctr.datasets import IC13
>>> train_set = IC13(img_folder="/path/to/Challenge2_Training_Task12_Images",
>>> label_folder="/path/to/Challenge2_Training_Task1_GT")
>>> img, target = train_set[0]
>>> test_set = IC13(img_folder="/path/to/Challenge2_Test_Task12_Images",
>>> label_folder="/path/to/Challenge2_Test_Task1_GT")
>>> img, target = test_set[0]
Args:
img_folder: folder with all the images of the dataset
label_folder: folder with all annotation files for the images
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `AbstractDataset`.
"""
def __init__(
self,
img_folder: str,
label_folder: str,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
)
# File existence check
if not os.path.exists(label_folder) or not os.path.exists(img_folder):
raise FileNotFoundError(
f"unable to locate {label_folder if not os.path.exists(label_folder) else img_folder}"
)
self.data: List[Tuple[Union[Path, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
img_names = os.listdir(img_folder)
for img_name in tqdm(iterable=img_names, desc="Unpacking IC13", total=len(img_names)):
img_path = Path(img_folder, img_name)
label_path = Path(label_folder, "gt_" + Path(img_name).stem + ".txt")
with open(label_path, newline="\n") as f:
_lines = [
[val[:-1] if val.endswith(",") else val for val in row]
for row in csv.reader(f, delimiter=" ", quotechar="'")
]
labels = [line[-1].replace('"', "") for line in _lines]
# xmin, ymin, xmax, ymax
box_targets: np.ndarray = np.array([list(map(int, line[:4])) for line in _lines], dtype=np_dtype)
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
box_targets = np.array(
[
[
[coords[0], coords[1]],
[coords[2], coords[1]],
[coords[2], coords[3]],
[coords[0], coords[3]],
]
for coords in box_targets
],
dtype=np_dtype,
)
if recognition_task:
crops = crop_bboxes_from_image(img_path=img_path, geoms=box_targets)
for crop, label in zip(crops, labels):
self.data.append((crop, label))
else:
self.data.append((img_path, dict(boxes=box_targets, labels=labels)))
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
from .datasets import AbstractDataset
__all__ = ["OCRDataset"]
class OCRDataset(AbstractDataset):
"""Implements an OCR dataset
>>> from doctr.datasets import OCRDataset
>>> train_set = OCRDataset(img_folder="/path/to/images",
>>> label_file="/path/to/labels.json")
>>> img, target = train_set[0]
Args:
img_folder: local path to image folder (all jpg at the root)
label_file: local path to the label file
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
**kwargs: keyword arguments from `AbstractDataset`.
"""
def __init__(
self,
img_folder: str,
label_file: str,
use_polygons: bool = False,
**kwargs: Any,
) -> None:
super().__init__(img_folder, **kwargs)
# List images
self.data: List[Tuple[str, Dict[str, Any]]] = []
np_dtype = np.float32
with open(label_file, "rb") as f:
data = json.load(f)
for img_name, annotations in data.items():
# Get image path
img_name = Path(img_name)
# File existence check
if not os.path.exists(os.path.join(self.root, img_name)):
raise FileNotFoundError(f"unable to locate {os.path.join(self.root, img_name)}")
# handle empty images
if len(annotations["typed_words"]) == 0:
self.data.append((img_name, dict(boxes=np.zeros((0, 4), dtype=np_dtype), labels=[])))
continue
# Unpack the straight boxes (xmin, ymin, xmax, ymax)
geoms = [list(map(float, obj["geometry"][:4])) for obj in annotations["typed_words"]]
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
geoms = [
[geom[:2], [geom[2], geom[1]], geom[2:], [geom[0], geom[3]]] # type: ignore[list-item]
for geom in geoms
]
text_targets = [obj["value"] for obj in annotations["typed_words"]]
self.data.append((img_name, dict(boxes=np.asarray(geoms, dtype=np_dtype), labels=text_targets)))
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from typing import Any, Dict, List, Tuple, Union
import defusedxml.ElementTree as ET
import numpy as np
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["IC03"]
class IC03(VisionDataset):
"""IC03 dataset from `"ICDAR 2003 Robust Reading Competitions: Entries, Results and Future Directions"
<http://www.iapr-tc11.org/mediawiki/index.php?title=ICDAR_2003_Robust_Reading_Competitions>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/ic03-grid.png&src=0
:align: center
>>> from doctr.datasets import IC03
>>> train_set = IC03(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
recognition_task: whether the dataset should be used for recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
TRAIN = (
"http://www.iapr-tc11.org/dataset/ICDAR2003_RobustReading/TrialTrain/scene.zip",
"9d86df514eb09dd693fb0b8c671ef54a0cfe02e803b1bbef9fc676061502eb94",
"ic03_train.zip",
)
TEST = (
"http://www.iapr-tc11.org/dataset/ICDAR2003_RobustReading/TrialTest/scene.zip",
"dbc4b5fd5d04616b8464a1b42ea22db351ee22c2546dd15ac35611857ea111f8",
"ic03_test.zip",
)
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
url, sha256, file_name = self.TRAIN if train else self.TEST
super().__init__(
url,
file_name,
sha256,
True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
# Load xml data
tmp_root = (
os.path.join(self.root, "SceneTrialTrain" if self.train else "SceneTrialTest") if sha256 else self.root
)
xml_tree = ET.parse(os.path.join(tmp_root, "words.xml"))
xml_root = xml_tree.getroot()
for image in tqdm(iterable=xml_root, desc="Unpacking IC03", total=len(xml_root)):
name, resolution, rectangles = image
# File existence check
if not os.path.exists(os.path.join(tmp_root, name.text)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, name.text)}")
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
_boxes = [
[
[float(rect.attrib["x"]), float(rect.attrib["y"])],
[float(rect.attrib["x"]) + float(rect.attrib["width"]), float(rect.attrib["y"])],
[
float(rect.attrib["x"]) + float(rect.attrib["width"]),
float(rect.attrib["y"]) + float(rect.attrib["height"]),
],
[float(rect.attrib["x"]), float(rect.attrib["y"]) + float(rect.attrib["height"])],
]
for rect in rectangles
]
else:
# x_min, y_min, x_max, y_max
_boxes = [
[
float(rect.attrib["x"]), # type: ignore[list-item]
float(rect.attrib["y"]), # type: ignore[list-item]
float(rect.attrib["x"]) + float(rect.attrib["width"]), # type: ignore[list-item]
float(rect.attrib["y"]) + float(rect.attrib["height"]), # type: ignore[list-item]
]
for rect in rectangles
]
# filter images without boxes
if len(_boxes) > 0:
boxes: np.ndarray = np.asarray(_boxes, dtype=np_dtype)
# Get the labels
labels = [lab.text for rect in rectangles for lab in rect if lab.text]
if recognition_task:
crops = crop_bboxes_from_image(img_path=os.path.join(tmp_root, name.text), geoms=boxes)
for crop, label in zip(crops, labels):
if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
self.data.append((crop, label))
else:
self.data.append((name.text, dict(boxes=boxes, labels=labels)))
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import string
import unicodedata
from collections.abc import Sequence
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional
from typing import Sequence as SequenceType
from typing import Tuple, TypeVar, Union
import numpy as np
from PIL import Image
from doctr.io.image import get_img_shape
from doctr.utils.geometry import convert_to_relative_coords, extract_crops, extract_rcrops
from .vocabs import VOCABS
__all__ = ["translate", "encode_string", "decode_sequence", "encode_sequences", "pre_transform_multiclass"]
ImageTensor = TypeVar("ImageTensor")
def translate(
input_string: str,
vocab_name: str,
unknown_char: str = "■",
) -> str:
"""Translate a string input in a given vocabulary
Args:
input_string: input string to translate
vocab_name: vocabulary to use (french, latin, ...)
unknown_char: unknown character for non-translatable characters
Returns:
A string translated in a given vocab"""
if VOCABS.get(vocab_name) is None:
raise KeyError("output vocabulary must be in vocabs dictionnary")
translated = ""
for char in input_string:
if char not in VOCABS[vocab_name]:
# we need to translate char into a vocab char
if char in string.whitespace:
# remove whitespaces
continue
# normalize character if it is not in vocab
char = unicodedata.normalize("NFD", char).encode("ascii", "ignore").decode("ascii")
if char == "" or char not in VOCABS[vocab_name]:
                # if normalization fails or char is still not in vocab, use the unknown character
char = unknown_char
translated += char
return translated
def encode_string(
input_string: str,
vocab: str,
) -> List[int]:
"""Given a predefined mapping, encode the string to a sequence of numbers
Args:
input_string: string to encode
vocab: vocabulary (string), the encoding is given by the indexing of the character sequence
Returns:
A list encoding the input_string"""
try:
return list(map(vocab.index, input_string))
except ValueError:
raise ValueError("some characters cannot be found in 'vocab'")
def decode_sequence(
input_seq: Union[np.ndarray, SequenceType[int]],
mapping: str,
) -> str:
"""Given a predefined mapping, decode the sequence of numbers to a string
Args:
input_seq: array to decode
mapping: vocabulary (string), the encoding is given by the indexing of the character sequence
Returns:
A string, decoded from input_seq
"""
if not isinstance(input_seq, (Sequence, np.ndarray)):
raise TypeError("Invalid sequence type")
if isinstance(input_seq, np.ndarray) and (input_seq.dtype != np.int_ or input_seq.max() >= len(mapping)):
raise AssertionError("Input must be an array of int, with max less than mapping size")
return "".join(map(mapping.__getitem__, input_seq))
def encode_sequences(
sequences: List[str],
vocab: str,
target_size: Optional[int] = None,
eos: int = -1,
sos: Optional[int] = None,
pad: Optional[int] = None,
dynamic_seq_length: bool = False,
**kwargs: Any,
) -> np.ndarray:
"""Encode character sequences using a given vocab as mapping
Args:
sequences: the list of character sequences of size N
vocab: the ordered vocab to use for encoding
target_size: maximum length of the encoded data
eos: encoding of End Of String
sos: optional encoding of Start Of String
pad: optional encoding for padding. In case of padding, all sequences are followed by 1 EOS then PAD
dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size
Returns:
the padded encoded data as a tensor
"""
if 0 <= eos < len(vocab):
raise ValueError("argument 'eos' needs to be outside of vocab possible indices")
if not isinstance(target_size, int) or dynamic_seq_length:
# Maximum string length + EOS
max_length = max(len(w) for w in sequences) + 1
if isinstance(sos, int):
max_length += 1
if isinstance(pad, int):
max_length += 1
target_size = max_length if not isinstance(target_size, int) else min(max_length, target_size)
# Pad all sequences
if isinstance(pad, int): # pad with padding symbol
if 0 <= pad < len(vocab):
raise ValueError("argument 'pad' needs to be outside of vocab possible indices")
# In that case, add EOS at the end of the word before padding
default_symbol = pad
else: # pad with eos symbol
default_symbol = eos
encoded_data: np.ndarray = np.full([len(sequences), target_size], default_symbol, dtype=np.int32)
# Encode the strings
for idx, seq in enumerate(map(partial(encode_string, vocab=vocab), sequences)):
if isinstance(pad, int): # add eos at the end of the sequence
seq.append(eos)
encoded_data[idx, : min(len(seq), target_size)] = seq[: min(len(seq), target_size)]
if isinstance(sos, int): # place sos symbol at the beginning of each sequence
if 0 <= sos < len(vocab):
raise ValueError("argument 'sos' needs to be outside of vocab possible indices")
encoded_data = np.roll(encoded_data, 1)
encoded_data[:, 0] = sos
return encoded_data
def convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:
target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
return img, target
def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
"""Crop a set of bounding boxes from an image
Args:
img_path: path to the image
        geoms: an array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
Returns:
a list of cropped images
"""
img: np.ndarray = np.array(Image.open(img_path).convert("RGB"))
# Polygon
if geoms.ndim == 3 and geoms.shape[1:] == (4, 2):
return extract_rcrops(img, geoms.astype(dtype=int))
if geoms.ndim == 2 and geoms.shape[1] == 4:
return extract_crops(img, geoms.astype(dtype=int))
raise ValueError("Invalid geometry format")
def pre_transform_multiclass(img, target: Tuple[np.ndarray, List]) -> Tuple[np.ndarray, Dict[str, List]]:
"""Converts multiclass target to relative coordinates.
Args:
img: Image
        target: tuple of target polygons and their class names
    Returns:
        Image and a dictionary mapping each class name to its array of polygons
    """
boxes = convert_to_relative_coords(target[0], get_img_shape(img))
boxes_classes = target[1]
boxes_dict: Dict = {k: [] for k in sorted(set(boxes_classes))}
for k, poly in zip(boxes_classes, boxes):
boxes_dict[k].append(poly)
boxes_dict = {k: np.stack(v, axis=0) for k, v in boxes_dict.items()}
return img, boxes_dict
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Callable, Optional
import numpy as np
import tensorflow as tf
from doctr.utils.multithreading import multithread_exec
__all__ = ["DataLoader"]
def default_collate(samples):
"""Collate multiple elements into batches
Args:
samples: list of N tuples containing M elements
Returns:
        Tuple of M sequences containing N elements each
"""
batch_data = zip(*samples)
tf_data = tuple(tf.stack(elt, axis=0) for elt in batch_data)
return tf_data
class DataLoader:
"""Implements a dataset wrapper for fast data loading
>>> from doctr.datasets import CORD, DataLoader
>>> train_set = CORD(train=True, download=True)
>>> train_loader = DataLoader(train_set, batch_size=32)
>>> train_iter = iter(train_loader)
>>> images, targets = next(train_iter)
Args:
dataset: the dataset
        shuffle: whether the samples should be shuffled before being passed to the iterator
batch_size: number of elements in each batch
drop_last: if `True`, drops the last batch if it isn't full
num_workers: number of workers to use for data loading
collate_fn: function to merge samples into a batch
"""
def __init__(
self,
dataset,
shuffle: bool = True,
batch_size: int = 1,
drop_last: bool = False,
num_workers: Optional[int] = None,
collate_fn: Optional[Callable] = None,
) -> None:
self.dataset = dataset
self.shuffle = shuffle
self.batch_size = batch_size
nb = len(self.dataset) / batch_size
self.num_batches = math.floor(nb) if drop_last else math.ceil(nb)
if collate_fn is None:
self.collate_fn = self.dataset.collate_fn if hasattr(self.dataset, "collate_fn") else default_collate
else:
self.collate_fn = collate_fn
self.num_workers = num_workers
self.reset()
def __len__(self) -> int:
return self.num_batches
def reset(self) -> None:
# Updates indices after each epoch
self._num_yielded = 0
self.indices = np.arange(len(self.dataset))
if self.shuffle is True:
np.random.shuffle(self.indices)
def __iter__(self):
self.reset()
return self
def __next__(self):
if self._num_yielded < self.num_batches:
# Get next indices
idx = self._num_yielded * self.batch_size
indices = self.indices[idx : min(len(self.dataset), idx + self.batch_size)]
samples = list(multithread_exec(self.dataset.__getitem__, indices, threads=self.num_workers))
batch_data = self.collate_fn(samples)
self._num_yielded += 1
return batch_data
else:
raise StopIteration
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import string
from typing import Dict
__all__ = ["VOCABS"]
VOCABS: Dict[str, str] = {
"digits": string.digits,
"ascii_letters": string.ascii_letters,
"punctuation": string.punctuation,
"currency": "£€¥¢฿",
"ancient_greek": "αβγδεζηθικλμνξοπρστυφχψωΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ",
"arabic_letters": "ءآأؤإئابةتثجحخدذرزسشصضطظعغـفقكلمنهوىي",
"persian_letters": "پچڢڤگ",
"hindi_digits": "٠١٢٣٤٥٦٧٨٩",
"arabic_diacritics": "ًٌٍَُِّْ",
"arabic_punctuation": "؟؛«»—",
}
VOCABS["latin"] = VOCABS["digits"] + VOCABS["ascii_letters"] + VOCABS["punctuation"]
VOCABS["english"] = VOCABS["latin"] + "°" + VOCABS["currency"]
VOCABS["legacy_french"] = VOCABS["latin"] + "°" + "àâéèêëîïôùûçÀÂÉÈËÎÏÔÙÛÇ" + VOCABS["currency"]
VOCABS["french"] = VOCABS["english"] + "àâéèêëîïôùûüçÀÂÉÈÊËÎÏÔÙÛÜÇ"
VOCABS["portuguese"] = VOCABS["english"] + "áàâãéêíïóôõúüçÁÀÂÃÉÊÍÏÓÔÕÚÜÇ"
VOCABS["spanish"] = VOCABS["english"] + "áéíóúüñÁÉÍÓÚÜÑ" + "¡¿"
VOCABS["german"] = VOCABS["english"] + "äöüßÄÖÜẞ"
VOCABS["arabic"] = (
VOCABS["digits"]
+ VOCABS["hindi_digits"]
+ VOCABS["arabic_letters"]
+ VOCABS["persian_letters"]
+ VOCABS["arabic_diacritics"]
+ VOCABS["arabic_punctuation"]
+ VOCABS["punctuation"]
)
VOCABS["czech"] = VOCABS["english"] + "áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ"
VOCABS["vietnamese"] = (
VOCABS["english"]
+ "áàảạãăắằẳẵặâấầẩẫậéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựiíìỉĩịýỳỷỹỵ"
+ "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰIÍÌỈĨỊÝỲỶỸỴ"
)
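# Composition sketch: vocabularies are plain strings, so custom ones can be built by
# concatenation and deduplicated while preserving order (the "english + greek" mix below
# is purely illustrative).
if __name__ == "__main__":
    custom_vocab = "".join(dict.fromkeys(VOCABS["english"] + VOCABS["ancient_greek"]))
    print(len(VOCABS["english"]), len(custom_vocab))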
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["FUNSD"]
class FUNSD(VisionDataset):
"""FUNSD dataset from `"FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents"
<https://arxiv.org/pdf/1905.13538.pdf>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/funsd-grid.png&src=0
:align: center
>>> from doctr.datasets import FUNSD
>>> train_set = FUNSD(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
        use_polygons: whether polygons should be considered as rotated bounding boxes (instead of straight ones)
        recognition_task: whether the dataset should be used for a recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
URL = "https://guillaumejaume.github.io/FUNSD/dataset.zip"
SHA256 = "c31735649e4f441bcbb4fd0f379574f7520b42286e80b01d80b445649d54761f"
FILE_NAME = "funsd.zip"
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
self.URL,
self.FILE_NAME,
self.SHA256,
True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
np_dtype = np.float32
# Use the subset
subfolder = os.path.join("dataset", "training_data" if train else "testing_data")
        # List images
tmp_root = os.path.join(self.root, subfolder, "images")
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking FUNSD", total=len(os.listdir(tmp_root))):
# File existence check
if not os.path.exists(os.path.join(tmp_root, img_path)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_path)}")
stem = Path(img_path).stem
with open(os.path.join(self.root, subfolder, "annotations", f"{stem}.json"), "rb") as f:
data = json.load(f)
_targets = [
(word["text"], word["box"])
for block in data["form"]
for word in block["words"]
if len(word["text"]) > 0
]
text_targets, box_targets = zip(*_targets)
if use_polygons:
# xmin, ymin, xmax, ymax -> (x, y) coordinates of top left, top right, bottom right, bottom left corners
box_targets = [
[
[box[0], box[1]],
[box[2], box[1]],
[box[2], box[3]],
[box[0], box[3]],
]
for box in box_targets
]
if recognition_task:
crops = crop_bboxes_from_image(
img_path=os.path.join(tmp_root, img_path), geoms=np.asarray(box_targets, dtype=np_dtype)
)
for crop, label in zip(crops, list(text_targets)):
# filter labels with unknown characters
if not any(char in label for char in ["☑", "☐", "\uf703", "\uf702"]):
self.data.append((crop, label))
else:
self.data.append(
(
img_path,
dict(boxes=np.asarray(box_targets, dtype=np_dtype), labels=list(text_targets)),
)
)
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import csv
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["SROIE"]
class SROIE(VisionDataset):
"""SROIE dataset from `"ICDAR2019 Competition on Scanned Receipt OCR and Information Extraction"
<https://arxiv.org/pdf/2103.10213.pdf>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/sroie-grid.png&src=0
:align: center
>>> from doctr.datasets import SROIE
>>> train_set = SROIE(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
        use_polygons: whether polygons should be considered as rotated bounding boxes (instead of straight ones)
        recognition_task: whether the dataset should be used for a recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
TRAIN = (
"https://doctr-static.mindee.com/models?id=v0.1.1/sroie2019_train_task1.zip&src=0",
"d4fa9e60abb03500d83299c845b9c87fd9c9430d1aeac96b83c5d0bb0ab27f6f",
)
TEST = (
"https://doctr-static.mindee.com/models?id=v0.1.1/sroie2019_test.zip&src=0",
"41b3c746a20226fddc80d86d4b2a903d43b5be4f521dd1bbe759dbf8844745e2",
)
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
url, sha256 = self.TRAIN if train else self.TEST
super().__init__(
url,
None,
sha256,
True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
tmp_root = os.path.join(self.root, "images")
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking SROIE", total=len(os.listdir(tmp_root))):
# File existence check
if not os.path.exists(os.path.join(tmp_root, img_path)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_path)}")
stem = Path(img_path).stem
with open(os.path.join(self.root, "annotations", f"{stem}.txt"), encoding="latin") as f:
_rows = [row for row in list(csv.reader(f, delimiter=",")) if len(row) > 0]
labels = [",".join(row[8:]) for row in _rows]
# reorder coordinates (8 -> (4,2) ->
# (x, y) coordinates of top left, top right, bottom right, bottom left corners) and filter empty lines
coords: np.ndarray = np.stack(
[np.array(list(map(int, row[:8])), dtype=np_dtype).reshape((4, 2)) for row in _rows], axis=0
)
if not use_polygons:
# xmin, ymin, xmax, ymax
coords = np.concatenate((coords.min(axis=1), coords.max(axis=1)), axis=1)
if recognition_task:
crops = crop_bboxes_from_image(img_path=os.path.join(tmp_root, img_path), geoms=coords)
for crop, label in zip(crops, labels):
if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
self.data.append((crop, label))
else:
self.data.append((img_path, dict(boxes=coords, labels=labels)))
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from typing import Any, Dict, List, Tuple, Union
import defusedxml.ElementTree as ET
import numpy as np
from tqdm import tqdm
from .datasets import VisionDataset
from .utils import convert_target_to_relative, crop_bboxes_from_image
__all__ = ["SVT"]
class SVT(VisionDataset):
"""SVT dataset from `"The Street View Text Dataset - UCSD Computer Vision"
<http://vision.ucsd.edu/~kai/svt/>`_.
.. image:: https://doctr-static.mindee.com/models?id=v0.5.0/svt-grid.png&src=0
:align: center
>>> from doctr.datasets import SVT
>>> train_set = SVT(train=True, download=True)
>>> img, target = train_set[0]
Args:
train: whether the subset should be the training one
        use_polygons: whether polygons should be considered as rotated bounding boxes (instead of straight ones)
        recognition_task: whether the dataset should be used for a recognition task
**kwargs: keyword arguments from `VisionDataset`.
"""
URL = "http://vision.ucsd.edu/~kai/svt/svt.zip"
SHA256 = "63b3d55e6b6d1e036e2a844a20c034fe3af3c32e4d914d6e0c4a3cd43df3bebf"
def __init__(
self,
train: bool = True,
use_polygons: bool = False,
recognition_task: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
self.URL,
None,
self.SHA256,
True,
pre_transforms=convert_target_to_relative if not recognition_task else None,
**kwargs,
)
self.train = train
self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
np_dtype = np.float32
# Load xml data
tmp_root = os.path.join(self.root, "svt1") if self.SHA256 else self.root
xml_tree = (
ET.parse(os.path.join(tmp_root, "train.xml"))
if self.train
else ET.parse(os.path.join(tmp_root, "test.xml"))
)
xml_root = xml_tree.getroot()
for image in tqdm(iterable=xml_root, desc="Unpacking SVT", total=len(xml_root)):
name, _, _, resolution, rectangles = image
# File existence check
if not os.path.exists(os.path.join(tmp_root, name.text)):
raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, name.text)}")
if use_polygons:
# (x, y) coordinates of top left, top right, bottom right, bottom left corners
_boxes = [
[
[float(rect.attrib["x"]), float(rect.attrib["y"])],
[float(rect.attrib["x"]) + float(rect.attrib["width"]), float(rect.attrib["y"])],
[
float(rect.attrib["x"]) + float(rect.attrib["width"]),
float(rect.attrib["y"]) + float(rect.attrib["height"]),
],
[float(rect.attrib["x"]), float(rect.attrib["y"]) + float(rect.attrib["height"])],
]
for rect in rectangles
]
else:
# x_min, y_min, x_max, y_max
_boxes = [
[
float(rect.attrib["x"]), # type: ignore[list-item]
float(rect.attrib["y"]), # type: ignore[list-item]
float(rect.attrib["x"]) + float(rect.attrib["width"]), # type: ignore[list-item]
float(rect.attrib["y"]) + float(rect.attrib["height"]), # type: ignore[list-item]
]
for rect in rectangles
]
boxes: np.ndarray = np.asarray(_boxes, dtype=np_dtype)
# Get the labels
labels = [lab.text for rect in rectangles for lab in rect]
if recognition_task:
crops = crop_bboxes_from_image(img_path=os.path.join(tmp_root, name.text), geoms=boxes)
for crop, label in zip(crops, labels):
if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
self.data.append((crop, label))
else:
self.data.append((name.text, dict(boxes=boxes, labels=labels)))
self.root = tmp_root
def extra_repr(self) -> str:
return f"train={self.train}"
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from copy import deepcopy
from typing import Any, List, Tuple
import numpy as np
import tensorflow as tf
from doctr.io import read_img_as_tensor, tensor_from_numpy
from .base import _AbstractDataset, _VisionDataset
__all__ = ["AbstractDataset", "VisionDataset"]
class AbstractDataset(_AbstractDataset):
def _read_sample(self, index: int) -> Tuple[tf.Tensor, Any]:
img_name, target = self.data[index]
# Check target
if isinstance(target, dict):
assert "boxes" in target, "Target should contain 'boxes' key"
assert "labels" in target, "Target should contain 'labels' key"
elif isinstance(target, tuple):
assert len(target) == 2
assert isinstance(target[0], str) or isinstance(
target[0], np.ndarray
), "first element of the tuple should be a string or a numpy array"
assert isinstance(target[1], list), "second element of the tuple should be a list"
else:
assert isinstance(target, str) or isinstance(
target, np.ndarray
), "Target should be a string or a numpy array"
# Read image
img = (
tensor_from_numpy(img_name, dtype=tf.float32)
if isinstance(img_name, np.ndarray)
else read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32)
)
return img, deepcopy(target)
@staticmethod
def collate_fn(samples: List[Tuple[tf.Tensor, Any]]) -> Tuple[tf.Tensor, List[Any]]:
images, targets = zip(*samples)
images = tf.stack(images, axis=0)
return images, list(targets)
class VisionDataset(AbstractDataset, _VisionDataset):
pass
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from copy import deepcopy
from typing import Any, List, Tuple
import numpy as np
import torch
from doctr.io import read_img_as_tensor, tensor_from_numpy
from .base import _AbstractDataset, _VisionDataset
__all__ = ["AbstractDataset", "VisionDataset"]
class AbstractDataset(_AbstractDataset):
def _read_sample(self, index: int) -> Tuple[torch.Tensor, Any]:
img_name, target = self.data[index]
# Check target
if isinstance(target, dict):
assert "boxes" in target, "Target should contain 'boxes' key"
assert "labels" in target, "Target should contain 'labels' key"
elif isinstance(target, tuple):
assert len(target) == 2
assert isinstance(target[0], str) or isinstance(
target[0], np.ndarray
), "first element of the tuple should be a string or a numpy array"
assert isinstance(target[1], list), "second element of the tuple should be a list"
else:
assert isinstance(target, str) or isinstance(
target, np.ndarray
), "Target should be a string or a numpy array"
# Read image
img = (
tensor_from_numpy(img_name, dtype=torch.float32)
if isinstance(img_name, np.ndarray)
else read_img_as_tensor(os.path.join(self.root, img_name), dtype=torch.float32)
)
return img, deepcopy(target)
@staticmethod
def collate_fn(samples: List[Tuple[torch.Tensor, Any]]) -> Tuple[torch.Tensor, List[Any]]:
images, targets = zip(*samples)
images = torch.stack(images, dim=0)
return images, list(targets)
class VisionDataset(AbstractDataset, _VisionDataset):
pass
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
import shutil
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
from doctr.file_utils import copy_tensor
from doctr.io.image import get_img_shape
from doctr.utils.data import download_from_url
__all__ = ["_AbstractDataset", "_VisionDataset"]
class _AbstractDataset:
data: List[Any] = []
_pre_transforms: Optional[Callable[[Any, Any], Tuple[Any, Any]]] = None
def __init__(
self,
root: Union[str, Path],
img_transforms: Optional[Callable[[Any], Any]] = None,
sample_transforms: Optional[Callable[[Any, Any], Tuple[Any, Any]]] = None,
pre_transforms: Optional[Callable[[Any, Any], Tuple[Any, Any]]] = None,
) -> None:
if not Path(root).is_dir():
raise ValueError(f"expected a path to a reachable folder: {root}")
self.root = root
self.img_transforms = img_transforms
self.sample_transforms = sample_transforms
self._pre_transforms = pre_transforms
self._get_img_shape = get_img_shape
def __len__(self) -> int:
return len(self.data)
def _read_sample(self, index: int) -> Tuple[Any, Any]:
raise NotImplementedError
def __getitem__(self, index: int) -> Tuple[Any, Any]:
# Read image
img, target = self._read_sample(index)
# Pre-transforms (format conversion at run-time etc.)
if self._pre_transforms is not None:
img, target = self._pre_transforms(img, target)
if self.img_transforms is not None:
# typing issue cf. https://github.com/python/mypy/issues/5485
img = self.img_transforms(img)
if self.sample_transforms is not None:
if isinstance(target, dict) and all([isinstance(item, np.ndarray) for item in target.values()]):
img_transformed = copy_tensor(img)
for class_name, bboxes in target.items():
img_transformed, target[class_name] = self.sample_transforms(img, bboxes)
img = img_transformed
else:
img, target = self.sample_transforms(img, target)
return img, target
def extra_repr(self) -> str:
return ""
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.extra_repr()})"
class _VisionDataset(_AbstractDataset):
"""Implements an abstract dataset
Args:
url: URL of the dataset
file_name: name of the file once downloaded
file_hash: expected SHA256 of the file
extract_archive: whether the downloaded file is an archive to be extracted
download: whether the dataset should be downloaded if not present on disk
overwrite: whether the archive should be re-extracted
cache_dir: cache directory
cache_subdir: subfolder to use in the cache
"""
def __init__(
self,
url: str,
file_name: Optional[str] = None,
file_hash: Optional[str] = None,
extract_archive: bool = False,
download: bool = False,
overwrite: bool = False,
cache_dir: Optional[str] = None,
cache_subdir: Optional[str] = None,
**kwargs: Any,
) -> None:
cache_dir = (
str(os.environ.get("DOCTR_CACHE_DIR", os.path.join(os.path.expanduser("~"), ".cache", "doctr")))
if cache_dir is None
else cache_dir
)
cache_subdir = "datasets" if cache_subdir is None else cache_subdir
file_name = file_name if isinstance(file_name, str) else os.path.basename(url)
# Download the file if not present
archive_path: Union[str, Path] = os.path.join(cache_dir, cache_subdir, file_name)
if not os.path.exists(archive_path) and not download:
raise ValueError("the dataset needs to be downloaded first with download=True")
archive_path = download_from_url(url, file_name, file_hash, cache_dir=cache_dir, cache_subdir=cache_subdir)
# Extract the archive
if extract_archive:
archive_path = Path(archive_path)
dataset_path = archive_path.parent.joinpath(archive_path.stem)
if not dataset_path.is_dir() or overwrite:
shutil.unpack_archive(archive_path, dataset_path)
super().__init__(dataset_path if extract_archive else archive_path, **kwargs)
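# Pipeline sketch: pre_transforms are applied to (img, target) at read time, then
# img_transforms to the image alone, then sample_transforms to the (img, target) pair.
# The in-memory subclass below is purely illustrative of the expected interface.
if __name__ == "__main__":
    class _DummyDataset(_AbstractDataset):
        def __init__(self, root: str) -> None:
            super().__init__(root, img_transforms=lambda img: img * 2)
            self.data = [(np.ones((4, 4, 3), dtype=np.float32), "label")]
        def _read_sample(self, index: int) -> Tuple[Any, Any]:
            return self.data[index]
    ds = _DummyDataset(root=".")
    img, target = ds[0]
    print(img.max(), target)  # 2.0 label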
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import tensorflow as tf
from .base import _CharacterGenerator, _WordGenerator
__all__ = ["CharacterGenerator", "WordGenerator"]
class CharacterGenerator(_CharacterGenerator):
"""Implements a character image generation dataset
>>> from doctr.datasets import CharacterGenerator
>>> ds = CharacterGenerator(vocab='abdef', num_samples=100)
>>> img, target = ds[0]
Args:
vocab: vocabulary to take the character from
num_samples: number of samples that will be generated iterating over the dataset
        cache_samples: whether generated images should be cached beforehand
font_family: font to use to generate the text images
img_transforms: composable transformations that will be applied to each image
sample_transforms: composable transformations that will be applied to both the image and the target
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@staticmethod
def collate_fn(samples):
images, targets = zip(*samples)
images = tf.stack(images, axis=0)
return images, tf.convert_to_tensor(targets)
class WordGenerator(_WordGenerator):
"""Implements a character image generation dataset
>>> from doctr.datasets import WordGenerator
>>> ds = WordGenerator(vocab='abdef', min_chars=1, max_chars=32, num_samples=100)
>>> img, target = ds[0]
Args:
vocab: vocabulary to take the character from
min_chars: minimum number of characters in a word
max_chars: maximum number of characters in a word
num_samples: number of samples that will be generated iterating over the dataset
        cache_samples: whether generated images should be cached beforehand
font_family: font to use to generate the text images
img_transforms: composable transformations that will be applied to each image
sample_transforms: composable transformations that will be applied to both the image and the target
"""
pass
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from torch.utils.data._utils.collate import default_collate
from .base import _CharacterGenerator, _WordGenerator
__all__ = ["CharacterGenerator", "WordGenerator"]
class CharacterGenerator(_CharacterGenerator):
"""Implements a character image generation dataset
>>> from doctr.datasets import CharacterGenerator
>>> ds = CharacterGenerator(vocab='abdef', num_samples=100)
>>> img, target = ds[0]
Args:
vocab: vocabulary to take the character from
num_samples: number of samples that will be generated iterating over the dataset
        cache_samples: whether generated images should be cached beforehand
font_family: font to use to generate the text images
img_transforms: composable transformations that will be applied to each image
sample_transforms: composable transformations that will be applied to both the image and the target
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
setattr(self, "collate_fn", default_collate)
class WordGenerator(_WordGenerator):
"""Implements a character image generation dataset
>>> from doctr.datasets import WordGenerator
>>> ds = WordGenerator(vocab='abdef', min_chars=1, max_chars=32, num_samples=100)
>>> img, target = ds[0]
Args:
vocab: vocabulary to take the character from
min_chars: minimum number of characters in a word
max_chars: maximum number of characters in a word
num_samples: number of samples that will be generated iterating over the dataset
        cache_samples: whether generated images should be cached beforehand
font_family: font to use to generate the text images
img_transforms: composable transformations that will be applied to each image
sample_transforms: composable transformations that will be applied to both the image and the target
"""
pass
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import random
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image, ImageDraw
from doctr.io.image import tensor_from_pil
from doctr.utils.fonts import get_font
from ..datasets import AbstractDataset
def synthesize_text_img(
text: str,
font_size: int = 32,
font_family: Optional[str] = None,
background_color: Optional[Tuple[int, int, int]] = None,
text_color: Optional[Tuple[int, int, int]] = None,
) -> Image:
"""Generate a synthetic text image
Args:
text: the text to render as an image
font_size: the size of the font
font_family: the font family (has to be installed on your system)
background_color: background color of the final image
text_color: text color on the final image
Returns:
PIL image of the text
"""
background_color = (0, 0, 0) if background_color is None else background_color
text_color = (255, 255, 255) if text_color is None else text_color
font = get_font(font_family, font_size)
text_w, text_h = font.getsize(text)
h, w = int(round(1.3 * text_h)), int(round(1.1 * text_w))
# If single letter, make the image square, otherwise expand to meet the text size
img_size = (h, w) if len(text) > 1 else (max(h, w), max(h, w))
img = Image.new("RGB", img_size[::-1], color=background_color)
d = ImageDraw.Draw(img)
# Offset so that the text is centered
text_pos = (int(round((img_size[1] - text_w) / 2)), int(round((img_size[0] - text_h) / 2)))
# Draw the text
d.text(text_pos, text, font=font, fill=text_color)
return img
class _CharacterGenerator(AbstractDataset):
def __init__(
self,
vocab: str,
num_samples: int,
cache_samples: bool = False,
font_family: Optional[Union[str, List[str]]] = None,
img_transforms: Optional[Callable[[Any], Any]] = None,
sample_transforms: Optional[Callable[[Any, Any], Tuple[Any, Any]]] = None,
) -> None:
self.vocab = vocab
self._num_samples = num_samples
self.font_family = font_family if isinstance(font_family, list) else [font_family] # type: ignore[list-item]
# Validate fonts
if isinstance(font_family, list):
for font in self.font_family:
try:
_ = get_font(font, 10)
except OSError:
raise ValueError(f"unable to locate font: {font}")
self.img_transforms = img_transforms
self.sample_transforms = sample_transforms
        self._data: List[Tuple[Image.Image, int]] = []
if cache_samples:
self._data = [
(synthesize_text_img(char, font_family=font), idx)
for idx, char in enumerate(self.vocab)
for font in self.font_family
]
def __len__(self) -> int:
return self._num_samples
def _read_sample(self, index: int) -> Tuple[Any, int]:
# Samples are already cached
if len(self._data) > 0:
idx = index % len(self._data)
pil_img, target = self._data[idx]
else:
target = index % len(self.vocab)
pil_img = synthesize_text_img(self.vocab[target], font_family=random.choice(self.font_family))
img = tensor_from_pil(pil_img)
return img, target
class _WordGenerator(AbstractDataset):
def __init__(
self,
vocab: str,
min_chars: int,
max_chars: int,
num_samples: int,
cache_samples: bool = False,
font_family: Optional[Union[str, List[str]]] = None,
img_transforms: Optional[Callable[[Any], Any]] = None,
sample_transforms: Optional[Callable[[Any, Any], Tuple[Any, Any]]] = None,
) -> None:
self.vocab = vocab
self.wordlen_range = (min_chars, max_chars)
self._num_samples = num_samples
self.font_family = font_family if isinstance(font_family, list) else [font_family] # type: ignore[list-item]
# Validate fonts
if isinstance(font_family, list):
for font in self.font_family:
try:
_ = get_font(font, 10)
except OSError:
raise ValueError(f"unable to locate font: {font}")
self.img_transforms = img_transforms
self.sample_transforms = sample_transforms
        self._data: List[Tuple[Image.Image, str]] = []
if cache_samples:
_words = [self._generate_string(*self.wordlen_range) for _ in range(num_samples)]
self._data = [
(synthesize_text_img(text, font_family=random.choice(self.font_family)), text) for text in _words
]
def _generate_string(self, min_chars: int, max_chars: int) -> str:
num_chars = random.randint(min_chars, max_chars)
return "".join(random.choice(self.vocab) for _ in range(num_chars))
def __len__(self) -> int:
return self._num_samples
def _read_sample(self, index: int) -> Tuple[Any, str]:
# Samples are already cached
if len(self._data) > 0:
pil_img, target = self._data[index]
else:
target = self._generate_string(*self.wordlen_range)
pil_img = synthesize_text_img(target, font_family=random.choice(self.font_family))
img = tensor_from_pil(pil_img)
return img, target
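# Rendering sketch: ``synthesize_text_img`` relies on whichever font is actually available
# on the system (a default PIL font when ``font_family`` is left to None), so the exact
# output size may vary.
if __name__ == "__main__":
    sample = synthesize_text_img("doctr", font_size=32)
    print(sample.size)  # (width, height), roughly 1.1 x text width by 1.3 x text height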
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict, List, Optional, Tuple, Union
from defusedxml import defuse_stdlib
defuse_stdlib()
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import Element as ETElement
from xml.etree.ElementTree import SubElement
import matplotlib.pyplot as plt
import numpy as np
import doctr
from doctr.utils.common_types import BoundingBox
from doctr.utils.geometry import resolve_enclosing_bbox, resolve_enclosing_rbbox
from doctr.utils.repr import NestedObject
from doctr.utils.visualization import synthesize_kie_page, synthesize_page, visualize_kie_page, visualize_page
__all__ = ["Element", "Word", "Artefact", "Line", "Prediction", "Block", "Page", "KIEPage", "Document"]
class Element(NestedObject):
"""Implements an abstract document element with exporting and text rendering capabilities"""
_children_names: List[str] = []
_exported_keys: List[str] = []
def __init__(self, **kwargs: Any) -> None:
for k, v in kwargs.items():
if k in self._children_names:
setattr(self, k, v)
else:
raise KeyError(f"{self.__class__.__name__} object does not have any attribute named '{k}'")
def export(self) -> Dict[str, Any]:
"""Exports the object into a nested dict format"""
export_dict = {k: getattr(self, k) for k in self._exported_keys}
for children_name in self._children_names:
if children_name in ["predictions"]:
export_dict[children_name] = {
k: [item.export() for item in c] for k, c in getattr(self, children_name).items()
}
else:
export_dict[children_name] = [c.export() for c in getattr(self, children_name)]
return export_dict
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
raise NotImplementedError
def render(self) -> str:
raise NotImplementedError
class Word(Element):
"""Implements a word element
Args:
value: the text string of the word
confidence: the confidence associated with the text prediction
geometry: bounding box of the word in format ((xmin, ymin), (xmax, ymax)) where coordinates are relative to
the page's size
"""
_exported_keys: List[str] = ["value", "confidence", "geometry"]
_children_names: List[str] = []
def __init__(self, value: str, confidence: float, geometry: Union[BoundingBox, np.ndarray]) -> None:
super().__init__()
self.value = value
self.confidence = confidence
self.geometry = geometry
def render(self) -> str:
"""Renders the full text of the element"""
return self.value
def extra_repr(self) -> str:
return f"value='{self.value}', confidence={self.confidence:.2}"
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
return cls(**kwargs)
class Artefact(Element):
"""Implements a non-textual element
Args:
artefact_type: the type of artefact
confidence: the confidence of the type prediction
        geometry: bounding box of the artefact in format ((xmin, ymin), (xmax, ymax)) where coordinates are relative to
the page's size.
"""
_exported_keys: List[str] = ["geometry", "type", "confidence"]
_children_names: List[str] = []
def __init__(self, artefact_type: str, confidence: float, geometry: BoundingBox) -> None:
super().__init__()
self.geometry = geometry
self.type = artefact_type
self.confidence = confidence
def render(self) -> str:
"""Renders the full text of the element"""
return f"[{self.type.upper()}]"
def extra_repr(self) -> str:
return f"type='{self.type}', confidence={self.confidence:.2}"
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
return cls(**kwargs)
class Line(Element):
"""Implements a line element as a collection of words
Args:
words: list of word elements
        geometry: bounding box of the line in format ((xmin, ymin), (xmax, ymax)) where coordinates are relative to
the page's size. If not specified, it will be resolved by default to the smallest bounding box enclosing
all words in it.
"""
_exported_keys: List[str] = ["geometry"]
_children_names: List[str] = ["words"]
words: List[Word] = []
def __init__(
self,
words: List[Word],
geometry: Optional[Union[BoundingBox, np.ndarray]] = None,
) -> None:
# Resolve the geometry using the smallest enclosing bounding box
if geometry is None:
# Check whether this is a rotated or straight box
box_resolution_fn = resolve_enclosing_rbbox if len(words[0].geometry) == 4 else resolve_enclosing_bbox
geometry = box_resolution_fn([w.geometry for w in words]) # type: ignore[operator]
super().__init__(words=words)
self.geometry = geometry
def render(self) -> str:
"""Renders the full text of the element"""
return " ".join(w.render() for w in self.words)
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
kwargs.update(
{
"words": [Word.from_dict(_dict) for _dict in save_dict["words"]],
}
)
return cls(**kwargs)
class Prediction(Word):
"""Implements a prediction element"""
def render(self) -> str:
"""Renders the full text of the element"""
return self.value
def extra_repr(self) -> str:
return f"value='{self.value}', confidence={self.confidence:.2}, bounding_box={self.geometry}"
class Block(Element):
"""Implements a block element as a collection of lines and artefacts
Args:
lines: list of line elements
artefacts: list of artefacts
        geometry: bounding box of the block in format ((xmin, ymin), (xmax, ymax)) where coordinates are relative to
the page's size. If not specified, it will be resolved by default to the smallest bounding box enclosing
all lines and artefacts in it.
"""
_exported_keys: List[str] = ["geometry"]
_children_names: List[str] = ["lines", "artefacts"]
lines: List[Line] = []
artefacts: List[Artefact] = []
def __init__(
self,
lines: List[Line] = [],
artefacts: List[Artefact] = [],
geometry: Optional[Union[BoundingBox, np.ndarray]] = None,
) -> None:
# Resolve the geometry using the smallest enclosing bounding box
if geometry is None:
line_boxes = [word.geometry for line in lines for word in line.words]
artefact_boxes = [artefact.geometry for artefact in artefacts]
box_resolution_fn = (
resolve_enclosing_rbbox if isinstance(lines[0].geometry, np.ndarray) else resolve_enclosing_bbox
)
geometry = box_resolution_fn(line_boxes + artefact_boxes) # type: ignore[operator]
super().__init__(lines=lines, artefacts=artefacts)
self.geometry = geometry
def render(self, line_break: str = "\n") -> str:
"""Renders the full text of the element"""
return line_break.join(line.render() for line in self.lines)
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
kwargs.update(
{
"lines": [Line.from_dict(_dict) for _dict in save_dict["lines"]],
"artefacts": [Artefact.from_dict(_dict) for _dict in save_dict["artefacts"]],
}
)
return cls(**kwargs)
class Page(Element):
"""Implements a page element as a collection of blocks
Args:
blocks: list of block elements
page_idx: the index of the page in the input raw document
dimensions: the page size in pixels in format (height, width)
        orientation: a dictionary with the value of the rotation angle in degrees and confidence of the prediction
language: a dictionary with the language value and confidence of the prediction
"""
_exported_keys: List[str] = ["page_idx", "dimensions", "orientation", "language"]
_children_names: List[str] = ["blocks"]
blocks: List[Block] = []
def __init__(
self,
blocks: List[Block],
page_idx: int,
dimensions: Tuple[int, int],
orientation: Optional[Dict[str, Any]] = None,
language: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__(blocks=blocks)
self.page_idx = page_idx
self.dimensions = dimensions
self.orientation = orientation if isinstance(orientation, dict) else dict(value=None, confidence=None)
self.language = language if isinstance(language, dict) else dict(value=None, confidence=None)
def render(self, block_break: str = "\n\n") -> str:
"""Renders the full text of the element"""
return block_break.join(b.render() for b in self.blocks)
def extra_repr(self) -> str:
return f"dimensions={self.dimensions}"
def show(self, page: np.ndarray, interactive: bool = True, preserve_aspect_ratio: bool = False, **kwargs) -> None:
"""Overlay the result on a given image
Args:
page: image encoded as a numpy array in uint8
interactive: whether the display should be interactive
preserve_aspect_ratio: pass True if you passed True to the predictor
"""
visualize_page(self.export(), page, interactive=interactive, preserve_aspect_ratio=preserve_aspect_ratio)
plt.show(**kwargs)
def synthesize(self, **kwargs) -> np.ndarray:
"""Synthesize the page from the predictions
Returns:
synthesized page
"""
return synthesize_page(self.export(), **kwargs)
def export_as_xml(self, file_title: str = "docTR - XML export (hOCR)") -> Tuple[bytes, ET.ElementTree]:
"""Export the page as XML (hOCR-format)
convention: https://github.com/kba/hocr-spec/blob/master/1.2/spec.md
Args:
file_title: the title of the XML file
Returns:
a tuple of the XML byte string, and its ElementTree
"""
p_idx = self.page_idx
block_count: int = 1
line_count: int = 1
word_count: int = 1
height, width = self.dimensions
language = self.language if "language" in self.language.keys() else "en"
# Create the XML root element
page_hocr = ETElement("html", attrib={"xmlns": "http://www.w3.org/1999/xhtml", "xml:lang": str(language)})
# Create the header / SubElements of the root element
head = SubElement(page_hocr, "head")
SubElement(head, "title").text = file_title
SubElement(head, "meta", attrib={"http-equiv": "Content-Type", "content": "text/html; charset=utf-8"})
SubElement(
head,
"meta",
attrib={"name": "ocr-system", "content": f"python-doctr {doctr.__version__}"}, # type: ignore[attr-defined]
)
SubElement(
head,
"meta",
attrib={"name": "ocr-capabilities", "content": "ocr_page ocr_carea ocr_par ocr_line ocrx_word"},
)
# Create the body
body = SubElement(page_hocr, "body")
SubElement(
body,
"div",
attrib={
"class": "ocr_page",
"id": f"page_{p_idx + 1}",
"title": f"image; bbox 0 0 {width} {height}; ppageno 0",
},
)
# iterate over the blocks / lines / words and create the XML elements in body line by line with the attributes
for block in self.blocks:
if len(block.geometry) != 2:
raise TypeError("XML export is only available for straight bounding boxes for now.")
(xmin, ymin), (xmax, ymax) = block.geometry
block_div = SubElement(
body,
"div",
attrib={
"class": "ocr_carea",
"id": f"block_{block_count}",
"title": f"bbox {int(round(xmin * width))} {int(round(ymin * height))} \
{int(round(xmax * width))} {int(round(ymax * height))}",
},
)
paragraph = SubElement(
block_div,
"p",
attrib={
"class": "ocr_par",
"id": f"par_{block_count}",
"title": f"bbox {int(round(xmin * width))} {int(round(ymin * height))} \
{int(round(xmax * width))} {int(round(ymax * height))}",
},
)
block_count += 1
for line in block.lines:
(xmin, ymin), (xmax, ymax) = line.geometry
                # NOTE: baseline, x_size, x_descenders and x_ascenders are currently initialized to 0
line_span = SubElement(
paragraph,
"span",
attrib={
"class": "ocr_line",
"id": f"line_{line_count}",
"title": f"bbox {int(round(xmin * width))} {int(round(ymin * height))} \
{int(round(xmax * width))} {int(round(ymax * height))}; \
baseline 0 0; x_size 0; x_descenders 0; x_ascenders 0",
},
)
line_count += 1
for word in line.words:
(xmin, ymin), (xmax, ymax) = word.geometry
conf = word.confidence
word_div = SubElement(
line_span,
"span",
attrib={
"class": "ocrx_word",
"id": f"word_{word_count}",
"title": f"bbox {int(round(xmin * width))} {int(round(ymin * height))} \
{int(round(xmax * width))} {int(round(ymax * height))}; \
x_wconf {int(round(conf * 100))}",
},
)
# set the text
word_div.text = word.value
word_count += 1
return (ET.tostring(page_hocr, encoding="utf-8", method="xml"), ET.ElementTree(page_hocr))
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
kwargs.update({"blocks": [Block.from_dict(block_dict) for block_dict in save_dict["blocks"]]})
return cls(**kwargs)
class KIEPage(Element):
"""Implements a KIE page element as a collection of predictions
Args:
predictions: Dictionary with list of block elements for each detection class
page_idx: the index of the page in the input raw document
dimensions: the page size in pixels in format (height, width)
        orientation: a dictionary with the value of the rotation angle in degrees and confidence of the prediction
language: a dictionary with the language value and confidence of the prediction
"""
_exported_keys: List[str] = ["page_idx", "dimensions", "orientation", "language"]
_children_names: List[str] = ["predictions"]
predictions: Dict[str, List[Prediction]] = {}
def __init__(
self,
predictions: Dict[str, List[Prediction]],
page_idx: int,
dimensions: Tuple[int, int],
orientation: Optional[Dict[str, Any]] = None,
language: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__(predictions=predictions)
self.page_idx = page_idx
self.dimensions = dimensions
self.orientation = orientation if isinstance(orientation, dict) else dict(value=None, confidence=None)
self.language = language if isinstance(language, dict) else dict(value=None, confidence=None)
def render(self, prediction_break: str = "\n\n") -> str:
"""Renders the full text of the element"""
return prediction_break.join(
f"{class_name}: {p.render()}" for class_name, predictions in self.predictions.items() for p in predictions
)
def extra_repr(self) -> str:
return f"dimensions={self.dimensions}"
def show(self, page: np.ndarray, interactive: bool = True, preserve_aspect_ratio: bool = False, **kwargs) -> None:
"""Overlay the result on a given image
Args:
page: image encoded as a numpy array in uint8
interactive: whether the display should be interactive
preserve_aspect_ratio: pass True if you passed True to the predictor
"""
visualize_kie_page(self.export(), page, interactive=interactive, preserve_aspect_ratio=preserve_aspect_ratio)
plt.show(**kwargs)
def synthesize(self, **kwargs) -> np.ndarray:
"""Synthesize the page from the predictions
Returns:
synthesized page
"""
return synthesize_kie_page(self.export(), **kwargs)
def export_as_xml(self, file_title: str = "docTR - XML export (hOCR)") -> Tuple[bytes, ET.ElementTree]:
"""Export the page as XML (hOCR-format)
convention: https://github.com/kba/hocr-spec/blob/master/1.2/spec.md
Args:
file_title: the title of the XML file
Returns:
a tuple of the XML byte string, and its ElementTree
"""
p_idx = self.page_idx
prediction_count: int = 1
height, width = self.dimensions
language = self.language if "language" in self.language.keys() else "en"
# Create the XML root element
page_hocr = ETElement("html", attrib={"xmlns": "http://www.w3.org/1999/xhtml", "xml:lang": str(language)})
# Create the header / SubElements of the root element
head = SubElement(page_hocr, "head")
SubElement(head, "title").text = file_title
SubElement(head, "meta", attrib={"http-equiv": "Content-Type", "content": "text/html; charset=utf-8"})
SubElement(
head,
"meta",
attrib={"name": "ocr-system", "content": f"python-doctr {doctr.__version__}"}, # type: ignore[attr-defined]
)
SubElement(
head,
"meta",
attrib={"name": "ocr-capabilities", "content": "ocr_page ocr_carea ocr_par ocr_line ocrx_word"},
)
# Create the body
body = SubElement(page_hocr, "body")
SubElement(
body,
"div",
attrib={
"class": "ocr_page",
"id": f"page_{p_idx + 1}",
"title": f"image; bbox 0 0 {width} {height}; ppageno 0",
},
)
# iterate over the blocks / lines / words and create the XML elements in body line by line with the attributes
for class_name, predictions in self.predictions.items():
for prediction in predictions:
if len(prediction.geometry) != 2:
raise TypeError("XML export is only available for straight bounding boxes for now.")
(xmin, ymin), (xmax, ymax) = prediction.geometry
prediction_div = SubElement(
body,
"div",
attrib={
"class": "ocr_carea",
"id": f"{class_name}_prediction_{prediction_count}",
"title": f"bbox {int(round(xmin * width))} {int(round(ymin * height))} \
{int(round(xmax * width))} {int(round(ymax * height))}",
},
)
prediction_div.text = prediction.value
prediction_count += 1
return ET.tostring(page_hocr, encoding="utf-8", method="xml"), ET.ElementTree(page_hocr)
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
        kwargs.update(
            {"predictions": {k: [Prediction.from_dict(p) for p in v] for k, v in save_dict["predictions"].items()}}
        )
return cls(**kwargs)
class Document(Element):
"""Implements a document element as a collection of pages
Args:
pages: list of page elements
"""
_children_names: List[str] = ["pages"]
pages: List[Page] = []
def __init__(
self,
pages: List[Page],
) -> None:
super().__init__(pages=pages)
def render(self, page_break: str = "\n\n\n\n") -> str:
"""Renders the full text of the element"""
return page_break.join(p.render() for p in self.pages)
def show(self, pages: List[np.ndarray], **kwargs) -> None:
"""Overlay the result on a given image
Args:
pages: list of images encoded as numpy arrays in uint8
"""
for img, result in zip(pages, self.pages):
result.show(img, **kwargs)
def synthesize(self, **kwargs) -> List[np.ndarray]:
"""Synthesize all pages from their predictions
Returns:
list of synthesized pages
"""
return [page.synthesize() for page in self.pages]
def export_as_xml(self, **kwargs) -> List[Tuple[bytes, ET.ElementTree]]:
"""Export the document as XML (hOCR-format)
Args:
**kwargs: additional keyword arguments passed to the Page.export_as_xml method
Returns:
list of tuple of (bytes, ElementTree)
"""
return [page.export_as_xml(**kwargs) for page in self.pages]
@classmethod
def from_dict(cls, save_dict: Dict[str, Any], **kwargs):
kwargs = {k: save_dict[k] for k in cls._exported_keys}
kwargs.update({"pages": [Page.from_dict(page_dict) for page_dict in save_dict["pages"]]})
return cls(**kwargs)
class KIEDocument(Document):
"""Implements a document element as a collection of pages
Args:
pages: list of page elements
"""
_children_names: List[str] = ["pages"]
pages: List[KIEPage] = [] # type: ignore[assignment]
def __init__(
self,
pages: List[KIEPage],
) -> None:
super().__init__(pages=pages) # type: ignore[arg-type]
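# Construction sketch: a one-word document assembled by hand with relative coordinates
# (the values below are purely illustrative), rendered as text and exported to hOCR.
if __name__ == "__main__":
    word = Word("hello", 0.99, ((0.1, 0.1), (0.3, 0.2)))
    page = Page([Block([Line([word])])], page_idx=0, dimensions=(100, 200))
    doc = Document([page])
    print(doc.render())  # hello
    xml_bytes, _ = page.export_as_xml()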
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any
from weasyprint import HTML
__all__ = ["read_html"]
def read_html(url: str, **kwargs: Any) -> bytes:
"""Read a PDF file and convert it into an image in numpy format
>>> from doctr.documents import read_html
>>> doc = read_html("https://www.yoursite.com")
Args:
url: URL of the target web page
    Returns:
        the web page rendered as a PDF byte stream
"""
return HTML(url, **kwargs).write_pdf()
|
from .elements import *
from .html import *
from .image import *
from .pdf import *
from .reader import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from pathlib import Path
from typing import Any, List, Optional
import numpy as np
import pypdfium2 as pdfium
from doctr.utils.common_types import AbstractFile
__all__ = ["read_pdf"]
def read_pdf(
file: AbstractFile,
scale: float = 2,
rgb_mode: bool = True,
password: Optional[str] = None,
**kwargs: Any,
) -> List[np.ndarray]:
"""Read a PDF file and convert it into an image in numpy format
>>> from doctr.documents import read_pdf
>>> doc = read_pdf("path/to/your/doc.pdf")
Args:
file: the path to the PDF file
scale: rendering scale (1 corresponds to 72dpi)
rgb_mode: if True, the output will be RGB, otherwise BGR
password: a password to unlock the document, if encrypted
kwargs: additional parameters to :meth:`pypdfium2.PdfDocument.render_to`
Returns:
the list of pages decoded as numpy ndarray of shape H x W x C
"""
if isinstance(file, Path):
file = str(file)
# Rasterise pages to numpy ndarrays with pypdfium2
pdf = pdfium.PdfDocument(file, password=password)
renderer = pdf.render_to(pdfium.BitmapConv.numpy_ndarray, scale=scale, rev_byteorder=rgb_mode, **kwargs)
return [img for img, _ in renderer]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from pathlib import Path
from typing import List, Sequence, Union
import numpy as np
from doctr.utils.common_types import AbstractFile
from .html import read_html
from .image import read_img_as_numpy
from .pdf import read_pdf
__all__ = ["DocumentFile"]
class DocumentFile:
"""Read a document from multiple extensions"""
@classmethod
def from_pdf(cls, file: AbstractFile, **kwargs) -> List[np.ndarray]:
"""Read a PDF file
        >>> from doctr.io import DocumentFile
>>> doc = DocumentFile.from_pdf("path/to/your/doc.pdf")
Args:
file: the path to the PDF file or a binary stream
Returns:
the list of pages decoded as numpy ndarray of shape H x W x 3
"""
return read_pdf(file, **kwargs)
@classmethod
def from_url(cls, url: str, **kwargs) -> List[np.ndarray]:
"""Interpret a web page as a PDF document
        >>> from doctr.io import DocumentFile
>>> doc = DocumentFile.from_url("https://www.yoursite.com")
Args:
url: the URL of the target web page
Returns:
the list of pages decoded as numpy ndarray of shape H x W x 3
"""
pdf_stream = read_html(url)
return cls.from_pdf(pdf_stream, **kwargs)
@classmethod
def from_images(cls, files: Union[Sequence[AbstractFile], AbstractFile], **kwargs) -> List[np.ndarray]:
"""Read an image file (or a collection of image files) and convert it into an image in numpy format
        >>> from doctr.io import DocumentFile
>>> pages = DocumentFile.from_images(["path/to/your/page1.png", "path/to/your/page2.png"])
Args:
files: the path to the image file or a binary stream, or a collection of those
Returns:
the list of pages decoded as numpy ndarray of shape H x W x 3
"""
if isinstance(files, (str, Path, bytes)):
files = [files]
return [read_img_as_numpy(file, **kwargs) for file in files]
|
from doctr.file_utils import is_tf_available, is_torch_available
from .base import *
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Tuple
import numpy as np
import tensorflow as tf
from PIL import Image
if tf.__version__ >= "2.6.0":
from tensorflow.keras.utils import img_to_array
else:
from tensorflow.keras.preprocessing.image import img_to_array
from doctr.utils.common_types import AbstractPath
__all__ = ["tensor_from_pil", "read_img_as_tensor", "decode_img_as_tensor", "tensor_from_numpy", "get_img_shape"]
def tensor_from_pil(pil_img: Image, dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Convert a PIL Image to a TensorFlow tensor
Args:
pil_img: a PIL image
dtype: the output tensor data type
Returns:
decoded image as tensor
"""
npy_img = img_to_array(pil_img)
return tensor_from_numpy(npy_img, dtype)
def read_img_as_tensor(img_path: AbstractPath, dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Read an image file as a TensorFlow tensor
Args:
img_path: location of the image file
dtype: the desired data type of the output tensor. If it is float-related, values will be divided by 255.
Returns:
decoded image as a tensor
"""
if dtype not in (tf.uint8, tf.float16, tf.float32):
raise ValueError("insupported value for dtype")
img = tf.io.read_file(img_path)
img = tf.image.decode_jpeg(img, channels=3)
if dtype != tf.uint8:
img = tf.image.convert_image_dtype(img, dtype=dtype)
img = tf.clip_by_value(img, 0, 1)
return img
def decode_img_as_tensor(img_content: bytes, dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Read a byte stream as a TensorFlow tensor
Args:
img_content: bytes of a decoded image
dtype: the desired data type of the output tensor. If it is float-related, values will be divided by 255.
Returns:
decoded image as a tensor
"""
if dtype not in (tf.uint8, tf.float16, tf.float32):
raise ValueError("insupported value for dtype")
img = tf.io.decode_image(img_content, channels=3)
if dtype != tf.uint8:
img = tf.image.convert_image_dtype(img, dtype=dtype)
img = tf.clip_by_value(img, 0, 1)
return img
def tensor_from_numpy(npy_img: np.ndarray, dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Read an image file as a TensorFlow tensor
Args:
img: image encoded as a numpy array of shape (H, W, C) in np.uint8
dtype: the desired data type of the output tensor. If it is float-related, values will be divided by 255.
Returns:
same image as a tensor of shape (H, W, C)
"""
if dtype not in (tf.uint8, tf.float16, tf.float32):
raise ValueError("insupported value for dtype")
if dtype == tf.uint8:
img = tf.convert_to_tensor(npy_img, dtype=dtype)
else:
img = tf.image.convert_image_dtype(npy_img, dtype=dtype)
img = tf.clip_by_value(img, 0, 1)
return img
def get_img_shape(img: tf.Tensor) -> Tuple[int, int]:
return img.shape[:2]
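# Dtype sketch: uint8 keeps the raw pixel values, while float targets are rescaled to [0, 1]
# (the values below are illustrative).
if __name__ == "__main__":
    raw = np.full((8, 8, 3), 255, dtype=np.uint8)
    print(tensor_from_numpy(raw, tf.uint8).numpy().max())    # 255
    print(tensor_from_numpy(raw, tf.float32).numpy().max())  # 1.0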
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from io import BytesIO
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor
from doctr.utils.common_types import AbstractPath
__all__ = ["tensor_from_pil", "read_img_as_tensor", "decode_img_as_tensor", "tensor_from_numpy", "get_img_shape"]
def tensor_from_pil(pil_img: Image, dtype: torch.dtype = torch.float32) -> torch.Tensor:
"""Convert a PIL Image to a PyTorch tensor
Args:
pil_img: a PIL image
dtype: the output tensor data type
Returns:
decoded image as tensor
"""
if dtype == torch.float32:
img = to_tensor(pil_img)
else:
img = tensor_from_numpy(np.array(pil_img, np.uint8, copy=True), dtype)
return img
def read_img_as_tensor(img_path: AbstractPath, dtype: torch.dtype = torch.float32) -> torch.Tensor:
"""Read an image file as a PyTorch tensor
Args:
img_path: location of the image file
dtype: the desired data type of the output tensor. If it is float-related, values will be divided by 255.
Returns:
decoded image as a tensor
"""
if dtype not in (torch.uint8, torch.float16, torch.float32):
raise ValueError("insupported value for dtype")
pil_img = Image.open(img_path, mode="r").convert("RGB")
return tensor_from_pil(pil_img, dtype)
def decode_img_as_tensor(img_content: bytes, dtype: torch.dtype = torch.float32) -> torch.Tensor:
"""Read a byte stream as a PyTorch tensor
Args:
img_content: bytes of a decoded image
dtype: the desired data type of the output tensor. If it is float-related, values will be divided by 255.
Returns:
decoded image as a tensor
"""
if dtype not in (torch.uint8, torch.float16, torch.float32):
raise ValueError("insupported value for dtype")
pil_img = Image.open(BytesIO(img_content), mode="r").convert("RGB")
return tensor_from_pil(pil_img, dtype)
def tensor_from_numpy(npy_img: np.ndarray, dtype: torch.dtype = torch.float32) -> torch.Tensor:
"""Read an image file as a PyTorch tensor
Args:
img: image encoded as a numpy array of shape (H, W, C) in np.uint8
dtype: the desired data type of the output tensor. If it is float-related, values will be divided by 255.
Returns:
same image as a tensor of shape (C, H, W)
"""
if dtype not in (torch.uint8, torch.float16, torch.float32):
raise ValueError("insupported value for dtype")
if dtype == torch.float32:
img = to_tensor(npy_img)
else:
img = torch.from_numpy(npy_img)
# put it from HWC to CHW format
img = img.permute((2, 0, 1)).contiguous()
if dtype == torch.float16:
# Switch to FP16
img = img.to(dtype=torch.float16).div(255)
return img
def get_img_shape(img: torch.Tensor) -> Tuple[int, int]:
return img.shape[-2:] # type: ignore[return-value]
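# Layout sketch: a (H, W, C) uint8 numpy image becomes a (C, H, W) tensor, rescaled to
# [0, 1] for float dtypes (the shapes below are illustrative).
if __name__ == "__main__":
    raw = np.full((8, 8, 3), 255, dtype=np.uint8)
    print(tensor_from_numpy(raw, torch.uint8).shape)           # torch.Size([3, 8, 8])
    print(tensor_from_numpy(raw, torch.float32).max().item())  # 1.0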
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from pathlib import Path
from typing import Optional, Tuple
import cv2
import numpy as np
from doctr.utils.common_types import AbstractFile
__all__ = ["read_img_as_numpy"]
def read_img_as_numpy(
file: AbstractFile,
output_size: Optional[Tuple[int, int]] = None,
rgb_output: bool = True,
) -> np.ndarray:
"""Read an image file into numpy format
    >>> from doctr.io import read_img_as_numpy
    >>> page = read_img_as_numpy("path/to/your/doc.jpg")
Args:
file: the path to the image file
output_size: the expected output size of each page in format H x W
rgb_output: whether the output ndarray channel order should be RGB instead of BGR.
Returns:
the page decoded as numpy ndarray of shape H x W x 3
"""
if isinstance(file, (str, Path)):
if not Path(file).is_file():
raise FileNotFoundError(f"unable to access {file}")
img = cv2.imread(str(file), cv2.IMREAD_COLOR)
elif isinstance(file, bytes):
_file: np.ndarray = np.frombuffer(file, np.uint8)
img = cv2.imdecode(_file, cv2.IMREAD_COLOR)
else:
raise TypeError("unsupported object type for argument 'file'")
# Validity check
if img is None:
raise ValueError("unable to read file.")
# Resizing
if isinstance(output_size, tuple):
img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)
# Switch the channel order
if rgb_output:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Dict, List, Optional, Tuple
import cv2
import numpy as np
from scipy.optimize import linear_sum_assignment
from unidecode import unidecode
__all__ = [
"TextMatch",
"box_iou",
"box_ioa",
"mask_iou",
"polygon_iou",
"nms",
"LocalizationConfusion",
"OCRMetric",
"DetectionMetric",
]
def string_match(word1: str, word2: str) -> Tuple[bool, bool, bool, bool]:
"""Performs string comparison with multiple levels of tolerance
Args:
word1: a string
word2: another string
Returns:
a tuple with booleans specifying respectively whether the raw strings, their lower-case counterparts, their
unidecode counterparts and their lower-case unidecode counterparts match
"""
raw_match = word1 == word2
caseless_match = word1.lower() == word2.lower()
unidecode_match = unidecode(word1) == unidecode(word2)
# Warning: the order is important here otherwise the pair ("EUR", "€") cannot be matched
unicase_match = unidecode(word1).lower() == unidecode(word2).lower()
return raw_match, caseless_match, unidecode_match, unicase_match
class TextMatch:
r"""Implements text match metric (word-level accuracy) for recognition task.
The raw aggregated metric is computed as follows:
.. math::
\forall X, Y \in \mathcal{W}^N,
TextMatch(X, Y) = \frac{1}{N} \sum\limits_{i=1}^N f_{Y_i}(X_i)
with the indicator function :math:`f_{a}` defined as:
.. math::
\forall a, x \in \mathcal{W},
f_a(x) = \left\{
\begin{array}{ll}
1 & \mbox{if } x = a \\
0 & \mbox{otherwise.}
\end{array}
\right.
where :math:`\mathcal{W}` is the set of all possible character sequences,
:math:`N` is a strictly positive integer.
>>> from doctr.utils import TextMatch
>>> metric = TextMatch()
>>> metric.update(['Hello', 'world'], ['hello', 'world'])
>>> metric.summary()
"""
def __init__(self) -> None:
self.reset()
def update(
self,
gt: List[str],
pred: List[str],
) -> None:
"""Update the state of the metric with new predictions
Args:
        gt: list of ground-truth character sequences
pred: list of predicted character sequences
"""
if len(gt) != len(pred):
raise AssertionError("prediction size does not match with ground-truth labels size")
for gt_word, pred_word in zip(gt, pred):
_raw, _caseless, _unidecode, _unicase = string_match(gt_word, pred_word)
self.raw += int(_raw)
self.caseless += int(_caseless)
self.unidecode += int(_unidecode)
self.unicase += int(_unicase)
self.total += len(gt)
def summary(self) -> Dict[str, float]:
"""Computes the aggregated metrics
Returns:
a dictionary with the exact match score for the raw data, its lower-case counterpart, its unidecode
counterpart and its lower-case unidecode counterpart
"""
if self.total == 0:
raise AssertionError("you need to update the metric before getting the summary")
return dict(
raw=self.raw / self.total,
caseless=self.caseless / self.total,
unidecode=self.unidecode / self.total,
unicase=self.unicase / self.total,
)
def reset(self) -> None:
self.raw = 0
self.caseless = 0
self.unidecode = 0
self.unicase = 0
self.total = 0
def box_iou(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:
"""Computes the IoU between two sets of bounding boxes
Args:
boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)
boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)
Returns:
the IoU matrix of shape (N, M)
"""
iou_mat: np.ndarray = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)
if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:
l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)
l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)
left = np.maximum(l1, l2.T)
top = np.maximum(t1, t2.T)
right = np.minimum(r1, r2.T)
bot = np.minimum(b1, b2.T)
intersection = np.clip(right - left, 0, np.Inf) * np.clip(bot - top, 0, np.Inf)
union = (r1 - l1) * (b1 - t1) + ((r2 - l2) * (b2 - t2)).T - intersection
iou_mat = intersection / union
return iou_mat
def box_ioa(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:
"""Computes the IoA (intersection over area) between two sets of bounding boxes:
ioa(i, j) = inter(i, j) / area(i)
Args:
boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)
boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)
Returns:
the IoA matrix of shape (N, M)
"""
ioa_mat: np.ndarray = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)
if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:
l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)
l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)
left = np.maximum(l1, l2.T)
top = np.maximum(t1, t2.T)
right = np.minimum(r1, r2.T)
bot = np.minimum(b1, b2.T)
intersection = np.clip(right - left, 0, np.Inf) * np.clip(bot - top, 0, np.Inf)
area = (r1 - l1) * (b1 - t1)
ioa_mat = intersection / area
return ioa_mat
def mask_iou(masks_1: np.ndarray, masks_2: np.ndarray) -> np.ndarray:
"""Computes the IoU between two sets of boolean masks
Args:
masks_1: boolean masks of shape (N, H, W)
masks_2: boolean masks of shape (M, H, W)
Returns:
the IoU matrix of shape (N, M)
"""
if masks_1.shape[1:] != masks_2.shape[1:]:
raise AssertionError("both boolean masks should have the same spatial shape")
iou_mat: np.ndarray = np.zeros((masks_1.shape[0], masks_2.shape[0]), dtype=np.float32)
if masks_1.shape[0] > 0 and masks_2.shape[0] > 0:
axes = tuple(range(2, masks_1.ndim + 1))
intersection = np.logical_and(masks_1[:, None, ...], masks_2[None, ...]).sum(axis=axes)
union = np.logical_or(masks_1[:, None, ...], masks_2[None, ...]).sum(axis=axes)
iou_mat = intersection / union
return iou_mat
def polygon_iou(
polys_1: np.ndarray, polys_2: np.ndarray, mask_shape: Tuple[int, int], use_broadcasting: bool = False
) -> np.ndarray:
"""Computes the IoU between two sets of rotated bounding boxes
Args:
polys_1: rotated bounding boxes of shape (N, 4, 2)
polys_2: rotated bounding boxes of shape (M, 4, 2)
mask_shape: spatial shape of the intermediate masks
use_broadcasting: if set to True, leverage broadcasting speedup by consuming more memory
Returns:
the IoU matrix of shape (N, M)
"""
if polys_1.ndim != 3 or polys_2.ndim != 3:
raise AssertionError("expects boxes to be in format (N, 4, 2)")
iou_mat: np.ndarray = np.zeros((polys_1.shape[0], polys_2.shape[0]), dtype=np.float32)
if polys_1.shape[0] > 0 and polys_2.shape[0] > 0:
if use_broadcasting:
masks_1 = rbox_to_mask(polys_1, shape=mask_shape)
masks_2 = rbox_to_mask(polys_2, shape=mask_shape)
iou_mat = mask_iou(masks_1, masks_2)
else:
# Save memory by doing the computation for each pair
for idx, b1 in enumerate(polys_1):
m1 = _rbox_to_mask(b1, mask_shape)
for _idx, b2 in enumerate(polys_2):
m2 = _rbox_to_mask(b2, mask_shape)
iou_mat[idx, _idx] = np.logical_and(m1, m2).sum() / np.logical_or(m1, m2).sum()
return iou_mat
def _rbox_to_mask(box: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
"""Converts a rotated bounding box to a boolean mask
Args:
box: rotated bounding box of shape (4, 2)
shape: spatial shapes of the output masks
Returns:
the boolean mask of the specified shape
"""
mask: np.ndarray = np.zeros(shape, dtype=np.uint8)
# Get absolute coords
if box.dtype != int:
abs_box = box.copy()
abs_box[:, 0] = abs_box[:, 0] * shape[1]
abs_box[:, 1] = abs_box[:, 1] * shape[0]
abs_box = abs_box.round().astype(int)
else:
abs_box = box
abs_box[2:] = abs_box[2:] + 1
cv2.fillPoly(mask, [abs_box - 1], 1)
return mask.astype(bool)
def rbox_to_mask(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
"""Converts rotated bounding boxes to boolean masks
Args:
boxes: rotated bounding boxes of shape (N, 4, 2)
shape: spatial shapes of the output masks
Returns:
the boolean masks of shape (N, H, W)
"""
masks: np.ndarray = np.zeros((boxes.shape[0], *shape), dtype=np.uint8)
if boxes.shape[0] > 0:
# Get absolute coordinates
if boxes.dtype != int:
abs_boxes = boxes.copy()
abs_boxes[:, :, 0] = abs_boxes[:, :, 0] * shape[1]
abs_boxes[:, :, 1] = abs_boxes[:, :, 1] * shape[0]
abs_boxes = abs_boxes.round().astype(int)
else:
abs_boxes = boxes
abs_boxes[:, 2:] = abs_boxes[:, 2:] + 1
# TODO: optimize slicing to improve vectorization
for idx, _box in enumerate(abs_boxes):
cv2.fillPoly(masks[idx], [_box - 1], 1)
return masks.astype(bool)
def nms(boxes: np.ndarray, thresh: float = 0.5) -> List[int]:
"""Perform non-max suppression, borrowed from <https://github.com/rbgirshick/fast-rcnn>`_.
Args:
boxes: np array of straight boxes: (*, 5), (xmin, ymin, xmax, ymax, score)
thresh: iou threshold to perform box suppression.
Returns:
A list of box indexes to keep
"""
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
scores = boxes[:, 4]
areas = (x2 - x1) * (y2 - y1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
class LocalizationConfusion:
r"""Implements common confusion metrics and mean IoU for localization evaluation.
The aggregated metrics are computed as follows:
.. math::
\forall Y \in \mathcal{B}^N, \forall X \in \mathcal{B}^M, \\
Recall(X, Y) = \frac{1}{N} \sum\limits_{i=1}^N g_{X}(Y_i) \\
Precision(X, Y) = \frac{1}{M} \sum\limits_{i=1}^M g_{X}(Y_i) \\
meanIoU(X, Y) = \frac{1}{M} \sum\limits_{i=1}^M \max\limits_{j \in [1, N]} IoU(X_i, Y_j)
with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and
:math:`y`, and the function :math:`g_{X}` defined as:
.. math::
\forall y \in \mathcal{B},
g_X(y) = \left\{
\begin{array}{ll}
1 & \mbox{if } y\mbox{ has been assigned to any }(X_i)_i\mbox{ with an }IoU \geq 0.5 \\
0 & \mbox{otherwise.}
\end{array}
\right.
where :math:`\mathcal{B}` is the set of possible bounding boxes,
:math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.
>>> import numpy as np
>>> from doctr.utils import LocalizationConfusion
>>> metric = LocalizationConfusion(iou_thresh=0.5)
>>> metric.update(np.asarray([[0, 0, 100, 100]]), np.asarray([[0, 0, 70, 70], [110, 95, 200, 150]]))
>>> metric.summary()
Args:
iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match
use_polygons: if set to True, predictions and targets will be expected to have rotated format
mask_shape: if use_polygons is True, describes the spatial shape of the image used
use_broadcasting: if use_polygons is True, use broadcasting for IoU computation by consuming more memory
"""
def __init__(
self,
iou_thresh: float = 0.5,
use_polygons: bool = False,
mask_shape: Tuple[int, int] = (1024, 1024),
use_broadcasting: bool = True,
) -> None:
self.iou_thresh = iou_thresh
self.use_polygons = use_polygons
self.mask_shape = mask_shape
self.use_broadcasting = use_broadcasting
self.reset()
def update(self, gts: np.ndarray, preds: np.ndarray) -> None:
"""Updates the metric
Args:
gts: a set of relative bounding boxes either of shape (N, 4) or (N, 5) if they are rotated ones
preds: a set of relative bounding boxes either of shape (M, 4) or (M, 5) if they are rotated ones
"""
if preds.shape[0] > 0:
# Compute IoU
if self.use_polygons:
iou_mat = polygon_iou(gts, preds, self.mask_shape, self.use_broadcasting)
else:
iou_mat = box_iou(gts, preds)
self.tot_iou += float(iou_mat.max(axis=0).sum())
# Assign pairs
gt_indices, pred_indices = linear_sum_assignment(-iou_mat)
self.matches += int((iou_mat[gt_indices, pred_indices] >= self.iou_thresh).sum())
# Update counts
self.num_gts += gts.shape[0]
self.num_preds += preds.shape[0]
def summary(self) -> Tuple[Optional[float], Optional[float], Optional[float]]:
"""Computes the aggregated metrics
Returns:
a tuple with the recall, precision and meanIoU scores
"""
# Recall
recall = self.matches / self.num_gts if self.num_gts > 0 else None
# Precision
precision = self.matches / self.num_preds if self.num_preds > 0 else None
# mean IoU
mean_iou = self.tot_iou / self.num_preds if self.num_preds > 0 else None
return recall, precision, mean_iou
def reset(self) -> None:
self.num_gts = 0
self.num_preds = 0
self.matches = 0
self.tot_iou = 0.0
class OCRMetric:
r"""Implements an end-to-end OCR metric.
The aggregated metrics are computed as follows:
.. math::
\forall (B, L) \in \mathcal{B}^N \times \mathcal{L}^N,
\forall (\hat{B}, \hat{L}) \in \mathcal{B}^M \times \mathcal{L}^M, \\
Recall(B, \hat{B}, L, \hat{L}) = \frac{1}{N} \sum\limits_{i=1}^N h_{B,L}(\hat{B}_i, \hat{L}_i) \\
Precision(B, \hat{B}, L, \hat{L}) = \frac{1}{M} \sum\limits_{i=1}^M h_{B,L}(\hat{B}_i, \hat{L}_i) \\
meanIoU(B, \hat{B}) = \frac{1}{M} \sum\limits_{i=1}^M \max\limits_{j \in [1, N]} IoU(\hat{B}_i, B_j)
with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and
:math:`y`, and the function :math:`h_{B, L}` defined as:
.. math::
\forall (b, l) \in \mathcal{B} \times \mathcal{L},
h_{B,L}(b, l) = \left\{
\begin{array}{ll}
1 & \mbox{if } b\mbox{ has been assigned to a given }B_j\mbox{ with an } \\
& IoU \geq 0.5 \mbox{ and that for this assignment, } l = L_j\\
0 & \mbox{otherwise.}
\end{array}
\right.
where :math:`\mathcal{B}` is the set of possible bounding boxes,
:math:`\mathcal{L}` is the set of possible character sequences,
:math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.
>>> import numpy as np
>>> from doctr.utils import OCRMetric
>>> metric = OCRMetric(iou_thresh=0.5)
>>> metric.update(np.asarray([[0, 0, 100, 100]]), np.asarray([[0, 0, 70, 70], [110, 95, 200, 150]]),
>>> ['hello'], ['hello', 'world'])
>>> metric.summary()
Args:
iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match
use_polygons: if set to True, predictions and targets will be expected to have rotated format
mask_shape: if use_polygons is True, describes the spatial shape of the image used
use_broadcasting: if use_polygons is True, use broadcasting for IoU computation by consuming more memory
"""
def __init__(
self,
iou_thresh: float = 0.5,
use_polygons: bool = False,
mask_shape: Tuple[int, int] = (1024, 1024),
use_broadcasting: bool = True,
) -> None:
self.iou_thresh = iou_thresh
self.use_polygons = use_polygons
self.mask_shape = mask_shape
self.use_broadcasting = use_broadcasting
self.reset()
def update(
self,
gt_boxes: np.ndarray,
pred_boxes: np.ndarray,
gt_labels: List[str],
pred_labels: List[str],
) -> None:
"""Updates the metric
Args:
gt_boxes: a set of relative bounding boxes either of shape (N, 4) or (N, 5) if they are rotated ones
pred_boxes: a set of relative bounding boxes either of shape (M, 4) or (M, 5) if they are rotated ones
gt_labels: a list of N string labels
pred_labels: a list of M string labels
"""
if gt_boxes.shape[0] != len(gt_labels) or pred_boxes.shape[0] != len(pred_labels):
raise AssertionError(
"there should be the same number of boxes and string both for the ground truth " "and the predictions"
)
# Compute IoU
if pred_boxes.shape[0] > 0:
if self.use_polygons:
iou_mat = polygon_iou(gt_boxes, pred_boxes, self.mask_shape, self.use_broadcasting)
else:
iou_mat = box_iou(gt_boxes, pred_boxes)
self.tot_iou += float(iou_mat.max(axis=0).sum())
# Assign pairs
gt_indices, pred_indices = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_indices, pred_indices] >= self.iou_thresh
# String comparison
for gt_idx, pred_idx in zip(gt_indices[is_kept], pred_indices[is_kept]):
_raw, _caseless, _unidecode, _unicase = string_match(gt_labels[gt_idx], pred_labels[pred_idx])
self.raw_matches += int(_raw)
self.caseless_matches += int(_caseless)
self.unidecode_matches += int(_unidecode)
self.unicase_matches += int(_unicase)
self.num_gts += gt_boxes.shape[0]
self.num_preds += pred_boxes.shape[0]
def summary(self) -> Tuple[Dict[str, Optional[float]], Dict[str, Optional[float]], Optional[float]]:
"""Computes the aggregated metrics
Returns:
a tuple with the recall & precision for each string comparison and the mean IoU
"""
# Recall
recall = dict(
raw=self.raw_matches / self.num_gts if self.num_gts > 0 else None,
caseless=self.caseless_matches / self.num_gts if self.num_gts > 0 else None,
unidecode=self.unidecode_matches / self.num_gts if self.num_gts > 0 else None,
unicase=self.unicase_matches / self.num_gts if self.num_gts > 0 else None,
)
# Precision
precision = dict(
raw=self.raw_matches / self.num_preds if self.num_preds > 0 else None,
caseless=self.caseless_matches / self.num_preds if self.num_preds > 0 else None,
unidecode=self.unidecode_matches / self.num_preds if self.num_preds > 0 else None,
unicase=self.unicase_matches / self.num_preds if self.num_preds > 0 else None,
)
# mean IoU (overall detected boxes)
mean_iou = self.tot_iou / self.num_preds if self.num_preds > 0 else None
return recall, precision, mean_iou
def reset(self) -> None:
self.num_gts = 0
self.num_preds = 0
self.tot_iou = 0.0
self.raw_matches = 0
self.caseless_matches = 0
self.unidecode_matches = 0
self.unicase_matches = 0
class DetectionMetric:
r"""Implements an object detection metric.
The aggregated metrics are computed as follows:
.. math::
\forall (B, C) \in \mathcal{B}^N \times \mathcal{C}^N,
\forall (\hat{B}, \hat{C}) \in \mathcal{B}^M \times \mathcal{C}^M, \\
Recall(B, \hat{B}, C, \hat{C}) = \frac{1}{N} \sum\limits_{i=1}^N h_{B,C}(\hat{B}_i, \hat{C}_i) \\
Precision(B, \hat{B}, C, \hat{C}) = \frac{1}{M} \sum\limits_{i=1}^M h_{B,C}(\hat{B}_i, \hat{C}_i) \\
meanIoU(B, \hat{B}) = \frac{1}{M} \sum\limits_{i=1}^M \max\limits_{j \in [1, N]} IoU(\hat{B}_i, B_j)
with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and
:math:`y`, and the function :math:`h_{B, C}` defined as:
.. math::
\forall (b, c) \in \mathcal{B} \times \mathcal{C},
h_{B,C}(b, c) = \left\{
\begin{array}{ll}
1 & \mbox{if } b\mbox{ has been assigned to a given }B_j\mbox{ with an } \\
& IoU \geq 0.5 \mbox{ and that for this assignment, } c = C_j\\
0 & \mbox{otherwise.}
\end{array}
\right.
where :math:`\mathcal{B}` is the set of possible bounding boxes,
:math:`\mathcal{C}` is the set of possible class indices,
:math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.
>>> import numpy as np
>>> from doctr.utils import DetectionMetric
>>> metric = DetectionMetric(iou_thresh=0.5)
>>> metric.update(np.asarray([[0, 0, 100, 100]]), np.asarray([[0, 0, 70, 70], [110, 95, 200, 150]]),
>>> np.zeros(1, dtype=np.int64), np.array([0, 1], dtype=np.int64))
>>> metric.summary()
Args:
iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match
use_polygons: if set to True, predictions and targets will be expected to have rotated format
mask_shape: if use_polygons is True, describes the spatial shape of the image used
use_broadcasting: if use_polygons is True, use broadcasting for IoU computation by consuming more memory
"""
def __init__(
self,
iou_thresh: float = 0.5,
use_polygons: bool = False,
mask_shape: Tuple[int, int] = (1024, 1024),
use_broadcasting: bool = True,
) -> None:
self.iou_thresh = iou_thresh
self.use_polygons = use_polygons
self.mask_shape = mask_shape
self.use_broadcasting = use_broadcasting
self.reset()
def update(
self,
gt_boxes: np.ndarray,
pred_boxes: np.ndarray,
gt_labels: np.ndarray,
pred_labels: np.ndarray,
) -> None:
"""Updates the metric
Args:
gt_boxes: a set of relative bounding boxes either of shape (N, 4) or (N, 5) if they are rotated ones
pred_boxes: a set of relative bounding boxes either of shape (M, 4) or (M, 5) if they are rotated ones
gt_labels: an array of class indices of shape (N,)
pred_labels: an array of class indices of shape (M,)
"""
if gt_boxes.shape[0] != gt_labels.shape[0] or pred_boxes.shape[0] != pred_labels.shape[0]:
raise AssertionError(
"there should be the same number of boxes and string both for the ground truth " "and the predictions"
)
# Compute IoU
if pred_boxes.shape[0] > 0:
if self.use_polygons:
iou_mat = polygon_iou(gt_boxes, pred_boxes, self.mask_shape, self.use_broadcasting)
else:
iou_mat = box_iou(gt_boxes, pred_boxes)
self.tot_iou += float(iou_mat.max(axis=0).sum())
# Assign pairs
gt_indices, pred_indices = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_indices, pred_indices] >= self.iou_thresh
# Category comparison
self.num_matches += int((gt_labels[gt_indices[is_kept]] == pred_labels[pred_indices[is_kept]]).sum())
self.num_gts += gt_boxes.shape[0]
self.num_preds += pred_boxes.shape[0]
def summary(self) -> Tuple[Optional[float], Optional[float], Optional[float]]:
"""Computes the aggregated metrics
Returns:
a tuple with the recall & precision for each class prediction and the mean IoU
"""
# Recall
recall = self.num_matches / self.num_gts if self.num_gts > 0 else None
# Precision
precision = self.num_matches / self.num_preds if self.num_preds > 0 else None
# mean IoU (overall detected boxes)
mean_iou = self.tot_iou / self.num_preds if self.num_preds > 0 else None
return recall, precision, mean_iou
def reset(self) -> None:
self.num_gts = 0
self.num_preds = 0
self.tot_iou = 0.0
self.num_matches = 0
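# Usage sketch (illustrative only, with made-up boxes): how box_iou and
# LocalizationConfusion behave on a single ground truth and two predictions.
if __name__ == "__main__":
    gts = np.array([[0.0, 0.0, 0.5, 0.5]])
    preds = np.array([[0.0, 0.0, 0.5, 0.5], [0.6, 0.6, 0.9, 0.9]])
    print(box_iou(gts, preds))  # [[1.0, 0.0]]: only the first prediction overlaps
    metric = LocalizationConfusion(iou_thresh=0.5)
    metric.update(gts, preds)
    print(metric.summary())  # (1.0, 0.5, 0.5): recall, precision, mean IoU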
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from pathlib import Path
from typing import List, Tuple, Union
__all__ = ["Point2D", "BoundingBox", "Polygon4P", "Polygon", "Bbox"]
Point2D = Tuple[float, float]
BoundingBox = Tuple[Point2D, Point2D]
Polygon4P = Tuple[Point2D, Point2D, Point2D, Point2D]
Polygon = List[Point2D]
AbstractPath = Union[str, Path]
AbstractFile = Union[AbstractPath, bytes]
Bbox = Tuple[float, float, float, float]
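# Illustrative values only: a straight box stores two (x, y) corners, whereas a
# 4-point polygon keeps all of its corners explicitly.
if __name__ == "__main__":
    straight: BoundingBox = ((0.1, 0.1), (0.4, 0.3))
    rotated: Polygon4P = ((0.1, 0.1), (0.4, 0.1), (0.4, 0.3), (0.1, 0.3))
    print(straight, rotated)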
|
from .common_types import *
from .data import *
from .geometry import *
from .metrics import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import colorsys
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple, Union
import cv2
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import mplcursors
import numpy as np
from matplotlib.figure import Figure
from PIL import Image, ImageDraw
from unidecode import unidecode
from .common_types import BoundingBox, Polygon4P
from .fonts import get_font
__all__ = ["visualize_page", "synthesize_page", "visualize_kie_page", "synthesize_kie_page", "draw_boxes"]
def rect_patch(
geometry: BoundingBox,
page_dimensions: Tuple[int, int],
label: Optional[str] = None,
color: Tuple[float, float, float] = (0, 0, 0),
alpha: float = 0.3,
linewidth: int = 2,
fill: bool = True,
preserve_aspect_ratio: bool = False,
) -> patches.Rectangle:
"""Create a matplotlib rectangular patch for the element
Args:
geometry: bounding box of the element
page_dimensions: dimensions of the Page in format (height, width)
label: label to display when hovered
color: color to draw box
alpha: opacity parameter to fill the boxes, 0 = transparent
linewidth: line width
fill: whether the patch should be filled
preserve_aspect_ratio: pass True if you passed True to the predictor
Returns:
a rectangular Patch
"""
if len(geometry) != 2 or any(not isinstance(elt, tuple) or len(elt) != 2 for elt in geometry):
raise ValueError("invalid geometry format")
# Unpack
height, width = page_dimensions
(xmin, ymin), (xmax, ymax) = geometry
# Switch to absolute coords
if preserve_aspect_ratio:
width = height = max(height, width)
xmin, w = xmin * width, (xmax - xmin) * width
ymin, h = ymin * height, (ymax - ymin) * height
return patches.Rectangle(
(xmin, ymin),
w,
h,
fill=fill,
linewidth=linewidth,
edgecolor=(*color, alpha),
facecolor=(*color, alpha),
label=label,
)
def polygon_patch(
geometry: np.ndarray,
page_dimensions: Tuple[int, int],
label: Optional[str] = None,
color: Tuple[float, float, float] = (0, 0, 0),
alpha: float = 0.3,
linewidth: int = 2,
fill: bool = True,
preserve_aspect_ratio: bool = False,
) -> patches.Polygon:
"""Create a matplotlib polygon patch for the element
Args:
geometry: bounding box of the element
page_dimensions: dimensions of the Page in format (height, width)
label: label to display when hovered
color: color to draw box
alpha: opacity parameter to fill the boxes, 0 = transparent
linewidth: line width
fill: whether the patch should be filled
preserve_aspect_ratio: pass True if you passed True to the predictor
Returns:
a polygon Patch
"""
if not geometry.shape == (4, 2):
raise ValueError("invalid geometry format")
# Unpack
height, width = page_dimensions
geometry[:, 0] = geometry[:, 0] * (max(width, height) if preserve_aspect_ratio else width)
geometry[:, 1] = geometry[:, 1] * (max(width, height) if preserve_aspect_ratio else height)
return patches.Polygon(
geometry,
fill=fill,
linewidth=linewidth,
edgecolor=(*color, alpha),
facecolor=(*color, alpha),
label=label,
)
def create_obj_patch(
geometry: Union[BoundingBox, Polygon4P, np.ndarray],
page_dimensions: Tuple[int, int],
**kwargs: Any,
) -> patches.Patch:
"""Create a matplotlib patch for the element
Args:
geometry: bounding box (straight or rotated) of the element
page_dimensions: dimensions of the page in format (height, width)
Returns:
a matplotlib Patch
"""
if isinstance(geometry, tuple):
if len(geometry) == 2: # straight word BB (2 pts)
return rect_patch(geometry, page_dimensions, **kwargs) # type: ignore[arg-type]
elif len(geometry) == 4: # rotated word BB (4 pts)
return polygon_patch(np.asarray(geometry), page_dimensions, **kwargs)
elif isinstance(geometry, np.ndarray) and geometry.shape == (4, 2): # rotated line
return polygon_patch(geometry, page_dimensions, **kwargs)
raise ValueError("invalid geometry format")
def get_colors(num_colors: int) -> List[Tuple[float, float, float]]:
"""Generate num_colors color for matplotlib
Args:
num_colors: number of colors to generate
Returns:
colors: list of generated colors
"""
colors = []
for i in np.arange(0.0, 360.0, 360.0 / num_colors):
hue = i / 360.0
lightness = (50 + np.random.rand() * 10) / 100.0
saturation = (90 + np.random.rand() * 10) / 100.0
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
return colors
def visualize_page(
page: Dict[str, Any],
image: np.ndarray,
words_only: bool = True,
display_artefacts: bool = True,
scale: float = 10,
interactive: bool = True,
add_labels: bool = True,
**kwargs: Any,
) -> Figure:
"""Visualize a full page with predicted blocks, lines and words
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from doctr.utils.visualization import visualize_page
>>> from doctr.models import ocr_db_crnn
>>> model = ocr_db_crnn(pretrained=True)
>>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
>>> out = model([[input_page]])
>>> visualize_page(out[0].pages[0].export(), input_page)
>>> plt.show()
Args:
page: the exported Page of a Document
        image: np array of the page, needs to have the same shape as page['dimensions']
words_only: whether only words should be displayed
display_artefacts: whether artefacts should be displayed
        scale: figsize of the largest window side
interactive: whether the plot should be interactive
add_labels: for static plot, adds text labels on top of bounding box
"""
# Get proper scale and aspect ratio
h, w = image.shape[:2]
size = (scale * w / h, scale) if h > w else (scale, h / w * scale)
fig, ax = plt.subplots(figsize=size)
# Display the image
ax.imshow(image)
# hide both axis
ax.axis("off")
if interactive:
artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
for block in page["blocks"]:
if not words_only:
rect = create_obj_patch(
block["geometry"], page["dimensions"], label="block", color=(0, 1, 0), linewidth=1, **kwargs
)
# add patch on figure
ax.add_patch(rect)
if interactive:
# add patch to cursor's artists
artists.append(rect)
for line in block["lines"]:
if not words_only:
rect = create_obj_patch(
line["geometry"], page["dimensions"], label="line", color=(1, 0, 0), linewidth=1, **kwargs
)
ax.add_patch(rect)
if interactive:
artists.append(rect)
for word in line["words"]:
rect = create_obj_patch(
word["geometry"],
page["dimensions"],
label=f"{word['value']} (confidence: {word['confidence']:.2%})",
color=(0, 0, 1),
**kwargs,
)
ax.add_patch(rect)
if interactive:
artists.append(rect)
elif add_labels:
if len(word["geometry"]) == 5:
text_loc = (
int(page["dimensions"][1] * (word["geometry"][0] - word["geometry"][2] / 2)),
int(page["dimensions"][0] * (word["geometry"][1] - word["geometry"][3] / 2)),
)
else:
text_loc = (
int(page["dimensions"][1] * word["geometry"][0][0]),
int(page["dimensions"][0] * word["geometry"][0][1]),
)
if len(word["geometry"]) == 2:
# We draw only if boxes are in straight format
ax.text(
*text_loc,
word["value"],
size=10,
alpha=0.5,
color=(0, 0, 1),
)
if display_artefacts:
for artefact in block["artefacts"]:
rect = create_obj_patch(
artefact["geometry"],
page["dimensions"],
label="artefact",
color=(0.5, 0.5, 0.5),
linewidth=1,
**kwargs,
)
ax.add_patch(rect)
if interactive:
artists.append(rect)
if interactive:
# Create mlp Cursor to hover patches in artists
mplcursors.Cursor(artists, hover=2).connect("add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
fig.tight_layout(pad=0.0)
return fig
def synthesize_page(
page: Dict[str, Any],
draw_proba: bool = False,
font_family: Optional[str] = None,
) -> np.ndarray:
"""Draw a the content of the element page (OCR response) on a blank page.
Args:
page: exported Page object to represent
draw_proba: if True, draw words in colors to represent confidence. Blue: p=1, red: p=0
        font_family: family of the font
    Returns:
the synthesized page
"""
# Draw template
h, w = page["dimensions"]
response = 255 * np.ones((h, w, 3), dtype=np.int32)
# Draw each word
for block in page["blocks"]:
for line in block["lines"]:
for word in line["words"]:
                # Get absolute word geometry
(xmin, ymin), (xmax, ymax) = word["geometry"]
xmin, xmax = int(round(w * xmin)), int(round(w * xmax))
ymin, ymax = int(round(h * ymin)), int(round(h * ymax))
# White drawing context adapted to font size, 0.75 factor to convert pts --> pix
font = get_font(font_family, int(0.75 * (ymax - ymin)))
img = Image.new("RGB", (xmax - xmin, ymax - ymin), color=(255, 255, 255))
d = ImageDraw.Draw(img)
# Draw in black the value of the word
try:
d.text((0, 0), word["value"], font=font, fill=(0, 0, 0))
except UnicodeEncodeError:
# When character cannot be encoded, use its unidecode version
d.text((0, 0), unidecode(word["value"]), font=font, fill=(0, 0, 0))
# Colorize if draw_proba
if draw_proba:
p = int(255 * word["confidence"])
mask = np.where(np.array(img) == 0, 1, 0)
proba: np.ndarray = np.array([255 - p, 0, p])
color = mask * proba[np.newaxis, np.newaxis, :]
white_mask = 255 * (1 - mask)
img = color + white_mask
# Write to response page
response[ymin:ymax, xmin:xmax, :] = np.array(img)
return response
def visualize_kie_page(
page: Dict[str, Any],
image: np.ndarray,
words_only: bool = False,
display_artefacts: bool = True,
scale: float = 10,
interactive: bool = True,
add_labels: bool = True,
**kwargs: Any,
) -> Figure:
"""Visualize a full page with predicted blocks, lines and words
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from doctr.utils.visualization import visualize_page
>>> from doctr.models import ocr_db_crnn
>>> model = ocr_db_crnn(pretrained=True)
>>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
>>> out = model([[input_page]])
>>> visualize_kie_page(out[0].pages[0].export(), input_page)
>>> plt.show()
Args:
page: the exported Page of a Document
        image: np array of the page, needs to have the same shape as page['dimensions']
words_only: whether only words should be displayed
display_artefacts: whether artefacts should be displayed
        scale: figsize of the largest window side
interactive: whether the plot should be interactive
add_labels: for static plot, adds text labels on top of bounding box
"""
# Get proper scale and aspect ratio
h, w = image.shape[:2]
size = (scale * w / h, scale) if h > w else (scale, h / w * scale)
fig, ax = plt.subplots(figsize=size)
# Display the image
ax.imshow(image)
# hide both axis
ax.axis("off")
if interactive:
artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
colors = {k: color for color, k in zip(get_colors(len(page["predictions"])), page["predictions"])}
for key, value in page["predictions"].items():
for prediction in value:
if not words_only:
rect = create_obj_patch(
prediction["geometry"],
page["dimensions"],
label=f"{key} \n {prediction['value']} (confidence: {prediction['confidence']:.2%}",
color=colors[key],
linewidth=1,
**kwargs,
)
# add patch on figure
ax.add_patch(rect)
if interactive:
# add patch to cursor's artists
artists.append(rect)
if interactive:
# Create mlp Cursor to hover patches in artists
mplcursors.Cursor(artists, hover=2).connect("add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
fig.tight_layout(pad=0.0)
return fig
def synthesize_kie_page(
page: Dict[str, Any],
draw_proba: bool = False,
font_family: Optional[str] = None,
) -> np.ndarray:
"""Draw a the content of the element page (OCR response) on a blank page.
Args:
page: exported Page object to represent
draw_proba: if True, draw words in colors to represent confidence. Blue: p=1, red: p=0
        font_family: family of the font
    Returns:
the synthesized page
"""
# Draw template
h, w = page["dimensions"]
response = 255 * np.ones((h, w, 3), dtype=np.int32)
# Draw each word
for predictions in page["predictions"].values():
for prediction in predictions:
            # Get absolute word geometry
(xmin, ymin), (xmax, ymax) = prediction["geometry"]
xmin, xmax = int(round(w * xmin)), int(round(w * xmax))
ymin, ymax = int(round(h * ymin)), int(round(h * ymax))
# White drawing context adapted to font size, 0.75 factor to convert pts --> pix
font = get_font(font_family, int(0.75 * (ymax - ymin)))
img = Image.new("RGB", (xmax - xmin, ymax - ymin), color=(255, 255, 255))
d = ImageDraw.Draw(img)
# Draw in black the value of the word
try:
d.text((0, 0), prediction["value"], font=font, fill=(0, 0, 0))
except UnicodeEncodeError:
# When character cannot be encoded, use its unidecode version
d.text((0, 0), unidecode(prediction["value"]), font=font, fill=(0, 0, 0))
# Colorize if draw_proba
if draw_proba:
p = int(255 * prediction["confidence"])
mask = np.where(np.array(img) == 0, 1, 0)
proba: np.ndarray = np.array([255 - p, 0, p])
color = mask * proba[np.newaxis, np.newaxis, :]
white_mask = 255 * (1 - mask)
img = color + white_mask
# Write to response page
response[ymin:ymax, xmin:xmax, :] = np.array(img)
return response
def draw_boxes(boxes: np.ndarray, image: np.ndarray, color: Optional[Tuple[int, int, int]] = None, **kwargs) -> None:
"""Draw an array of relative straight boxes on an image
Args:
boxes: array of relative boxes, of shape (*, 4)
image: np array, float32 or uint8
color: color to use for bounding box edges
"""
h, w = image.shape[:2]
# Convert boxes to absolute coords
_boxes = deepcopy(boxes)
_boxes[:, [0, 2]] *= w
_boxes[:, [1, 3]] *= h
_boxes = _boxes.astype(np.int32)
for box in _boxes.tolist():
xmin, ymin, xmax, ymax = box
image = cv2.rectangle(
image, (xmin, ymin), (xmax, ymax), color=color if isinstance(color, tuple) else (0, 0, 255), thickness=2
)
plt.imshow(image)
plt.plot(**kwargs)
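# Usage sketch (illustrative only, with a made-up export): drawing relative boxes on a
# synthetic page and rendering a minimal fake OCR export back to an image.
if __name__ == "__main__":
    page_img = np.full((200, 300, 3), 255, dtype=np.uint8)
    boxes = np.array([[0.1, 0.1, 0.4, 0.2], [0.5, 0.5, 0.9, 0.6]], dtype=np.float32)
    draw_boxes(boxes, page_img)
    fake_export = {
        "dimensions": (200, 300),
        "blocks": [
            {"lines": [{"words": [{"geometry": ((0.1, 0.1), (0.4, 0.2)), "value": "hello", "confidence": 0.9}]}]}
        ],
    }
    rendered = synthesize_page(fake_export)
    print(rendered.shape)  # (200, 300, 3)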
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Adapted from https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/module.py
from typing import List
__all__ = ["NestedObject"]
def _addindent(s_, num_spaces):
s = s_.split("\n")
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
class NestedObject:
_children_names: List[str]
def extra_repr(self) -> str:
return ""
def __repr__(self):
# We treat the extra repr like the sub-object, one item per line
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
if hasattr(self, "_children_names"):
for key in self._children_names:
child = getattr(self, key)
if isinstance(child, list) and len(child) > 0:
child_str = ",\n".join([repr(subchild) for subchild in child])
if len(child) > 1:
child_str = _addindent(f"\n{child_str},", 2) + "\n"
child_str = f"[{child_str}]"
else:
child_str = repr(child)
child_str = _addindent(child_str, 2)
child_lines.append("(" + key + "): " + child_str)
lines = extra_lines + child_lines
main_str = self.__class__.__name__ + "("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
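# Usage sketch (illustrative only): a minimal subclass showing how NestedObject builds
# its torch-like repr from the attributes listed in `_children_names`.
if __name__ == "__main__":

    class _Demo(NestedObject):
        _children_names = ["children"]

        def __init__(self) -> None:
            self.children = [NestedObject(), NestedObject()]

        def extra_repr(self) -> str:
            return "depth=2"

    print(_Demo())  # prints the extra repr and the two children, one item per line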
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from math import ceil
from typing import List, Optional, Tuple, Union
import cv2
import numpy as np
from .common_types import BoundingBox, Polygon4P
__all__ = [
"bbox_to_polygon",
"polygon_to_bbox",
"resolve_enclosing_bbox",
"resolve_enclosing_rbbox",
"rotate_boxes",
"compute_expanded_shape",
"rotate_image",
"estimate_page_angle",
"convert_to_relative_coords",
"rotate_abs_geoms",
"extract_crops",
"extract_rcrops",
]
def bbox_to_polygon(bbox: BoundingBox) -> Polygon4P:
return bbox[0], (bbox[1][0], bbox[0][1]), (bbox[0][0], bbox[1][1]), bbox[1]
def polygon_to_bbox(polygon: Polygon4P) -> BoundingBox:
x, y = zip(*polygon)
return (min(x), min(y)), (max(x), max(y))
def resolve_enclosing_bbox(bboxes: Union[List[BoundingBox], np.ndarray]) -> Union[BoundingBox, np.ndarray]:
"""Compute enclosing bbox either from:
- an array of boxes: (*, 5), where boxes have this shape:
(xmin, ymin, xmax, ymax, score)
- a list of BoundingBox
    Return a (5,) array (enclosing box with the mean score), or a BoundingBox
"""
if isinstance(bboxes, np.ndarray):
xmin, ymin, xmax, ymax, score = np.split(bboxes, 5, axis=1)
return np.array([xmin.min(), ymin.min(), xmax.max(), ymax.max(), score.mean()])
else:
x, y = zip(*[point for box in bboxes for point in box])
return (min(x), min(y)), (max(x), max(y))
def resolve_enclosing_rbbox(rbboxes: List[np.ndarray], intermed_size: int = 1024) -> np.ndarray:
cloud: np.ndarray = np.concatenate(rbboxes, axis=0)
# Convert to absolute for minAreaRect
cloud *= intermed_size
rect = cv2.minAreaRect(cloud.astype(np.int32))
return cv2.boxPoints(rect) / intermed_size
def rotate_abs_points(points: np.ndarray, angle: float = 0.0) -> np.ndarray:
"""Rotate points counter-clockwise.
Points: array of size (N, 2)
"""
angle_rad = angle * np.pi / 180.0 # compute radian angle for np functions
rotation_mat = np.array(
[[np.cos(angle_rad), -np.sin(angle_rad)], [np.sin(angle_rad), np.cos(angle_rad)]], dtype=points.dtype
)
return np.matmul(points, rotation_mat.T)
def compute_expanded_shape(img_shape: Tuple[int, int], angle: float) -> Tuple[int, int]:
"""Compute the shape of an expanded rotated image
Args:
img_shape: the height and width of the image
angle: angle between -90 and +90 degrees
Returns:
the height and width of the rotated image
"""
points: np.ndarray = np.array(
[
[img_shape[1] / 2, img_shape[0] / 2],
[-img_shape[1] / 2, img_shape[0] / 2],
]
)
rotated_points = rotate_abs_points(points, angle)
wh_shape = 2 * np.abs(rotated_points).max(axis=0)
return wh_shape[1], wh_shape[0]
def rotate_abs_geoms(
geoms: np.ndarray,
angle: float,
img_shape: Tuple[int, int],
expand: bool = True,
) -> np.ndarray:
"""Rotate a batch of bounding boxes or polygons by an angle around the
image center.
Args:
        geoms: (N, 4) or (N, 4, 2) array of ABSOLUTE coordinate boxes
angle: anti-clockwise rotation angle in degrees
img_shape: the height and width of the image
expand: whether the image should be padded to avoid information loss
Returns:
A batch of rotated polygons (N, 4, 2)
"""
# Switch to polygons
polys = (
np.stack([geoms[:, [0, 1]], geoms[:, [2, 1]], geoms[:, [2, 3]], geoms[:, [0, 3]]], axis=1)
if geoms.ndim == 2
else geoms
)
polys = polys.astype(np.float32)
# Switch to image center as referential
polys[..., 0] -= img_shape[1] / 2
polys[..., 1] = img_shape[0] / 2 - polys[..., 1]
    # Rotate them around the image center
rotated_polys = rotate_abs_points(polys.reshape(-1, 2), angle).reshape(-1, 4, 2)
# Switch back to top-left corner as referential
target_shape = compute_expanded_shape(img_shape, angle) if expand else img_shape
    # Clip coords to fit in the target shape (only effective when expand is False)
rotated_polys[..., 0] = (rotated_polys[..., 0] + target_shape[1] / 2).clip(0, target_shape[1])
rotated_polys[..., 1] = (target_shape[0] / 2 - rotated_polys[..., 1]).clip(0, target_shape[0])
return rotated_polys
def remap_boxes(loc_preds: np.ndarray, orig_shape: Tuple[int, int], dest_shape: Tuple[int, int]) -> np.ndarray:
"""Remaps a batch of rotated locpred (N, 4, 2) expressed for an origin_shape to a destination_shape.
This does not impact the absolute shape of the boxes, but allow to calculate the new relative RotatedBbox
coordinates after a resizing of the image.
Args:
loc_preds: (N, 4, 2) array of RELATIVE loc_preds
orig_shape: shape of the origin image
dest_shape: shape of the destination image
Returns:
        A batch of rotated loc_preds (N, 4, 2) expressed in the destination referential
"""
    if len(dest_shape) != 2:
        raise ValueError(f"dest_shape length should be 2, got {len(dest_shape)}")
    if len(orig_shape) != 2:
        raise ValueError(f"orig_shape length should be 2, got {len(orig_shape)}")
orig_height, orig_width = orig_shape
dest_height, dest_width = dest_shape
mboxes = loc_preds.copy()
mboxes[:, :, 0] = ((loc_preds[:, :, 0] * orig_width) + (dest_width - orig_width) / 2) / dest_width
mboxes[:, :, 1] = ((loc_preds[:, :, 1] * orig_height) + (dest_height - orig_height) / 2) / dest_height
return mboxes
def rotate_boxes(
loc_preds: np.ndarray,
angle: float,
orig_shape: Tuple[int, int],
min_angle: float = 1.0,
target_shape: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""Rotate a batch of straight bounding boxes (xmin, ymin, xmax, ymax, c) or rotated bounding boxes
(4, 2) of an angle, if angle > min_angle, around the center of the page.
If target_shape is specified, the boxes are remapped to the target shape after the rotation. This
is done to remove the padding that is created by rotate_page(expand=True)
Args:
loc_preds: (N, 5) or (N, 4, 2) array of RELATIVE boxes
angle: angle between -90 and +90 degrees
orig_shape: shape of the origin image
        min_angle: minimum angle to rotate boxes
        target_shape: if specified, the boxes are remapped to this shape after rotation
    Returns:
        A batch of rotated boxes (N, 4, 2), or the input boxes converted to that format if the angle is too small
"""
# Change format of the boxes to rotated boxes
_boxes = loc_preds.copy()
if _boxes.ndim == 2:
_boxes = np.stack(
[
_boxes[:, [0, 1]],
_boxes[:, [2, 1]],
_boxes[:, [2, 3]],
_boxes[:, [0, 3]],
],
axis=1,
)
# If small angle, return boxes (no rotation)
if abs(angle) < min_angle or abs(angle) > 90 - min_angle:
return _boxes
# Compute rotation matrix
angle_rad = angle * np.pi / 180.0 # compute radian angle for np functions
rotation_mat = np.array(
[[np.cos(angle_rad), -np.sin(angle_rad)], [np.sin(angle_rad), np.cos(angle_rad)]], dtype=_boxes.dtype
)
# Rotate absolute points
points: np.ndarray = np.stack((_boxes[:, :, 0] * orig_shape[1], _boxes[:, :, 1] * orig_shape[0]), axis=-1)
image_center = (orig_shape[1] / 2, orig_shape[0] / 2)
rotated_points = image_center + np.matmul(points - image_center, rotation_mat)
rotated_boxes: np.ndarray = np.stack(
(rotated_points[:, :, 0] / orig_shape[1], rotated_points[:, :, 1] / orig_shape[0]), axis=-1
)
# Apply a mask if requested
if target_shape is not None:
rotated_boxes = remap_boxes(rotated_boxes, orig_shape=orig_shape, dest_shape=target_shape)
return rotated_boxes
def rotate_image(
image: np.ndarray,
angle: float,
expand: bool = False,
preserve_origin_shape: bool = False,
) -> np.ndarray:
"""Rotate an image counterclockwise by an given angle.
Args:
image: numpy tensor to rotate
angle: rotation angle in degrees, between -90 and +90
expand: whether the image should be padded before the rotation
preserve_origin_shape: if expand is set to True, resizes the final output to the original image size
Returns:
Rotated array, padded by 0 by default.
"""
# Compute the expanded padding
exp_img: np.ndarray
if expand:
exp_shape = compute_expanded_shape(image.shape[:2], angle) # type: ignore[arg-type]
h_pad, w_pad = int(max(0, ceil(exp_shape[0] - image.shape[0]))), int(
max(0, ceil(exp_shape[1] - image.shape[1]))
)
exp_img = np.pad(image, ((h_pad // 2, h_pad - h_pad // 2), (w_pad // 2, w_pad - w_pad // 2), (0, 0)))
else:
exp_img = image
height, width = exp_img.shape[:2]
rot_mat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
rot_img = cv2.warpAffine(exp_img, rot_mat, (width, height))
if expand:
# Pad to get the same aspect ratio
if (image.shape[0] / image.shape[1]) != (rot_img.shape[0] / rot_img.shape[1]):
# Pad width
if (rot_img.shape[0] / rot_img.shape[1]) > (image.shape[0] / image.shape[1]):
h_pad, w_pad = 0, int(rot_img.shape[0] * image.shape[1] / image.shape[0] - rot_img.shape[1])
# Pad height
else:
h_pad, w_pad = int(rot_img.shape[1] * image.shape[0] / image.shape[1] - rot_img.shape[0]), 0
rot_img = np.pad(rot_img, ((h_pad // 2, h_pad - h_pad // 2), (w_pad // 2, w_pad - w_pad // 2), (0, 0)))
if preserve_origin_shape:
# rescale
rot_img = cv2.resize(rot_img, image.shape[:-1][::-1], interpolation=cv2.INTER_LINEAR)
return rot_img
def estimate_page_angle(polys: np.ndarray) -> float:
"""Takes a batch of rotated previously ORIENTED polys (N, 4, 2) (rectified by the classifier) and return the
estimated angle ccw in degrees
"""
# Compute mean left points and mean right point with respect to the reading direction (oriented polygon)
xleft = polys[:, 0, 0] + polys[:, 3, 0]
yleft = polys[:, 0, 1] + polys[:, 3, 1]
xright = polys[:, 1, 0] + polys[:, 2, 0]
yright = polys[:, 1, 1] + polys[:, 2, 1]
return float(np.median(np.arctan((yleft - yright) / (xright - xleft))) * 180 / np.pi) # Y axis from top to bottom!
def convert_to_relative_coords(geoms: np.ndarray, img_shape: Tuple[int, int]) -> np.ndarray:
"""Convert a geometry to relative coordinates
Args:
geoms: a set of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
img_shape: the height and width of the image
Returns:
the updated geometry
"""
# Polygon
if geoms.ndim == 3 and geoms.shape[1:] == (4, 2):
polygons: np.ndarray = np.empty(geoms.shape, dtype=np.float32)
polygons[..., 0] = geoms[..., 0] / img_shape[1]
polygons[..., 1] = geoms[..., 1] / img_shape[0]
return polygons.clip(0, 1)
if geoms.ndim == 2 and geoms.shape[1] == 4:
boxes: np.ndarray = np.empty(geoms.shape, dtype=np.float32)
boxes[:, ::2] = geoms[:, ::2] / img_shape[1]
boxes[:, 1::2] = geoms[:, 1::2] / img_shape[0]
return boxes.clip(0, 1)
raise ValueError(f"invalid format for arg `geoms`: {geoms.shape}")
def extract_crops(img: np.ndarray, boxes: np.ndarray, channels_last: bool = True) -> List[np.ndarray]:
"""Created cropped images from list of bounding boxes
Args:
img: input image
boxes: bounding boxes of shape (N, 4) where N is the number of boxes, and the relative
coordinates (xmin, ymin, xmax, ymax)
        channels_last: whether the channel dimension is the last one (instead of the first one)
Returns:
list of cropped images
"""
if boxes.shape[0] == 0:
return []
if boxes.shape[1] != 4:
raise AssertionError("boxes are expected to be relative and in order (xmin, ymin, xmax, ymax)")
# Project relative coordinates
_boxes = boxes.copy()
h, w = img.shape[:2] if channels_last else img.shape[-2:]
if _boxes.dtype != int:
_boxes[:, [0, 2]] *= w
_boxes[:, [1, 3]] *= h
_boxes = _boxes.round().astype(int)
# Add last index
_boxes[2:] += 1
if channels_last:
return deepcopy([img[box[1] : box[3], box[0] : box[2]] for box in _boxes])
return deepcopy([img[:, box[1] : box[3], box[0] : box[2]] for box in _boxes])
def extract_rcrops(
img: np.ndarray, polys: np.ndarray, dtype=np.float32, channels_last: bool = True
) -> List[np.ndarray]:
"""Created cropped images from list of rotated bounding boxes
Args:
img: input image
polys: bounding boxes of shape (N, 4, 2)
dtype: target data type of bounding boxes
        channels_last: whether the channel dimension is the last one (instead of the first one)
Returns:
list of cropped images
"""
if polys.shape[0] == 0:
return []
if polys.shape[1:] != (4, 2):
raise AssertionError("polys are expected to be quadrilateral, of shape (N, 4, 2)")
# Project relative coordinates
_boxes = polys.copy()
height, width = img.shape[:2] if channels_last else img.shape[-2:]
if _boxes.dtype != int:
_boxes[:, :, 0] *= width
_boxes[:, :, 1] *= height
src_pts = _boxes[:, :3].astype(np.float32)
# Preserve size
d1 = np.linalg.norm(src_pts[:, 0] - src_pts[:, 1], axis=-1)
d2 = np.linalg.norm(src_pts[:, 1] - src_pts[:, 2], axis=-1)
# (N, 3, 2)
dst_pts = np.zeros((_boxes.shape[0], 3, 2), dtype=dtype)
dst_pts[:, 1, 0] = dst_pts[:, 2, 0] = d1 - 1
dst_pts[:, 2, 1] = d2 - 1
# Use a warp transformation to extract the crop
crops = [
cv2.warpAffine(
img if channels_last else img.transpose(1, 2, 0),
# Transformation matrix
cv2.getAffineTransform(src_pts[idx], dst_pts[idx]),
(int(d1[idx]), int(d2[idx])),
)
for idx in range(_boxes.shape[0])
]
return crops
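# Usage sketch (illustrative only, with synthetic values): converting absolute boxes to
# relative coordinates, then cropping them back out of a dummy image.
if __name__ == "__main__":
    img = np.zeros((100, 200, 3), dtype=np.uint8)
    abs_boxes = np.array([[20, 10, 60, 50], [100, 40, 180, 90]], dtype=np.float32)
    rel_boxes = convert_to_relative_coords(abs_boxes, img.shape[:2])
    crops = extract_crops(img, rel_boxes)
    print([crop.shape for crop in crops])  # [(40, 40, 3), (50, 80, 3)]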
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import logging
import platform
from typing import Optional
from PIL import ImageFont
__all__ = ["get_font"]
def get_font(font_family: Optional[str] = None, font_size: int = 13) -> ImageFont.ImageFont:
"""Resolves a compatible ImageFont for the system
Args:
font_family: the font family to use
font_size: the size of the font upon rendering
Returns:
the Pillow font
"""
# Font selection
if font_family is None:
try:
font = ImageFont.truetype("FreeMono.ttf" if platform.system() == "Linux" else "Arial.ttf", font_size)
except OSError:
font = ImageFont.load_default()
logging.warning(
"unable to load recommended font family. Loading default PIL font,"
"font size issues may be expected."
"To prevent this, it is recommended to specify the value of 'font_family'."
)
else:
font = ImageFont.truetype(font_family, font_size)
return font
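# Usage sketch (illustrative only): requesting a font without specifying a family falls
# back to a system font, or to the default PIL font with a warning.
if __name__ == "__main__":
    font = get_font(font_size=24)
    print(type(font).__name__)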
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Adapted from https://github.com/pytorch/vision/blob/master/torchvision/datasets/utils.py
import hashlib
import logging
import os
import re
import urllib
import urllib.error
import urllib.request
from pathlib import Path
from typing import Optional, Union
from tqdm.auto import tqdm
__all__ = ["download_from_url"]
# matches bfd8deac from resnet18-bfd8deac.ckpt
HASH_REGEX = re.compile(r"-([a-f0-9]*)\.")
USER_AGENT = "mindee/doctr"
def _urlretrieve(url: str, filename: Union[Path, str], chunk_size: int = 1024) -> None:
with open(filename, "wb") as fh:
with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
with tqdm(total=response.length) as pbar:
for chunk in iter(lambda: response.read(chunk_size), ""):
if not chunk:
break
pbar.update(chunk_size)
fh.write(chunk)
def _check_integrity(file_path: Union[str, Path], hash_prefix: str) -> bool:
with open(file_path, "rb") as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
return sha_hash[: len(hash_prefix)] == hash_prefix
def download_from_url(
url: str,
file_name: Optional[str] = None,
hash_prefix: Optional[str] = None,
cache_dir: Optional[str] = None,
cache_subdir: Optional[str] = None,
) -> Path:
"""Download a file using its URL
>>> from doctr.models import download_from_url
>>> download_from_url("https://yoursource.com/yourcheckpoint-yourhash.zip")
Args:
url: the URL of the file to download
file_name: optional name of the file once downloaded
hash_prefix: optional expected SHA256 hash of the file
cache_dir: cache directory
cache_subdir: subfolder to use in the cache
Returns:
the location of the downloaded file
Note:
You can change cache directory location by using `DOCTR_CACHE_DIR` environment variable.
"""
if not isinstance(file_name, str):
file_name = url.rpartition("/")[-1].split("&")[0]
cache_dir = (
str(os.environ.get("DOCTR_CACHE_DIR", os.path.join(os.path.expanduser("~"), ".cache", "doctr")))
if cache_dir is None
else cache_dir
)
# Check hash in file name
if hash_prefix is None:
r = HASH_REGEX.search(file_name)
hash_prefix = r.group(1) if r else None
folder_path = Path(cache_dir) if cache_subdir is None else Path(cache_dir, cache_subdir)
file_path = folder_path.joinpath(file_name)
# Check file existence
if file_path.is_file() and (hash_prefix is None or _check_integrity(file_path, hash_prefix)):
logging.info(f"Using downloaded & verified file: {file_path}")
return file_path
try:
# Create folder hierarchy
folder_path.mkdir(parents=True, exist_ok=True)
except OSError:
error_message = f"Failed creating cache direcotry at {folder_path}"
if os.environ.get("DOCTR_CACHE_DIR", ""):
error_message += " using path from 'DOCTR_CACHE_DIR' environment variable."
else:
error_message += (
". You can change default cache directory using 'DOCTR_CACHE_DIR' environment variable if needed."
)
logging.error(error_message)
raise
# Download the file
try:
print(f"Downloading {url} to {file_path}")
_urlretrieve(url, file_path)
except (urllib.error.URLError, IOError) as e:
if url[:5] == "https":
url = url.replace("https:", "http:")
print("Failed download. Trying https -> http instead." f" Downloading {url} to {file_path}")
_urlretrieve(url, file_path)
else:
raise e
# Remove corrupted files
if isinstance(hash_prefix, str) and not _check_integrity(file_path, hash_prefix):
# Remove file
os.remove(file_path)
raise ValueError(f"corrupted download, the hash of {url} does not match its expected value")
return file_path
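# Usage sketch (illustrative only, no network access): the same integrity check that
# download_from_url applies to freshly downloaded files, run on a local temporary file.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"dummy checkpoint content")
        tmp_path = f.name
    prefix = hashlib.sha256(b"dummy checkpoint content").hexdigest()[:8]
    print(_check_integrity(tmp_path, prefix))  # True
    os.remove(tmp_path)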
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import multiprocessing as mp
import os
from multiprocessing.pool import ThreadPool
from typing import Any, Callable, Iterable, Iterator, Optional
from doctr.file_utils import ENV_VARS_TRUE_VALUES
__all__ = ["multithread_exec"]
def multithread_exec(func: Callable[[Any], Any], seq: Iterable[Any], threads: Optional[int] = None) -> Iterator[Any]:
"""Execute a given function in parallel for each element of a given sequence
>>> from doctr.utils.multithreading import multithread_exec
>>> entries = [1, 4, 8]
>>> results = multithread_exec(lambda x: x ** 2, entries)
Args:
func: function to be executed on each element of the iterable
seq: iterable
threads: number of workers to be used for multiprocessing
Returns:
iterator of the function's results using the iterable as inputs
Notes:
This function uses ThreadPool from multiprocessing package, which uses `/dev/shm` directory for shared memory.
If you do not have write permissions for this directory (if you run `doctr` on AWS Lambda for instance),
you might want to disable multiprocessing. To achieve that, set 'DOCTR_MULTIPROCESSING_DISABLE' to 'TRUE'.
"""
threads = threads if isinstance(threads, int) else min(16, mp.cpu_count())
# Single-thread
if threads < 2 or os.environ.get("DOCTR_MULTIPROCESSING_DISABLE", "").upper() in ENV_VARS_TRUE_VALUES:
results = map(func, seq)
# Multi-threading
else:
with ThreadPool(threads) as tp:
# ThreadPool's map function returns a list, but seq could be of a different type
# That's why wrapping result in map to return iterator
results = map(lambda x: x, tp.map(func, seq))
return results
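# Usage sketch (illustrative only): the same call works whether the work is dispatched
# to a thread pool or, with threads=1, executed sequentially.
if __name__ == "__main__":
    print(list(multithread_exec(lambda x: x**2, [1, 4, 8])))  # [1, 16, 64]
    print(list(multithread_exec(lambda x: x**2, [1, 4, 8], threads=1)))  # [1, 16, 64]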
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any
from .detection.zoo import detection_predictor
from .kie_predictor import KIEPredictor
from .predictor import OCRPredictor
from .recognition.zoo import recognition_predictor
__all__ = ["ocr_predictor", "kie_predictor"]
def _predictor(
det_arch: Any,
reco_arch: Any,
pretrained: bool,
pretrained_backbone: bool = True,
assume_straight_pages: bool = True,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
det_bs: int = 2,
reco_bs: int = 128,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs,
) -> OCRPredictor:
# Detection
det_predictor = detection_predictor(
det_arch,
pretrained=pretrained,
pretrained_backbone=pretrained_backbone,
batch_size=det_bs,
assume_straight_pages=assume_straight_pages,
preserve_aspect_ratio=preserve_aspect_ratio,
symmetric_pad=symmetric_pad,
)
# Recognition
reco_predictor = recognition_predictor(
reco_arch, pretrained=pretrained, pretrained_backbone=pretrained_backbone, batch_size=reco_bs
)
return OCRPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=assume_straight_pages,
preserve_aspect_ratio=preserve_aspect_ratio,
symmetric_pad=symmetric_pad,
detect_orientation=detect_orientation,
detect_language=detect_language,
**kwargs,
)
def ocr_predictor(
det_arch: Any = "db_resnet50",
reco_arch: Any = "crnn_vgg16_bn",
pretrained: bool = False,
pretrained_backbone: bool = True,
assume_straight_pages: bool = True,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
export_as_straight_boxes: bool = False,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs: Any,
) -> OCRPredictor:
"""End-to-end OCR architecture using one model for localization, and another for text recognition.
>>> import numpy as np
>>> from doctr.models import ocr_predictor
>>> model = ocr_predictor('db_resnet50', 'crnn_vgg16_bn', pretrained=True)
>>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
>>> out = model([input_page])
Args:
det_arch: name of the detection architecture or the model itself to use
(e.g. 'db_resnet50', 'db_mobilenet_v3_large')
reco_arch: name of the recognition architecture or the model itself to use
(e.g. 'crnn_vgg16_bn', 'sar_resnet31')
pretrained: If True, returns a model pre-trained on our OCR dataset
pretrained_backbone: If True, returns a model with a pretrained backbone
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
preserve_aspect_ratio: If True, pad the input document image to preserve the aspect ratio before
running the detection model on it.
symmetric_pad: if True, pad the image symmetrically instead of padding at the bottom-right.
export_as_straight_boxes: when assume_straight_pages is set to False, export final predictions
(potentially rotated) as straight bounding boxes.
        detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
            page. Doing so will slightly increase the overall latency.
        detect_language: if True, the language prediction will be added to the predictions for each
            page. Doing so will slightly increase the overall latency.
kwargs: keyword args of `OCRPredictor`
Returns:
OCR predictor
"""
return _predictor(
det_arch,
reco_arch,
pretrained,
pretrained_backbone=pretrained_backbone,
assume_straight_pages=assume_straight_pages,
preserve_aspect_ratio=preserve_aspect_ratio,
symmetric_pad=symmetric_pad,
export_as_straight_boxes=export_as_straight_boxes,
detect_orientation=detect_orientation,
detect_language=detect_language,
**kwargs,
)
def _kie_predictor(
det_arch: Any,
reco_arch: Any,
pretrained: bool,
pretrained_backbone: bool = True,
assume_straight_pages: bool = True,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
det_bs: int = 2,
reco_bs: int = 128,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs,
) -> KIEPredictor:
# Detection
det_predictor = detection_predictor(
det_arch,
pretrained=pretrained,
pretrained_backbone=pretrained_backbone,
batch_size=det_bs,
assume_straight_pages=assume_straight_pages,
preserve_aspect_ratio=preserve_aspect_ratio,
symmetric_pad=symmetric_pad,
)
# Recognition
reco_predictor = recognition_predictor(
reco_arch, pretrained=pretrained, pretrained_backbone=pretrained_backbone, batch_size=reco_bs
)
return KIEPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=assume_straight_pages,
preserve_aspect_ratio=preserve_aspect_ratio,
symmetric_pad=symmetric_pad,
detect_orientation=detect_orientation,
detect_language=detect_language,
**kwargs,
)
def kie_predictor(
det_arch: Any = "db_resnet50",
reco_arch: Any = "crnn_vgg16_bn",
pretrained: bool = False,
pretrained_backbone: bool = True,
assume_straight_pages: bool = True,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
export_as_straight_boxes: bool = False,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs: Any,
) -> KIEPredictor:
"""End-to-end KIE architecture using one model for localization, and another for text recognition.
>>> import numpy as np
    >>> from doctr.models import kie_predictor
    >>> model = kie_predictor('db_resnet50', 'crnn_vgg16_bn', pretrained=True)
>>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
>>> out = model([input_page])
Args:
det_arch: name of the detection architecture or the model itself to use
(e.g. 'db_resnet50', 'db_mobilenet_v3_large')
reco_arch: name of the recognition architecture or the model itself to use
(e.g. 'crnn_vgg16_bn', 'sar_resnet31')
pretrained: If True, returns a model pre-trained on our OCR dataset
pretrained_backbone: If True, returns a model with a pretrained backbone
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
preserve_aspect_ratio: If True, pad the input document image to preserve the aspect ratio before
running the detection model on it.
symmetric_pad: if True, pad the image symmetrically instead of padding at the bottom-right.
export_as_straight_boxes: when assume_straight_pages is set to False, export final predictions
(potentially rotated) as straight bounding boxes.
        detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
            page. Doing so will slightly increase the overall latency.
        detect_language: if True, the language prediction will be added to the predictions for each
            page. Doing so will slightly increase the overall latency.
        kwargs: keyword args of `KIEPredictor`
Returns:
KIE predictor
"""
return _kie_predictor(
det_arch,
reco_arch,
pretrained,
pretrained_backbone=pretrained_backbone,
assume_straight_pages=assume_straight_pages,
preserve_aspect_ratio=preserve_aspect_ratio,
symmetric_pad=symmetric_pad,
export_as_straight_boxes=export_as_straight_boxes,
detect_orientation=detect_orientation,
detect_language=detect_language,
**kwargs,
)
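# Minimal end-to-end sketch (illustrative, not part of the library; assumes the pretrained
# checkpoints can be downloaded in the current environment). The predictor returns a
# Document object whose export() method serializes the predictions to a nested dict.
if __name__ == "__main__":
    import numpy as np
    model = ocr_predictor(pretrained=True)
    page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)  # dummy H x W x C page
    result = model([page])
    print(result.export())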
|
from . import artefacts
from .classification import *
from .detection import *
from .recognition import *
from .zoo import *
from .factory import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict, Optional
from doctr.utils.repr import NestedObject
__all__ = ["BaseModel"]
class BaseModel(NestedObject):
"""Implements abstract DetectionModel class"""
def __init__(self, cfg: Optional[Dict[str, Any]] = None) -> None:
super().__init__()
self.cfg = cfg
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from scipy.cluster.hierarchy import fclusterdata
from doctr.io.elements import Block, Document, KIEDocument, KIEPage, Line, Page, Prediction, Word
from doctr.utils.geometry import estimate_page_angle, resolve_enclosing_bbox, resolve_enclosing_rbbox, rotate_boxes
from doctr.utils.repr import NestedObject
__all__ = ["DocumentBuilder"]
class DocumentBuilder(NestedObject):
"""Implements a document builder
Args:
resolve_lines: whether words should be automatically grouped into lines
resolve_blocks: whether lines should be automatically grouped into blocks
paragraph_break: relative length of the minimum space separating paragraphs
export_as_straight_boxes: if True, force straight boxes in the export (fit a rectangle
box to all rotated boxes). Else, keep the boxes format unchanged, no matter what it is.
"""
def __init__(
self,
resolve_lines: bool = True,
resolve_blocks: bool = True,
paragraph_break: float = 0.035,
export_as_straight_boxes: bool = False,
) -> None:
self.resolve_lines = resolve_lines
self.resolve_blocks = resolve_blocks
self.paragraph_break = paragraph_break
self.export_as_straight_boxes = export_as_straight_boxes
@staticmethod
def _sort_boxes(boxes: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Sort bounding boxes from top to bottom, left to right
Args:
boxes: bounding boxes of shape (N, 4) or (N, 4, 2) (in case of rotated bbox)
Returns:
tuple: indices of ordered boxes of shape (N,), boxes
                If straight boxes are passed to the function, they are returned unchanged;
                otherwise, the returned boxes are straight boxes fitted to the straightened rotated boxes,
                so that lines can afterwards be fitted on the straightened page
"""
if boxes.ndim == 3:
boxes = rotate_boxes(
loc_preds=boxes,
angle=-estimate_page_angle(boxes),
orig_shape=(1024, 1024),
min_angle=5.0,
)
boxes = np.concatenate((boxes.min(1), boxes.max(1)), -1)
return (boxes[:, 0] + 2 * boxes[:, 3] / np.median(boxes[:, 3] - boxes[:, 1])).argsort(), boxes
def _resolve_sub_lines(self, boxes: np.ndarray, word_idcs: List[int]) -> List[List[int]]:
"""Split a line in sub_lines
Args:
boxes: bounding boxes of shape (N, 4)
word_idcs: list of indexes for the words of the line
Returns:
A list of (sub-)lines computed from the original line (words)
"""
lines = []
# Sort words horizontally
word_idcs = [word_idcs[idx] for idx in boxes[word_idcs, 0].argsort().tolist()]
        # Split the line horizontally if needed
if len(word_idcs) < 2:
lines.append(word_idcs)
else:
sub_line = [word_idcs[0]]
for i in word_idcs[1:]:
horiz_break = True
prev_box = boxes[sub_line[-1]]
# Compute distance between boxes
dist = boxes[i, 0] - prev_box[2]
# If distance between boxes is lower than paragraph break, same sub-line
if dist < self.paragraph_break:
horiz_break = False
if horiz_break:
lines.append(sub_line)
sub_line = []
sub_line.append(i)
lines.append(sub_line)
return lines
def _resolve_lines(self, boxes: np.ndarray) -> List[List[int]]:
"""Order boxes to group them in lines
Args:
boxes: bounding boxes of shape (N, 4) or (N, 4, 2) in case of rotated bbox
Returns:
nested list of box indices
"""
# Sort boxes, and straighten the boxes if they are rotated
idxs, boxes = self._sort_boxes(boxes)
# Compute median for boxes heights
y_med = np.median(boxes[:, 3] - boxes[:, 1])
lines = []
words = [idxs[0]] # Assign the top-left word to the first line
# Define a mean y-center for the line
y_center_sum = boxes[idxs[0]][[1, 3]].mean()
for idx in idxs[1:]:
vert_break = True
# Compute y_dist
y_dist = abs(boxes[idx][[1, 3]].mean() - y_center_sum / len(words))
# If y-center of the box is close enough to mean y-center of the line, same line
if y_dist < y_med / 2:
vert_break = False
if vert_break:
# Compute sub-lines (horizontal split)
lines.extend(self._resolve_sub_lines(boxes, words))
words = []
y_center_sum = 0
words.append(idx)
y_center_sum += boxes[idx][[1, 3]].mean()
        # Use the remaining words to form the last line(s)
if len(words) > 0:
# Compute sub-lines (horizontal split)
lines.extend(self._resolve_sub_lines(boxes, words))
return lines
@staticmethod
def _resolve_blocks(boxes: np.ndarray, lines: List[List[int]]) -> List[List[List[int]]]:
"""Order lines to group them in blocks
Args:
boxes: bounding boxes of shape (N, 4) or (N, 4, 2)
lines: list of lines, each line is a list of idx
Returns:
nested list of box indices
"""
# Resolve enclosing boxes of lines
if boxes.ndim == 3:
box_lines: np.ndarray = np.asarray(
[
resolve_enclosing_rbbox([tuple(boxes[idx, :, :]) for idx in line]) # type: ignore[misc]
for line in lines
]
)
else:
_box_lines = [
resolve_enclosing_bbox(
[(tuple(boxes[idx, :2]), tuple(boxes[idx, 2:])) for idx in line] # type: ignore[misc]
)
for line in lines
]
box_lines = np.asarray([(x1, y1, x2, y2) for ((x1, y1), (x2, y2)) in _box_lines])
# Compute geometrical features of lines to clusterize
        # Clustering only on box centers yields poor results for complex documents
if boxes.ndim == 3:
box_features: np.ndarray = np.stack(
(
(box_lines[:, 0, 0] + box_lines[:, 0, 1]) / 2,
(box_lines[:, 0, 0] + box_lines[:, 2, 0]) / 2,
(box_lines[:, 0, 0] + box_lines[:, 2, 1]) / 2,
(box_lines[:, 0, 1] + box_lines[:, 2, 1]) / 2,
(box_lines[:, 0, 1] + box_lines[:, 2, 0]) / 2,
(box_lines[:, 2, 0] + box_lines[:, 2, 1]) / 2,
),
axis=-1,
)
else:
box_features = np.stack(
(
(box_lines[:, 0] + box_lines[:, 3]) / 2,
(box_lines[:, 1] + box_lines[:, 2]) / 2,
(box_lines[:, 0] + box_lines[:, 2]) / 2,
(box_lines[:, 1] + box_lines[:, 3]) / 2,
box_lines[:, 0],
box_lines[:, 1],
),
axis=-1,
)
# Compute clusters
clusters = fclusterdata(box_features, t=0.1, depth=4, criterion="distance", metric="euclidean")
_blocks: Dict[int, List[int]] = {}
# Form clusters
for line_idx, cluster_idx in enumerate(clusters):
if cluster_idx in _blocks.keys():
_blocks[cluster_idx].append(line_idx)
else:
_blocks[cluster_idx] = [line_idx]
# Retrieve word-box level to return a fully nested structure
blocks = [[lines[idx] for idx in block] for block in _blocks.values()]
return blocks
def _build_blocks(self, boxes: np.ndarray, word_preds: List[Tuple[str, float]]) -> List[Block]:
"""Gather independent words in structured blocks
Args:
boxes: bounding boxes of all detected words of the page, of shape (N, 5) or (N, 4, 2)
word_preds: list of all detected words of the page, of shape N
Returns:
list of block elements
"""
if boxes.shape[0] != len(word_preds):
raise ValueError(f"Incompatible argument lengths: {boxes.shape[0]}, {len(word_preds)}")
if boxes.shape[0] == 0:
return []
# Decide whether we try to form lines
_boxes = boxes
if self.resolve_lines:
lines = self._resolve_lines(_boxes if _boxes.ndim == 3 else _boxes[:, :4])
# Decide whether we try to form blocks
if self.resolve_blocks and len(lines) > 1:
_blocks = self._resolve_blocks(_boxes if _boxes.ndim == 3 else _boxes[:, :4], lines)
else:
_blocks = [lines]
else:
# Sort bounding boxes, one line for all boxes, one block for the line
lines = [self._sort_boxes(_boxes if _boxes.ndim == 3 else _boxes[:, :4])[0]] # type: ignore[list-item]
_blocks = [lines]
blocks = [
Block(
[
Line(
[
Word(
*word_preds[idx],
tuple([tuple(pt) for pt in boxes[idx].tolist()]), # type: ignore[arg-type]
)
if boxes.ndim == 3
else Word(
*word_preds[idx], ((boxes[idx, 0], boxes[idx, 1]), (boxes[idx, 2], boxes[idx, 3]))
)
for idx in line
]
)
for line in lines
]
)
for lines in _blocks
]
return blocks
def extra_repr(self) -> str:
return (
f"resolve_lines={self.resolve_lines}, resolve_blocks={self.resolve_blocks}, "
f"paragraph_break={self.paragraph_break}, "
f"export_as_straight_boxes={self.export_as_straight_boxes}"
)
def __call__(
self,
boxes: List[np.ndarray],
text_preds: List[List[Tuple[str, float]]],
page_shapes: List[Tuple[int, int]],
orientations: Optional[List[Dict[str, Any]]] = None,
languages: Optional[List[Dict[str, Any]]] = None,
) -> Document:
"""Re-arrange detected words into structured blocks
Args:
boxes: list of N elements, where each element represents the localization predictions, of shape (*, 5)
or (*, 6) for all words for a given page
text_preds: list of N elements, where each element is the list of all word prediction (text + confidence)
            page_shapes: shape of each page, of size N
Returns:
document object
"""
if len(boxes) != len(text_preds) or len(boxes) != len(page_shapes):
raise ValueError("All arguments are expected to be lists of the same size")
_orientations = (
orientations if isinstance(orientations, list) else [None] * len(boxes) # type: ignore[list-item]
)
_languages = languages if isinstance(languages, list) else [None] * len(boxes) # type: ignore[list-item]
if self.export_as_straight_boxes and len(boxes) > 0:
# If boxes are already straight OK, else fit a bounding rect
if boxes[0].ndim == 3:
straight_boxes: List[np.ndarray] = []
# Iterate over pages
for p_boxes in boxes:
# Iterate over boxes of the pages
straight_boxes.append(np.concatenate((p_boxes.min(1), p_boxes.max(1)), 1))
boxes = straight_boxes
_pages = [
Page(
self._build_blocks(
page_boxes,
word_preds,
),
_idx,
shape,
orientation,
language,
)
for _idx, shape, page_boxes, word_preds, orientation, language in zip(
range(len(boxes)), page_shapes, boxes, text_preds, _orientations, _languages
)
]
return Document(_pages)
class KIEDocumentBuilder(DocumentBuilder):
"""Implements a KIE document builder
Args:
resolve_lines: whether words should be automatically grouped into lines
resolve_blocks: whether lines should be automatically grouped into blocks
paragraph_break: relative length of the minimum space separating paragraphs
export_as_straight_boxes: if True, force straight boxes in the export (fit a rectangle
box to all rotated boxes). Else, keep the boxes format unchanged, no matter what it is.
"""
def __call__( # type: ignore[override]
self,
boxes: List[Dict[str, np.ndarray]],
text_preds: List[Dict[str, List[Tuple[str, float]]]],
page_shapes: List[Tuple[int, int]],
orientations: Optional[List[Dict[str, Any]]] = None,
languages: Optional[List[Dict[str, Any]]] = None,
) -> KIEDocument:
"""Re-arrange detected words into structured predictions
Args:
boxes: list of N dictionaries, where each element represents the localization predictions for a class,
of shape (*, 5) or (*, 6) for all predictions
text_preds: list of N dictionaries, where each element is the list of all word prediction
            page_shapes: shape of each page, of size N
Returns:
document object
"""
if len(boxes) != len(text_preds) or len(boxes) != len(page_shapes):
raise ValueError("All arguments are expected to be lists of the same size")
_orientations = (
orientations if isinstance(orientations, list) else [None] * len(boxes) # type: ignore[list-item]
)
_languages = languages if isinstance(languages, list) else [None] * len(boxes) # type: ignore[list-item]
if self.export_as_straight_boxes and len(boxes) > 0:
# If boxes are already straight OK, else fit a bounding rect
if next(iter(boxes[0].values())).ndim == 3:
straight_boxes: List[Dict[str, np.ndarray]] = []
# Iterate over pages
for p_boxes in boxes:
# Iterate over boxes of the pages
straight_boxes_dict = {}
for k, box in p_boxes.items():
straight_boxes_dict[k] = np.concatenate((box.min(1), box.max(1)), 1)
straight_boxes.append(straight_boxes_dict)
boxes = straight_boxes
_pages = [
KIEPage(
{
k: self._build_blocks(
page_boxes[k],
word_preds[k],
)
for k in page_boxes.keys()
},
_idx,
shape,
orientation,
language,
)
for _idx, shape, page_boxes, word_preds, orientation, language in zip(
range(len(boxes)), page_shapes, boxes, text_preds, _orientations, _languages
)
]
return KIEDocument(_pages)
def _build_blocks( # type: ignore[override]
self,
boxes: np.ndarray,
word_preds: List[Tuple[str, float]],
) -> List[Prediction]:
"""Gather independent words in structured blocks
Args:
boxes: bounding boxes of all detected words of the page, of shape (N, 5) or (N, 4, 2)
word_preds: list of all detected words of the page, of shape N
Returns:
list of block elements
"""
if boxes.shape[0] != len(word_preds):
raise ValueError(f"Incompatible argument lengths: {boxes.shape[0]}, {len(word_preds)}")
if boxes.shape[0] == 0:
return []
# Decide whether we try to form lines
_boxes = boxes
idxs, _ = self._sort_boxes(_boxes if _boxes.ndim == 3 else _boxes[:, :4])
predictions = [
Prediction(
value=word_preds[idx][0],
confidence=word_preds[idx][1],
geometry=tuple([tuple(pt) for pt in boxes[idx].tolist()]), # type: ignore[arg-type]
)
if boxes.ndim == 3
else Prediction(
value=word_preds[idx][0],
confidence=word_preds[idx][1],
geometry=((boxes[idx, 0], boxes[idx, 1]), (boxes[idx, 2], boxes[idx, 3])),
)
for idx in idxs
]
return predictions
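# Minimal usage sketch (illustrative, not part of the library): building a Document from two
# synthetic word detections given as relative straight boxes (xmin, ymin, xmax, ymax, confidence).
if __name__ == "__main__":
    builder = DocumentBuilder()
    boxes = np.array([[0.10, 0.10, 0.25, 0.15, 0.95], [0.26, 0.10, 0.40, 0.15, 0.90]])
    words = [("hello", 0.95), ("world", 0.90)]
    doc = builder([boxes], [words], page_shapes=[(1024, 1024)])
    print(doc.render())  # Document objects expose render()/export() in doctr.io.elements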
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from math import floor
from statistics import median_low
from typing import Any, Dict, List, Optional, Tuple, Union
import cv2
import numpy as np
from langdetect import LangDetectException, detect_langs
__all__ = ["estimate_orientation", "get_bitmap_angle", "get_language", "invert_data_structure"]
def get_max_width_length_ratio(contour: np.ndarray) -> float:
"""Get the maximum shape ratio of a contour.
Args:
contour: the contour from cv2.findContour
Returns: the maximum shape ratio
"""
_, (w, h), _ = cv2.minAreaRect(contour)
return max(w / h, h / w)
def estimate_orientation(img: np.ndarray, n_ct: int = 50, ratio_threshold_for_lines: float = 5) -> float:
"""Estimate the angle of the general document orientation based on the
lines of the document and the assumption that they should be horizontal.
Args:
img: the img to analyze
n_ct: the number of contours used for the orientation estimation
        ratio_threshold_for_lines: the aspect ratio (w/h) threshold used to discriminate lines
Returns:
the angle of the general document orientation
"""
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_img = cv2.medianBlur(gray_img, 5)
thresh = cv2.threshold(gray_img, thresh=0, maxval=255, type=cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# try to merge words in lines
(h, w) = img.shape[:2]
k_x = max(1, (floor(w / 100)))
k_y = max(1, (floor(h / 100)))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k_x, k_y))
thresh = cv2.dilate(thresh, kernel, iterations=1)
# extract contours
contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Sort contours
contours = sorted(contours, key=get_max_width_length_ratio, reverse=True)
angles = []
for contour in contours[:n_ct]:
_, (w, h), angle = cv2.minAreaRect(contour)
if w / h > ratio_threshold_for_lines: # select only contours with ratio like lines
angles.append(angle)
        elif w / h < 1 / ratio_threshold_for_lines:  # if lines are vertical, subtract 90 degrees
angles.append(angle - 90)
if len(angles) == 0:
        return 0  # in case no angle is found
else:
return -median_low(angles)
def get_bitmap_angle(bitmap: np.ndarray, n_ct: int = 20, std_max: float = 3.0) -> float:
"""From a binarized segmentation map, find contours and fit min area rectangles to determine page angle
Args:
bitmap: binarized segmentation map
n_ct: number of contours to use to fit page angle
std_max: maximum deviation of the angle distribution to consider the mean angle reliable
Returns:
The angle of the page
"""
# Find all contours on binarized seg map
contours, _ = cv2.findContours(bitmap.astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Sort contours
contours = sorted(contours, key=cv2.contourArea, reverse=True)
# Find largest contours and fit angles
    # Track heights and widths to find aspect ratio (determine if rotation is clockwise)
angles, heights, widths = [], [], []
for ct in contours[:n_ct]:
_, (w, h), alpha = cv2.minAreaRect(ct)
widths.append(w)
heights.append(h)
angles.append(alpha)
if np.std(angles) > std_max:
# Edge case with angles of both 0 and 90°, or multi_oriented docs
angle = 0.0
else:
angle = -np.mean(angles)
# Determine rotation direction (clockwise/counterclockwise)
# Angle coverage: [-90°, +90°], half of the quadrant
if np.sum(widths) < np.sum(heights): # CounterClockwise
angle = 90 + angle
return angle
def rectify_crops(
crops: List[np.ndarray],
orientations: List[int],
) -> List[np.ndarray]:
"""Rotate each crop of the list according to the predicted orientation:
0: already straight, no rotation
1: 90 ccw, rotate 3 times ccw
2: 180, rotate 2 times ccw
3: 270 ccw, rotate 1 time ccw
"""
# Inverse predictions (if angle of +90 is detected, rotate by -90)
orientations = [4 - pred if pred != 0 else 0 for pred in orientations]
return (
[crop if orientation == 0 else np.rot90(crop, orientation) for orientation, crop in zip(orientations, crops)]
if len(orientations) > 0
else []
)
def rectify_loc_preds(
page_loc_preds: np.ndarray,
orientations: List[int],
) -> Optional[np.ndarray]:
"""Orient the quadrangle (Polygon4P) according to the predicted orientation,
so that the points are in this order: top L, top R, bot R, bot L if the crop is readable
"""
return (
np.stack(
[
np.roll(page_loc_pred, orientation, axis=0)
for orientation, page_loc_pred in zip(orientations, page_loc_preds)
],
axis=0,
)
if len(orientations) > 0
else None
)
def get_language(text: str) -> Tuple[str, float]:
"""Get languages of a text using langdetect model.
    Get the language with the highest probability, or "unknown" if the text is too short or the confidence too low
Args:
text (str): text
Returns:
The detected language in ISO 639 code and confidence score
"""
try:
lang = detect_langs(text.lower())[0]
except LangDetectException:
return "unknown", 0.0
if len(text) <= 1 or (len(text) <= 5 and lang.prob <= 0.2):
return "unknown", 0.0
return lang.lang, lang.prob
def invert_data_structure(
x: Union[List[Dict[str, Any]], Dict[str, List[Any]]]
) -> Union[List[Dict[str, Any]], Dict[str, List[Any]]]:
"""Invert a List of Dict of elements to a Dict of list of elements and the other way around
Args:
x: a list of dictionaries with the same keys or a dictionary of lists of the same length
Returns:
        a dictionary of lists when x is a list of dictionaries, or a list of dictionaries when x is a dictionary of lists
"""
if isinstance(x, dict):
assert (
len(set([len(v) for v in x.values()])) == 1
), "All the lists in the dictionnary should have the same length."
return [dict(zip(x, t)) for t in zip(*x.values())]
elif isinstance(x, list):
return {k: [dic[k] for dic in x] for k in x[0]}
else:
raise TypeError(f"Expected input to be either a dict or a list, got {type(input)} instead.")
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List
from doctr.file_utils import is_tf_available
from .. import classification
from ..preprocessor import PreProcessor
from .predictor import CropOrientationPredictor
__all__ = ["crop_orientation_predictor"]
ARCHS: List[str] = [
"magc_resnet31",
"mobilenet_v3_small",
"mobilenet_v3_small_r",
"mobilenet_v3_large",
"mobilenet_v3_large_r",
"resnet18",
"resnet31",
"resnet34",
"resnet50",
"resnet34_wide",
"vgg16_bn_r",
"vit_s",
"vit_b",
]
ORIENTATION_ARCHS: List[str] = ["mobilenet_v3_small_orientation"]
def _crop_orientation_predictor(arch: str, pretrained: bool, **kwargs: Any) -> CropOrientationPredictor:
if arch not in ORIENTATION_ARCHS:
raise ValueError(f"unknown architecture '{arch}'")
# Load directly classifier from backbone
_model = classification.__dict__[arch](pretrained=pretrained)
kwargs["mean"] = kwargs.get("mean", _model.cfg["mean"])
kwargs["std"] = kwargs.get("std", _model.cfg["std"])
kwargs["batch_size"] = kwargs.get("batch_size", 64)
input_shape = _model.cfg["input_shape"][:-1] if is_tf_available() else _model.cfg["input_shape"][1:]
predictor = CropOrientationPredictor(
PreProcessor(input_shape, preserve_aspect_ratio=True, symmetric_pad=True, **kwargs), _model
)
return predictor
def crop_orientation_predictor(
arch: str = "mobilenet_v3_small_orientation", pretrained: bool = False, **kwargs: Any
) -> CropOrientationPredictor:
"""Orientation classification architecture.
>>> import numpy as np
>>> from doctr.models import crop_orientation_predictor
    >>> model = crop_orientation_predictor(arch='mobilenet_v3_small_orientation', pretrained=True)
>>> input_crop = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
>>> out = model([input_crop])
Args:
        arch: name of the architecture to use (e.g. 'mobilenet_v3_small_orientation')
pretrained: If True, returns a model pre-trained on our recognition crops dataset
Returns:
CropOrientationPredictor
"""
return _crop_orientation_predictor(arch, pretrained, **kwargs)
|
from .mobilenet import *
from .resnet import *
from .vgg import *
from .magc_resnet import *
from .vit import *
from .zoo import *
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from copy import deepcopy
from functools import partial
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from doctr.datasets import VOCABS
from ...utils import load_pretrained_params
from ..resnet.tensorflow import ResNet
__all__ = ["magc_resnet31"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"magc_resnet31": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": None,
},
}
class MAGC(layers.Layer):
"""Implements the Multi-Aspect Global Context Attention, as described in
<https://arxiv.org/pdf/1910.02562.pdf>`_.
Args:
inplanes: input channels
headers: number of headers to split channels
        attn_scale: if True, re-scale attention to counteract the variance distributions
ratio: bottleneck ratio
**kwargs
"""
def __init__(
self,
inplanes: int,
headers: int = 8,
attn_scale: bool = False,
ratio: float = 0.0625, # bottleneck ratio of 1/16 as described in paper
**kwargs,
) -> None:
super().__init__(**kwargs)
self.headers = headers # h
self.inplanes = inplanes # C
self.attn_scale = attn_scale
self.planes = int(inplanes * ratio)
self.single_header_inplanes = int(inplanes / headers) # C / h
self.conv_mask = layers.Conv2D(filters=1, kernel_size=1, kernel_initializer=tf.initializers.he_normal())
self.transform = Sequential(
[
layers.Conv2D(filters=self.planes, kernel_size=1, kernel_initializer=tf.initializers.he_normal()),
layers.LayerNormalization([1, 2, 3]),
layers.ReLU(),
layers.Conv2D(filters=self.inplanes, kernel_size=1, kernel_initializer=tf.initializers.he_normal()),
],
name="transform",
)
def context_modeling(self, inputs: tf.Tensor) -> tf.Tensor:
b, h, w, c = (tf.shape(inputs)[i] for i in range(4))
# B, H, W, C -->> B*h, H, W, C/h
x = tf.reshape(inputs, shape=(b, h, w, self.headers, self.single_header_inplanes))
x = tf.transpose(x, perm=(0, 3, 1, 2, 4))
x = tf.reshape(x, shape=(b * self.headers, h, w, self.single_header_inplanes))
        # Compute shortcut
shortcut = x
# B*h, 1, H*W, C/h
shortcut = tf.reshape(shortcut, shape=(b * self.headers, 1, h * w, self.single_header_inplanes))
# B*h, 1, C/h, H*W
shortcut = tf.transpose(shortcut, perm=[0, 1, 3, 2])
# Compute context mask
# B*h, H, W, 1
context_mask = self.conv_mask(x)
# B*h, 1, H*W, 1
context_mask = tf.reshape(context_mask, shape=(b * self.headers, 1, h * w, 1))
# scale variance
if self.attn_scale and self.headers > 1:
context_mask = context_mask / math.sqrt(self.single_header_inplanes)
# B*h, 1, H*W, 1
context_mask = tf.keras.activations.softmax(context_mask, axis=2)
# Compute context
# B*h, 1, C/h, 1
context = tf.matmul(shortcut, context_mask)
context = tf.reshape(context, shape=(b, 1, c, 1))
# B, 1, 1, C
context = tf.transpose(context, perm=(0, 1, 3, 2))
# Set shape to resolve shape when calling this module in the Sequential MAGCResnet
batch, chan = inputs.get_shape().as_list()[0], inputs.get_shape().as_list()[-1]
context.set_shape([batch, 1, 1, chan])
return context
def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor:
# Context modeling: B, H, W, C -> B, 1, 1, C
context = self.context_modeling(inputs)
# Transform: B, 1, 1, C -> B, 1, 1, C
transformed = self.transform(context)
return inputs + transformed
def _magc_resnet(
arch: str,
pretrained: bool,
num_blocks: List[int],
output_channels: List[int],
stage_downsample: List[bool],
stage_conv: List[bool],
stage_pooling: List[Optional[Tuple[int, int]]],
origin_stem: bool = True,
**kwargs: Any,
) -> ResNet:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
_cfg["input_shape"] = kwargs["input_shape"]
kwargs.pop("classes")
# Build the model
model = ResNet(
num_blocks,
output_channels,
stage_downsample,
stage_conv,
stage_pooling,
origin_stem,
attn_module=partial(MAGC, headers=8, attn_scale=True),
cfg=_cfg,
**kwargs,
)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def magc_resnet31(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet31 architecture with Multi-Aspect Global Context Attention as described in
`"MASTER: Multi-Aspect Non-local Network for Scene Text Recognition",
<https://arxiv.org/pdf/1910.02562.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import magc_resnet31
>>> model = magc_resnet31(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 224, 224, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A feature extractor model
"""
return _magc_resnet(
"magc_resnet31",
pretrained,
[1, 2, 5, 3],
[256, 256, 512, 512],
[False] * 4,
[True] * 4,
[(2, 2), (2, 1), None, None],
False,
stem_channels=128,
**kwargs,
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from copy import deepcopy
from functools import partial
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import nn
from doctr.datasets import VOCABS
from ...utils.pytorch import load_pretrained_params
from ..resnet.pytorch import ResNet
__all__ = ["magc_resnet31"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"magc_resnet31": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/magc_resnet31-857391d8.pt&src=0",
},
}
class MAGC(nn.Module):
"""Implements the Multi-Aspect Global Context Attention, as described in
<https://arxiv.org/pdf/1910.02562.pdf>`_.
Args:
inplanes: input channels
headers: number of headers to split channels
        attn_scale: if True, re-scale attention to counteract the variance distributions
ratio: bottleneck ratio
**kwargs
"""
def __init__(
self,
inplanes: int,
headers: int = 8,
attn_scale: bool = False,
ratio: float = 0.0625, # bottleneck ratio of 1/16 as described in paper
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.headers = headers
self.inplanes = inplanes
self.attn_scale = attn_scale
self.planes = int(inplanes * ratio)
self.single_header_inplanes = int(inplanes / headers)
self.conv_mask = nn.Conv2d(self.single_header_inplanes, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=1)
self.transform = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True),
nn.Conv2d(self.planes, self.inplanes, kernel_size=1),
)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
batch, _, height, width = inputs.size()
# (N * headers, C / headers, H , W)
x = inputs.view(batch * self.headers, self.single_header_inplanes, height, width)
shortcut = x
# (N * headers, C / headers, H * W)
shortcut = shortcut.view(batch * self.headers, self.single_header_inplanes, height * width)
# (N * headers, 1, H, W)
context_mask = self.conv_mask(x)
# (N * headers, H * W)
context_mask = context_mask.view(batch * self.headers, -1)
# scale variance
if self.attn_scale and self.headers > 1:
context_mask = context_mask / math.sqrt(self.single_header_inplanes)
# (N * headers, H * W)
context_mask = self.softmax(context_mask)
# (N * headers, C / headers)
context = (shortcut * context_mask.unsqueeze(1)).sum(-1)
# (N, C, 1, 1)
context = context.view(batch, self.headers * self.single_header_inplanes, 1, 1)
# Transform: B, C, 1, 1 -> B, C, 1, 1
transformed = self.transform(context)
return inputs + transformed
def _magc_resnet(
arch: str,
pretrained: bool,
num_blocks: List[int],
output_channels: List[int],
stage_stride: List[int],
stage_conv: List[bool],
stage_pooling: List[Optional[Tuple[int, int]]],
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> ResNet:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
# Build the model
model = ResNet(
num_blocks,
output_channels,
stage_stride,
stage_conv,
stage_pooling,
attn_module=partial(MAGC, headers=8, attn_scale=True),
cfg=_cfg,
**kwargs,
)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if kwargs["num_classes"] != len(default_cfgs[arch]["classes"]) else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
return model
def magc_resnet31(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet31 architecture with Multi-Aspect Global Context Attention as described in
`"MASTER: Multi-Aspect Non-local Network for Scene Text Recognition",
<https://arxiv.org/pdf/1910.02562.pdf>`_.
>>> import torch
>>> from doctr.models import magc_resnet31
>>> model = magc_resnet31(pretrained=False)
    >>> input_tensor = torch.rand((1, 3, 224, 224), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A feature extractor model
"""
return _magc_resnet(
"magc_resnet31",
pretrained,
[1, 2, 5, 3],
[256, 256, 512, 512],
[1, 1, 1, 1],
[True] * 4,
[(2, 2), (2, 1), None, None],
origin_stem=False,
stem_channels=128,
ignore_keys=["13.weight", "13.bias"],
**kwargs,
)
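# Minimal sanity-check sketch (illustrative, not part of the library): since the MAGC block
# returns `inputs + transformed`, its output shape always matches its input shape.
if __name__ == "__main__":
    attn = MAGC(inplanes=64, headers=8, attn_scale=True)
    x = torch.rand(2, 64, 8, 32)
    assert attn(x).shape == x.shape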
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Greatly inspired by https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenetv3.py
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple, Union
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from ....datasets import VOCABS
from ...utils import conv_sequence, load_pretrained_params
__all__ = [
"MobileNetV3",
"mobilenet_v3_small",
"mobilenet_v3_small_r",
"mobilenet_v3_large",
"mobilenet_v3_large_r",
"mobilenet_v3_small_orientation",
]
default_cfgs: Dict[str, Dict[str, Any]] = {
"mobilenet_v3_large": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_large-47d25d7e.zip&src=0",
},
"mobilenet_v3_large_r": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_large_r-a108e192.zip&src=0",
},
"mobilenet_v3_small": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_small-8a32c32c.zip&src=0",
},
"mobilenet_v3_small_r": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_small_r-3d61452e.zip&src=0",
},
"mobilenet_v3_small_orientation": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (128, 128, 3),
"classes": [0, 90, 180, 270],
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/classif_mobilenet_v3_small-1ea8db03.zip&src=0",
},
}
def hard_swish(x: tf.Tensor) -> tf.Tensor:
return x * tf.nn.relu6(x + 3.0) / 6.0
def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
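# Worked example (illustrative comment): _make_divisible(37.5, 8) rounds to the nearest
# multiple of 8 -> int(37.5 + 4.0) // 8 * 8 = 41 // 8 * 8 = 40, and since 40 >= 0.9 * 37.5,
# no extra divisor is added, so the result is 40.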
class SqueezeExcitation(Sequential):
"""Squeeze and Excitation."""
def __init__(self, chan: int, squeeze_factor: int = 4) -> None:
super().__init__(
[
layers.GlobalAveragePooling2D(),
layers.Dense(chan // squeeze_factor, activation="relu"),
layers.Dense(chan, activation="hard_sigmoid"),
layers.Reshape((1, 1, chan)),
]
)
def call(self, inputs: tf.Tensor, **kwargs: Any) -> tf.Tensor:
x = super().call(inputs, **kwargs)
x = tf.math.multiply(inputs, x)
return x
class InvertedResidualConfig:
def __init__(
self,
input_channels: int,
kernel: int,
expanded_channels: int,
out_channels: int,
use_se: bool,
activation: str,
stride: Union[int, Tuple[int, int]],
width_mult: float = 1,
) -> None:
self.input_channels = self.adjust_channels(input_channels, width_mult)
self.kernel = kernel
self.expanded_channels = self.adjust_channels(expanded_channels, width_mult)
self.out_channels = self.adjust_channels(out_channels, width_mult)
self.use_se = use_se
self.use_hs = activation == "HS"
self.stride = stride
@staticmethod
def adjust_channels(channels: int, width_mult: float):
return _make_divisible(channels * width_mult, 8)
class InvertedResidual(layers.Layer):
"""InvertedResidual for mobilenet
Args:
conf: configuration object for inverted residual
"""
def __init__(
self,
conf: InvertedResidualConfig,
**kwargs: Any,
) -> None:
_kwargs = {"input_shape": kwargs.pop("input_shape")} if isinstance(kwargs.get("input_shape"), tuple) else {}
super().__init__(**kwargs)
act_fn = hard_swish if conf.use_hs else tf.nn.relu
_is_s1 = (isinstance(conf.stride, tuple) and conf.stride == (1, 1)) or conf.stride == 1
self.use_res_connect = _is_s1 and conf.input_channels == conf.out_channels
_layers = []
# expand
if conf.expanded_channels != conf.input_channels:
_layers.extend(conv_sequence(conf.expanded_channels, act_fn, kernel_size=1, bn=True, **_kwargs))
# depth-wise
_layers.extend(
conv_sequence(
conf.expanded_channels,
act_fn,
kernel_size=conf.kernel,
strides=conf.stride,
bn=True,
groups=conf.expanded_channels,
)
)
if conf.use_se:
_layers.append(SqueezeExcitation(conf.expanded_channels))
# project
_layers.extend(
conv_sequence(
conf.out_channels,
None,
kernel_size=1,
bn=True,
)
)
self.block = Sequential(_layers)
def call(
self,
inputs: tf.Tensor,
**kwargs: Any,
) -> tf.Tensor:
out = self.block(inputs, **kwargs)
if self.use_res_connect:
out = tf.add(out, inputs)
return out
class MobileNetV3(Sequential):
"""Implements MobileNetV3, inspired from both:
<https://github.com/xiaochus/MobileNetV3/tree/master/model>`_.
and <https://pytorch.org/vision/stable/_modules/torchvision/models/mobilenetv3.html>`_.
"""
def __init__(
self,
layout: List[InvertedResidualConfig],
include_top: bool = True,
head_chans: int = 1024,
num_classes: int = 1000,
cfg: Optional[Dict[str, Any]] = None,
input_shape: Optional[Tuple[int, int, int]] = None,
) -> None:
_layers = [
Sequential(
conv_sequence(
layout[0].input_channels, hard_swish, True, kernel_size=3, strides=2, input_shape=input_shape
),
name="stem",
)
]
for idx, conf in enumerate(layout):
_layers.append(
InvertedResidual(conf, name=f"inverted_{idx}"),
)
_layers.append(
Sequential(conv_sequence(6 * layout[-1].out_channels, hard_swish, True, kernel_size=1), name="final_block")
)
if include_top:
_layers.extend(
[
layers.GlobalAveragePooling2D(),
layers.Dense(head_chans, activation=hard_swish),
layers.Dropout(0.2),
layers.Dense(num_classes),
]
)
super().__init__(_layers)
self.cfg = cfg
def _mobilenet_v3(arch: str, pretrained: bool, rect_strides: bool = False, **kwargs: Any) -> MobileNetV3:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
_cfg["input_shape"] = kwargs["input_shape"]
kwargs.pop("classes")
# cf. Table 1 & 2 of the paper
if arch.startswith("mobilenet_v3_small"):
inverted_residual_setting = [
InvertedResidualConfig(16, 3, 16, 16, True, "RE", 2), # C1
InvertedResidualConfig(16, 3, 72, 24, False, "RE", (2, 1) if rect_strides else 2), # C2
InvertedResidualConfig(24, 3, 88, 24, False, "RE", 1),
InvertedResidualConfig(24, 5, 96, 40, True, "HS", (2, 1) if rect_strides else 2), # C3
InvertedResidualConfig(40, 5, 240, 40, True, "HS", 1),
InvertedResidualConfig(40, 5, 240, 40, True, "HS", 1),
InvertedResidualConfig(40, 5, 120, 48, True, "HS", 1),
InvertedResidualConfig(48, 5, 144, 48, True, "HS", 1),
InvertedResidualConfig(48, 5, 288, 96, True, "HS", (2, 1) if rect_strides else 2), # C4
InvertedResidualConfig(96, 5, 576, 96, True, "HS", 1),
InvertedResidualConfig(96, 5, 576, 96, True, "HS", 1),
]
head_chans = 1024
else:
inverted_residual_setting = [
InvertedResidualConfig(16, 3, 16, 16, False, "RE", 1),
InvertedResidualConfig(16, 3, 64, 24, False, "RE", 2), # C1
InvertedResidualConfig(24, 3, 72, 24, False, "RE", 1),
InvertedResidualConfig(24, 5, 72, 40, True, "RE", (2, 1) if rect_strides else 2), # C2
InvertedResidualConfig(40, 5, 120, 40, True, "RE", 1),
InvertedResidualConfig(40, 5, 120, 40, True, "RE", 1),
InvertedResidualConfig(40, 3, 240, 80, False, "HS", (2, 1) if rect_strides else 2), # C3
InvertedResidualConfig(80, 3, 200, 80, False, "HS", 1),
InvertedResidualConfig(80, 3, 184, 80, False, "HS", 1),
InvertedResidualConfig(80, 3, 184, 80, False, "HS", 1),
InvertedResidualConfig(80, 3, 480, 112, True, "HS", 1),
InvertedResidualConfig(112, 3, 672, 112, True, "HS", 1),
InvertedResidualConfig(112, 5, 672, 160, True, "HS", (2, 1) if rect_strides else 2), # C4
InvertedResidualConfig(160, 5, 960, 160, True, "HS", 1),
InvertedResidualConfig(160, 5, 960, 160, True, "HS", 1),
]
head_chans = 1280
kwargs["num_classes"] = _cfg["num_classes"]
kwargs["input_shape"] = _cfg["input_shape"]
# Build the model
model = MobileNetV3(
inverted_residual_setting,
head_chans=head_chans,
cfg=_cfg,
**kwargs,
)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def mobilenet_v3_small(pretrained: bool = False, **kwargs: Any) -> MobileNetV3:
"""MobileNetV3-Small architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import mobilenet_v3_small
>>> model = mobilenet_v3_small(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a keras.Model
"""
return _mobilenet_v3("mobilenet_v3_small", pretrained, False, **kwargs)
def mobilenet_v3_small_r(pretrained: bool = False, **kwargs: Any) -> MobileNetV3:
"""MobileNetV3-Small architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_, with rectangular pooling.
>>> import tensorflow as tf
>>> from doctr.models import mobilenet_v3_small_r
>>> model = mobilenet_v3_small_r(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a keras.Model
"""
return _mobilenet_v3("mobilenet_v3_small_r", pretrained, True, **kwargs)
def mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> MobileNetV3:
"""MobileNetV3-Large architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import mobilenet_v3_large
>>> model = mobilenet_v3_large(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a keras.Model
"""
return _mobilenet_v3("mobilenet_v3_large", pretrained, False, **kwargs)
def mobilenet_v3_large_r(pretrained: bool = False, **kwargs: Any) -> MobileNetV3:
"""MobileNetV3-Large architecture as described in
`"Searching for MobileNetV3",
    <https://arxiv.org/pdf/1905.02244.pdf>`_, with rectangular pooling.
>>> import tensorflow as tf
>>> from doctr.models import mobilenet_v3_large_r
>>> model = mobilenet_v3_large_r(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a keras.Model
"""
return _mobilenet_v3("mobilenet_v3_large_r", pretrained, True, **kwargs)
def mobilenet_v3_small_orientation(pretrained: bool = False, **kwargs: Any) -> MobileNetV3:
"""MobileNetV3-Small architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import mobilenet_v3_small_orientation
>>> model = mobilenet_v3_small_orientation(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a keras.Model
"""
return _mobilenet_v3("mobilenet_v3_small_orientation", pretrained, include_top=True, **kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Greatly inspired by https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenetv3.py
from copy import deepcopy
from typing import Any, Dict, List, Optional
from torchvision.models import mobilenetv3
from doctr.datasets import VOCABS
from ...utils import load_pretrained_params
__all__ = [
"mobilenet_v3_small",
"mobilenet_v3_small_r",
"mobilenet_v3_large",
"mobilenet_v3_large_r",
"mobilenet_v3_small_orientation",
]
default_cfgs: Dict[str, Dict[str, Any]] = {
"mobilenet_v3_large": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_large-11fc8cb9.pt&src=0",
},
"mobilenet_v3_large_r": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_large_r-74a22066.pt&src=0",
},
"mobilenet_v3_small": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_small-6a4bfa6b.pt&src=0",
},
"mobilenet_v3_small_r": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/mobilenet_v3_small_r-1a8a3530.pt&src=0",
},
"mobilenet_v3_small_orientation": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 128, 128),
"classes": [0, 90, 180, 270],
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/classif_mobilenet_v3_small-24f8ff57.pt&src=0",
},
}
def _mobilenet_v3(
arch: str,
pretrained: bool,
rect_strides: Optional[List[str]] = None,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> mobilenetv3.MobileNetV3:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
if arch.startswith("mobilenet_v3_small"):
model = mobilenetv3.mobilenet_v3_small(**kwargs)
else:
model = mobilenetv3.mobilenet_v3_large(**kwargs)
# Rectangular strides
if isinstance(rect_strides, list):
for layer_name in rect_strides:
m = model
for child in layer_name.split("."):
m = getattr(m, child)
m.stride = (2, 1)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if kwargs["num_classes"] != len(default_cfgs[arch]["classes"]) else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
model.cfg = _cfg
return model
def mobilenet_v3_small(pretrained: bool = False, **kwargs: Any) -> mobilenetv3.MobileNetV3:
"""MobileNetV3-Small architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_.
>>> import torch
>>> from doctr.models import mobilenet_v3_small
    >>> model = mobilenet_v3_small(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a torch.nn.Module
"""
return _mobilenet_v3(
"mobilenet_v3_small", pretrained, ignore_keys=["classifier.3.weight", "classifier.3.bias"], **kwargs
)
def mobilenet_v3_small_r(pretrained: bool = False, **kwargs: Any) -> mobilenetv3.MobileNetV3:
"""MobileNetV3-Small architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_, with rectangular pooling.
>>> import torch
>>> from doctr.models import mobilenet_v3_small_r
>>> model = mobilenet_v3_small_r(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a torch.nn.Module
"""
return _mobilenet_v3(
"mobilenet_v3_small_r",
pretrained,
["features.2.block.1.0", "features.4.block.1.0", "features.9.block.1.0"],
ignore_keys=["classifier.3.weight", "classifier.3.bias"],
**kwargs,
)
def mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> mobilenetv3.MobileNetV3:
"""MobileNetV3-Large architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_.
>>> import torch
>>> from doctr.models import mobilenet_v3_large
>>> model = mobilenet_v3_large(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a torch.nn.Module
"""
return _mobilenet_v3(
"mobilenet_v3_large",
pretrained,
ignore_keys=["classifier.3.weight", "classifier.3.bias"],
**kwargs,
)
def mobilenet_v3_large_r(pretrained: bool = False, **kwargs: Any) -> mobilenetv3.MobileNetV3:
"""MobileNetV3-Large architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_, with rectangular pooling.
>>> import torch
>>> from doctr.models import mobilenet_v3_large_r
>>> model = mobilenet_v3_large_r(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a torch.nn.Module
"""
return _mobilenet_v3(
"mobilenet_v3_large_r",
pretrained,
["features.4.block.1.0", "features.7.block.1.0", "features.13.block.1.0"],
ignore_keys=["classifier.3.weight", "classifier.3.bias"],
**kwargs,
)
def mobilenet_v3_small_orientation(pretrained: bool = False, **kwargs: Any) -> mobilenetv3.MobileNetV3:
"""MobileNetV3-Small architecture as described in
`"Searching for MobileNetV3",
<https://arxiv.org/pdf/1905.02244.pdf>`_.
>>> import torch
>>> from doctr.models import mobilenet_v3_small_orientation
>>> model = mobilenet_v3_small_orientation(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
a torch.nn.Module
"""
return _mobilenet_v3(
"mobilenet_v3_small_orientation",
pretrained,
ignore_keys=["classifier.3.weight", "classifier.3.bias"],
**kwargs,
)
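# Minimal sanity-check sketch (illustrative, not part of the library): the '_r' variants are
# expected to replace the (2, 2) stride of selected depthwise convolutions with (2, 1),
# following the torchvision layer layout assumed in `rect_strides` above.
if __name__ == "__main__":
    model = mobilenet_v3_small_r(pretrained=False)
    assert model.features[2].block[1][0].stride == (2, 1)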
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from doctr.datasets import VOCABS
from ...utils import conv_sequence, load_pretrained_params
__all__ = ["ResNet", "resnet18", "resnet31", "resnet34", "resnet50", "resnet34_wide"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"resnet18": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/resnet18-d4634669.zip&src=0",
},
"resnet31": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/resnet31-5a47a60b.zip&src=0",
},
"resnet34": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/resnet34-5dcc97ca.zip&src=0",
},
"resnet50": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/resnet50-e75e4cdf.zip&src=0",
},
"resnet34_wide": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/resnet34_wide-c1271816.zip&src=0",
},
}
class ResnetBlock(layers.Layer):
"""Implements a resnet31 block with shortcut
Args:
conv_shortcut: Use of shortcut
output_channels: number of channels to use in Conv2D
kernel_size: size of square kernels
strides: strides to use in the first convolution of the block
"""
def __init__(self, output_channels: int, conv_shortcut: bool, strides: int = 1, **kwargs) -> None:
super().__init__(**kwargs)
if conv_shortcut:
self.shortcut = Sequential(
[
layers.Conv2D(
filters=output_channels,
strides=strides,
padding="same",
kernel_size=1,
use_bias=False,
kernel_initializer="he_normal",
),
layers.BatchNormalization(),
]
)
else:
self.shortcut = layers.Lambda(lambda x: x)
self.conv_block = Sequential(self.conv_resnetblock(output_channels, 3, strides))
self.act = layers.Activation("relu")
@staticmethod
def conv_resnetblock(
output_channels: int,
kernel_size: int,
strides: int = 1,
) -> List[layers.Layer]:
return [
*conv_sequence(output_channels, "relu", bn=True, strides=strides, kernel_size=kernel_size),
*conv_sequence(output_channels, None, bn=True, kernel_size=kernel_size),
]
def call(self, inputs: tf.Tensor) -> tf.Tensor:
clone = self.shortcut(inputs)
conv_out = self.conv_block(inputs)
out = self.act(clone + conv_out)
return out
def resnet_stage(
num_blocks: int, out_channels: int, shortcut: bool = False, downsample: bool = False
) -> List[layers.Layer]:
_layers: List[layers.Layer] = [ResnetBlock(out_channels, conv_shortcut=shortcut, strides=2 if downsample else 1)]
for _ in range(1, num_blocks):
_layers.append(ResnetBlock(out_channels, conv_shortcut=False))
return _layers
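# Illustrative sketch (not part of the original module): resnet_stage returns a plain list of
# ResnetBlock layers, so it can be wrapped in a Sequential. The channel count and input size
# below are arbitrary assumptions chosen for the example.
def _demo_resnet_stage() -> tf.Tensor:
    stage = Sequential(resnet_stage(num_blocks=2, out_channels=64, shortcut=False, downsample=False))
    feature_map = tf.random.uniform((1, 32, 32, 64), dtype=tf.float32)
    # With shortcut=False and downsample=False, channels and spatial resolution are preserved
    return stage(feature_map)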
class ResNet(Sequential):
"""Implements a ResNet architecture
Args:
num_blocks: number of resnet block in each stage
output_channels: number of channels in each stage
stage_downsample: whether the first residual block of a stage should downsample
stage_conv: whether to add a conv_sequence after each stage
stage_pooling: pooling to add after each stage (if None, no pooling)
origin_stem: whether to use the original ResNet stem or ResNet-31's
stem_channels: number of output channels of the stem convolutions
attn_module: attention module to use in each stage
include_top: whether the classifier head should be instantiated
num_classes: number of output classes
input_shape: shape of inputs
"""
def __init__(
self,
num_blocks: List[int],
output_channels: List[int],
stage_downsample: List[bool],
stage_conv: List[bool],
stage_pooling: List[Optional[Tuple[int, int]]],
origin_stem: bool = True,
stem_channels: int = 64,
attn_module: Optional[Callable[[int], layers.Layer]] = None,
include_top: bool = True,
num_classes: int = 1000,
cfg: Optional[Dict[str, Any]] = None,
input_shape: Optional[Tuple[int, int, int]] = None,
) -> None:
inplanes = stem_channels
if origin_stem:
_layers = [
*conv_sequence(inplanes, "relu", True, kernel_size=7, strides=2, input_shape=input_shape),
layers.MaxPool2D(pool_size=(3, 3), strides=2, padding="same"),
]
else:
_layers = [
*conv_sequence(inplanes // 2, "relu", True, kernel_size=3, input_shape=input_shape),
*conv_sequence(inplanes, "relu", True, kernel_size=3),
layers.MaxPool2D(pool_size=2, strides=2, padding="valid"),
]
for n_blocks, out_chan, down, conv, pool in zip(
num_blocks, output_channels, stage_downsample, stage_conv, stage_pooling
):
_layers.extend(resnet_stage(n_blocks, out_chan, out_chan != inplanes, down))
if attn_module is not None:
_layers.append(attn_module(out_chan))
if conv:
_layers.extend(conv_sequence(out_chan, activation="relu", bn=True, kernel_size=3))
if pool:
_layers.append(layers.MaxPool2D(pool_size=pool, strides=pool, padding="valid"))
inplanes = out_chan
if include_top:
_layers.extend(
[
layers.GlobalAveragePooling2D(),
layers.Dense(num_classes),
]
)
super().__init__(_layers)
self.cfg = cfg
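# Illustrative sketch (not part of the original module): building a small two-stage ResNet by
# hand. The stage configuration below is a toy assumption, not a released architecture.
def _demo_tiny_resnet() -> tf.Tensor:
    model = ResNet(
        num_blocks=[2, 2],
        output_channels=[64, 128],
        stage_downsample=[False, True],
        stage_conv=[False, False],
        stage_pooling=[None, None],
        origin_stem=True,
        stem_channels=64,
        include_top=True,
        num_classes=10,
        input_shape=(32, 32, 3),
    )
    # Returns the (1, 10) logits for a random input
    return model(tf.random.uniform((1, 32, 32, 3), dtype=tf.float32))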
def _resnet(
arch: str,
pretrained: bool,
num_blocks: List[int],
output_channels: List[int],
stage_downsample: List[bool],
stage_conv: List[bool],
stage_pooling: List[Optional[Tuple[int, int]]],
origin_stem: bool = True,
**kwargs: Any,
) -> ResNet:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
_cfg["input_shape"] = kwargs["input_shape"]
kwargs.pop("classes")
# Build the model
model = ResNet(
num_blocks, output_channels, stage_downsample, stage_conv, stage_pooling, origin_stem, cfg=_cfg, **kwargs
)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def resnet18(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet-18 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import resnet18
>>> model = resnet18(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A classification model
"""
return _resnet(
"resnet18",
pretrained,
[2, 2, 2, 2],
[64, 128, 256, 512],
[False, True, True, True],
[False] * 4,
[None] * 4,
True,
**kwargs,
)
def resnet31(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet31 architecture with rectangular pooling windows as described in
`"Show, Attend and Read:A Simple and Strong Baseline for Irregular Text Recognition",
<https://arxiv.org/pdf/1811.00751.pdf>`_. Downsizing: (H, W) --> (H/8, W/4)
>>> import tensorflow as tf
>>> from doctr.models import resnet31
>>> model = resnet31(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A classification model
"""
return _resnet(
"resnet31",
pretrained,
[1, 2, 5, 3],
[256, 256, 512, 512],
[False] * 4,
[True] * 4,
[(2, 2), (2, 1), None, None],
False,
stem_channels=128,
**kwargs,
)
def resnet34(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet-34 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import resnet34
>>> model = resnet34(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A classification model
"""
return _resnet(
"resnet34",
pretrained,
[3, 4, 6, 3],
[64, 128, 256, 512],
[False, True, True, True],
[False] * 4,
[None] * 4,
True,
**kwargs,
)
def resnet50(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet-50 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import resnet50
>>> model = resnet50(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A classification model
"""
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs["resnet50"]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs["resnet50"]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs["resnet50"]["classes"])
_cfg = deepcopy(default_cfgs["resnet50"])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
_cfg["input_shape"] = kwargs["input_shape"]
kwargs.pop("classes")
model = ResNet50(
weights=None,
include_top=True,
pooling=True,
input_shape=kwargs["input_shape"],
classes=kwargs["num_classes"],
classifier_activation=None,
)
model.cfg = _cfg
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs["resnet50"]["url"])
return model
def resnet34_wide(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet-34 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_ with twice as many output channels for each stage.
>>> import tensorflow as tf
>>> from doctr.models import resnet34_wide
>>> model = resnet34_wide(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A classification model
"""
return _resnet(
"resnet34_wide",
pretrained,
[3, 4, 6, 3],
[128, 256, 512, 1024],
[False, True, True, True],
[False] * 4,
[None] * 4,
True,
stem_channels=128,
**kwargs,
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
from torch import nn
from torchvision.models.resnet import BasicBlock
from torchvision.models.resnet import ResNet as TVResNet
from torchvision.models.resnet import resnet18 as tv_resnet18
from torchvision.models.resnet import resnet34 as tv_resnet34
from torchvision.models.resnet import resnet50 as tv_resnet50
from doctr.datasets import VOCABS
from ...utils import conv_sequence_pt, load_pretrained_params
__all__ = ["ResNet", "resnet18", "resnet31", "resnet34", "resnet50", "resnet34_wide", "resnet_stage"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"resnet18": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/resnet18-244bf390.pt&src=0",
},
"resnet31": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/resnet31-1056cc5c.pt&src=0",
},
"resnet34": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/resnet34-bd8725db.pt&src=0",
},
"resnet50": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/resnet50-1a6c155e.pt&src=0",
},
"resnet34_wide": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": None,
},
}
def resnet_stage(in_channels: int, out_channels: int, num_blocks: int, stride: int) -> List[nn.Module]:
_layers: List[nn.Module] = []
in_chan = in_channels
s = stride
for _ in range(num_blocks):
downsample = None
if in_chan != out_channels:
downsample = nn.Sequential(*conv_sequence_pt(in_chan, out_channels, False, True, kernel_size=1, stride=s))
_layers.append(BasicBlock(in_chan, out_channels, stride=s, downsample=downsample))
in_chan = out_channels
# Only the first block can have stride != 1
s = 1
return _layers
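# Illustrative sketch (not part of the original module): a stage is a plain list of BasicBlock
# modules, so it can be wrapped in nn.Sequential. The channel counts and input size below are
# arbitrary assumptions chosen for the example.
def _demo_resnet_stage():
    import torch

    stage = nn.Sequential(*resnet_stage(in_channels=64, out_channels=128, num_blocks=2, stride=2))
    feature_map = torch.rand((1, 64, 32, 32), dtype=torch.float32)
    # The first block projects 64 -> 128 channels and halves the spatial resolution (stride=2)
    return stage(feature_map)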
class ResNet(nn.Sequential):
"""Implements a ResNet-31 architecture from `"Show, Attend and Read:A Simple and Strong Baseline for Irregular
Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
Args:
num_blocks: number of resnet block in each stage
output_channels: number of channels in each stage
stage_conv: whether to add a conv_sequence after each stage
stage_pooling: pooling to add after each stage (if None, no pooling)
origin_stem: whether to use the orginal ResNet stem or ResNet-31's
stem_channels: number of output channels of the stem convolutions
attn_module: attention module to use in each stage
include_top: whether the classifier head should be instantiated
num_classes: number of output classes
"""
def __init__(
self,
num_blocks: List[int],
output_channels: List[int],
stage_stride: List[int],
stage_conv: List[bool],
stage_pooling: List[Optional[Tuple[int, int]]],
origin_stem: bool = True,
stem_channels: int = 64,
attn_module: Optional[Callable[[int], nn.Module]] = None,
include_top: bool = True,
num_classes: int = 1000,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
_layers: List[nn.Module]
if origin_stem:
_layers = [
*conv_sequence_pt(3, stem_channels, True, True, kernel_size=7, padding=3, stride=2),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
]
else:
_layers = [
*conv_sequence_pt(3, stem_channels // 2, True, True, kernel_size=3, padding=1),
*conv_sequence_pt(stem_channels // 2, stem_channels, True, True, kernel_size=3, padding=1),
nn.MaxPool2d(2),
]
in_chans = [stem_channels] + output_channels[:-1]
for n_blocks, in_chan, out_chan, stride, conv, pool in zip(
num_blocks, in_chans, output_channels, stage_stride, stage_conv, stage_pooling
):
_stage = resnet_stage(in_chan, out_chan, n_blocks, stride)
if attn_module is not None:
_stage.append(attn_module(out_chan))
if conv:
_stage.extend(conv_sequence_pt(out_chan, out_chan, True, True, kernel_size=3, padding=1))
if pool is not None:
_stage.append(nn.MaxPool2d(pool))
_layers.append(nn.Sequential(*_stage))
if include_top:
_layers.extend(
[
nn.AdaptiveAvgPool2d(1),
nn.Flatten(1),
nn.Linear(output_channels[-1], num_classes, bias=True),
]
)
super().__init__(*_layers)
self.cfg = cfg
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
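# Illustrative sketch (not part of the original module): instantiating the generic ResNet with a
# toy single-stage configuration. All values below are arbitrary assumptions for the example.
def _demo_tiny_resnet():
    import torch

    model = ResNet(
        num_blocks=[2],
        output_channels=[128],
        stage_stride=[2],
        stage_conv=[False],
        stage_pooling=[None],
        origin_stem=True,
        stem_channels=64,
        include_top=True,
        num_classes=10,
    )
    # Returns the (1, 10) logits for a random 3x32x32 input
    return model(torch.rand((1, 3, 32, 32), dtype=torch.float32))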
def _resnet(
arch: str,
pretrained: bool,
num_blocks: List[int],
output_channels: List[int],
stage_stride: List[int],
stage_conv: List[bool],
stage_pooling: List[Optional[Tuple[int, int]]],
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> ResNet:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
# Build the model
model = ResNet(num_blocks, output_channels, stage_stride, stage_conv, stage_pooling, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if kwargs["num_classes"] != len(default_cfgs[arch]["classes"]) else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
return model
def _tv_resnet(
arch: str,
pretrained: bool,
arch_fn,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> TVResNet:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
# Build the model
model = arch_fn(**kwargs)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if kwargs["num_classes"] != len(default_cfgs[arch]["classes"]) else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
model.cfg = _cfg
return model
def resnet18(pretrained: bool = False, **kwargs: Any) -> TVResNet:
"""ResNet-18 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_.
>>> import torch
>>> from doctr.models import resnet18
>>> model = resnet18(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A resnet18 model
"""
return _tv_resnet(
"resnet18",
pretrained,
tv_resnet18,
ignore_keys=["fc.weight", "fc.bias"],
**kwargs,
)
def resnet31(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""Resnet31 architecture with rectangular pooling windows as described in
`"Show, Attend and Read:A Simple and Strong Baseline for Irregular Text Recognition",
<https://arxiv.org/pdf/1811.00751.pdf>`_. Downsizing: (H, W) --> (H/8, W/4)
>>> import torch
>>> from doctr.models import resnet31
>>> model = resnet31(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A resnet31 model
"""
return _resnet(
"resnet31",
pretrained,
[1, 2, 5, 3],
[256, 256, 512, 512],
[1, 1, 1, 1],
[True] * 4,
[(2, 2), (2, 1), None, None],
origin_stem=False,
stem_channels=128,
ignore_keys=["13.weight", "13.bias"],
**kwargs,
)
def resnet34(pretrained: bool = False, **kwargs: Any) -> TVResNet:
"""ResNet-34 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_.
>>> import torch
>>> from doctr.models import resnet34
>>> model = resnet34(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A resnet34 model
"""
return _tv_resnet(
"resnet34",
pretrained,
tv_resnet34,
ignore_keys=["fc.weight", "fc.bias"],
**kwargs,
)
def resnet34_wide(pretrained: bool = False, **kwargs: Any) -> ResNet:
"""ResNet-34 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_ with twice as many output channels.
>>> import torch
>>> from doctr.models import resnet34_wide
>>> model = resnet34_wide(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A resnet34_wide model
"""
return _resnet(
"resnet34_wide",
pretrained,
[3, 4, 6, 3],
[128, 256, 512, 1024],
[1, 2, 2, 2],
[False] * 4,
[None] * 4,
origin_stem=True,
stem_channels=128,
ignore_keys=["10.weight", "10.bias"],
**kwargs,
)
def resnet50(pretrained: bool = False, **kwargs: Any) -> TVResNet:
"""ResNet-50 architecture as described in `"Deep Residual Learning for Image Recognition",
<https://arxiv.org/pdf/1512.03385.pdf>`_.
>>> import torch
>>> from doctr.models import resnet50
>>> model = resnet50(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A resnet50 model
"""
return _tv_resnet(
"resnet50",
pretrained,
tv_resnet50,
ignore_keys=["fc.weight", "fc.bias"],
**kwargs,
)
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import Sequential, layers
from tensorflow_addons.layers import GELU
from doctr.datasets import VOCABS
from doctr.models.modules.transformer import EncoderBlock
from doctr.models.modules.vision_transformer.tensorflow import PatchEmbedding
from doctr.utils.repr import NestedObject
from ...utils import load_pretrained_params
__all__ = ["vit_s", "vit_b"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"vit_s": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.1/vit_s-7a23bea4.zip&src=0",
},
"vit_b": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.1/vit_b-983c86b5.zip&src=0",
},
}
class ClassifierHead(layers.Layer, NestedObject):
"""Classifier head for Vision Transformer
Args:
num_classes: number of output classes
"""
def __init__(self, num_classes: int) -> None:
super().__init__()
self.head = layers.Dense(num_classes, kernel_initializer="he_normal", name="dense")
def call(self, x: tf.Tensor) -> tf.Tensor:
# (batch_size, num_classes) cls token
return self.head(x[:, 0])
class VisionTransformer(Sequential):
"""VisionTransformer architecture as described in
`"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
<https://arxiv.org/pdf/2010.11929.pdf>`_.
Args:
d_model: dimension of the transformer layers
num_layers: number of transformer layers
num_heads: number of attention heads
ffd_ratio: multiplier for the hidden dimension of the feedforward layer
input_shape: size of the input image
dropout: dropout rate
num_classes: number of output classes
include_top: whether the classifier head should be instantiated
"""
def __init__(
self,
d_model: int,
num_layers: int,
num_heads: int,
ffd_ratio: int,
input_shape: Tuple[int, int, int] = (32, 32, 3),
dropout: float = 0.0,
num_classes: int = 1000,
include_top: bool = True,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
_layers = [
PatchEmbedding(input_shape, d_model),
EncoderBlock(num_layers, num_heads, d_model, d_model * ffd_ratio, dropout, activation_fct=GELU()),
]
if include_top:
_layers.append(ClassifierHead(num_classes))
super().__init__(_layers)
self.cfg = cfg
def _vit(
arch: str,
pretrained: bool,
**kwargs: Any,
) -> VisionTransformer:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["input_shape"] = kwargs["input_shape"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
# Build the model
model = VisionTransformer(cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def vit_s(pretrained: bool = False, **kwargs: Any) -> VisionTransformer:
"""VisionTransformer-S architecture
`"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
<https://arxiv.org/pdf/2010.11929.pdf>`_. Patches: (H, W) -> (H/8, W/8)
NOTE: unofficial config used in ViTSTR and ParSeq
>>> import tensorflow as tf
>>> from doctr.models import vit_s
>>> model = vit_s(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 32, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A feature extractor model
"""
return _vit(
"vit_s",
pretrained,
d_model=384,
num_layers=12,
num_heads=6,
ffd_ratio=4,
**kwargs,
)
def vit_b(pretrained: bool = False, **kwargs: Any) -> VisionTransformer:
"""VisionTransformer-B architecture as described in
`"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
<https://arxiv.org/pdf/2010.11929.pdf>`_. Patches: (H, W) -> (H/8, W/8)
>>> import tensorflow as tf
>>> from doctr.models import vit_b
>>> model = vit_b(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 32, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A feature extractor model
"""
return _vit(
"vit_b",
pretrained,
d_model=768,
num_layers=12,
num_heads=12,
ffd_ratio=4,
**kwargs,
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import nn
from doctr.datasets import VOCABS
from doctr.models.modules.transformer import EncoderBlock
from doctr.models.modules.vision_transformer.pytorch import PatchEmbedding
from ...utils.pytorch import load_pretrained_params
__all__ = ["vit_s", "vit_b"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"vit_b": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.1/vit_b-103002d1.pt&src=0",
},
"vit_s": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.5.1/vit_s-cd3472bd.pt&src=0",
},
}
class ClassifierHead(nn.Module):
"""Classifier head for Vision Transformer
Args:
in_channels: number of input channels
num_classes: number of output classes
"""
def __init__(
self,
in_channels: int,
num_classes: int,
) -> None:
super().__init__()
self.head = nn.Linear(in_channels, num_classes)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# (batch_size, num_classes) cls token
return self.head(x[:, 0])
class VisionTransformer(nn.Sequential):
"""VisionTransformer architecture as described in
`"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
<https://arxiv.org/pdf/2010.11929.pdf>`_.
Args:
d_model: dimension of the transformer layers
num_layers: number of transformer layers
num_heads: number of attention heads
ffd_ratio: multiplier for the hidden dimension of the feedforward layer
input_shape: size of the input image
dropout: dropout rate
num_classes: number of output classes
include_top: whether the classifier head should be instantiated
"""
def __init__(
self,
d_model: int,
num_layers: int,
num_heads: int,
ffd_ratio: int,
input_shape: Tuple[int, int, int] = (3, 32, 32),
dropout: float = 0.0,
num_classes: int = 1000,
include_top: bool = True,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
_layers: List[nn.Module] = [
PatchEmbedding(input_shape, d_model),
EncoderBlock(num_layers, num_heads, d_model, d_model * ffd_ratio, dropout, nn.GELU()),
]
if include_top:
_layers.append(ClassifierHead(d_model, num_classes))
super().__init__(*_layers)
self.cfg = cfg
def _vit(
arch: str,
pretrained: bool,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> VisionTransformer:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["input_shape"] = kwargs["input_shape"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
# Build the model
model = VisionTransformer(cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if kwargs["num_classes"] != len(default_cfgs[arch]["classes"]) else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
return model
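# Illustrative sketch (not part of the original module): `_vit` lets `num_classes` override the
# default head size; with `pretrained=True`, the mismatching head weights would be skipped through
# `ignore_keys`. The tiny configuration below is an arbitrary assumption to keep the example light.
def _demo_custom_head():
    import torch

    model = _vit("vit_s", False, d_model=96, num_layers=2, num_heads=3, ffd_ratio=2, num_classes=10)
    # The attached cfg reflects the overridden head size
    assert model.cfg["num_classes"] == 10
    return model(torch.rand((1, 3, 32, 32), dtype=torch.float32))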
def vit_s(pretrained: bool = False, **kwargs: Any) -> VisionTransformer:
"""VisionTransformer-S architecture
`"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
<https://arxiv.org/pdf/2010.11929.pdf>`_. Patches: (H, W) -> (H/8, W/8)
NOTE: unofficial config used in ViTSTR and ParSeq
>>> import torch
>>> from doctr.models import vit_s
>>> model = vit_s(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 32, 32), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A feature extractor model
"""
return _vit(
"vit_s",
pretrained,
d_model=384,
num_layers=12,
num_heads=6,
ffd_ratio=4,
ignore_keys=["2.head.weight", "2.head.bias"],
**kwargs,
)
def vit_b(pretrained: bool = False, **kwargs: Any) -> VisionTransformer:
"""VisionTransformer-B architecture as described in
`"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale",
<https://arxiv.org/pdf/2010.11929.pdf>`_. Patches: (H, W) -> (H/8, W/8)
>>> import torch
>>> from doctr.models import vit_b
>>> model = vit_b(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 32, 32), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
A feature extractor model
"""
return _vit(
"vit_b",
pretrained,
d_model=768,
num_layers=12,
num_heads=12,
ffd_ratio=4,
ignore_keys=["2.head.weight", "2.head.bias"],
**kwargs,
)
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from doctr.models.preprocessor import PreProcessor
from doctr.utils.repr import NestedObject
__all__ = ["CropOrientationPredictor"]
class CropOrientationPredictor(NestedObject):
"""Implements an object able to detect the reading direction of a text box.
4 possible orientations: 0, 90, 180, 270 degrees counter clockwise.
Args:
pre_processor: transform inputs for easier batched model inference
model: core classification architecture (backbone + classification head)
"""
_children_names: List[str] = ["pre_processor", "model"]
def __init__(
self,
pre_processor: PreProcessor,
model: keras.Model,
) -> None:
self.pre_processor = pre_processor
self.model = model
def __call__(
self,
crops: List[Union[np.ndarray, tf.Tensor]],
) -> List[int]:
# Dimension check
if any(crop.ndim != 3 for crop in crops):
raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
processed_batches = self.pre_processor(crops)
predicted_batches = [self.model(batch, training=False) for batch in processed_batches]
# Postprocess predictions
predicted_batches = [out_batch.numpy().argmax(1) for out_batch in predicted_batches]
return [int(pred) for batch in predicted_batches for pred in batch]
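# Illustrative sketch (not part of the original module): wiring the predictor with a classification
# backbone. The backbone choice and the 128x128 preprocessing size are assumptions for the example,
# not requirements of this class.
def _demo_crop_orientation():
    from doctr.models import mobilenet_v3_small_orientation

    predictor = CropOrientationPredictor(
        pre_processor=PreProcessor(output_size=(128, 128), batch_size=2),
        model=mobilenet_v3_small_orientation(pretrained=False),
    )
    crops = [np.full((64, 128, 3), 255, dtype=np.uint8)]
    # Returns one orientation class index per crop (0, 90, 180 or 270 degrees)
    return predictor(crops)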
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
__all__ = ["CropOrientationPredictor"]
class CropOrientationPredictor(nn.Module):
"""Implements an object able to detect the reading direction of a text box.
4 possible orientations: 0, 90, 180, 270 degrees counter clockwise.
Args:
pre_processor: transform inputs for easier batched model inference
model: core classification architecture (backbone + classification head)
"""
def __init__(
self,
pre_processor: PreProcessor,
model: nn.Module,
) -> None:
super().__init__()
self.pre_processor = pre_processor
self.model = model.eval()
@torch.no_grad()
def forward(
self,
crops: List[Union[np.ndarray, torch.Tensor]],
) -> List[int]:
# Dimension check
if any(crop.ndim != 3 for crop in crops):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
processed_batches = self.pre_processor(crops)
_device = next(self.model.parameters()).device
predicted_batches = [self.model(batch.to(device=_device)).to(device=_device) for batch in processed_batches]
# Postprocess predictions
predicted_batches = [out_batch.argmax(dim=1).cpu().detach().numpy() for out_batch in predicted_batches]
return [int(pred) for batch in predicted_batches for pred in batch]
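# Illustrative sketch (not part of the original module): same wiring as the TensorFlow variant.
# The backbone choice and the 128x128 preprocessing size are assumptions for the example.
def _demo_crop_orientation():
    from doctr.models import mobilenet_v3_small_orientation

    predictor = CropOrientationPredictor(
        pre_processor=PreProcessor(output_size=(128, 128), batch_size=2),
        model=mobilenet_v3_small_orientation(pretrained=False),
    )
    crops = [np.full((64, 128, 3), 255, dtype=np.uint8)]
    # Returns one orientation class index per crop (0, 90, 180 or 270 degrees)
    return predictor(crops)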
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from doctr.datasets import VOCABS
from ...utils import conv_sequence, load_pretrained_params
__all__ = ["VGG", "vgg16_bn_r"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"vgg16_bn_r": {
"mean": (0.5, 0.5, 0.5),
"std": (1.0, 1.0, 1.0),
"input_shape": (32, 32, 3),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/vgg16_bn_r-c5836cea.zip&src=0",
},
}
class VGG(Sequential):
"""Implements the VGG architecture from `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/pdf/1409.1556.pdf>`_.
Args:
num_blocks: number of convolutional block in each stage
planes: number of output channels in each stage
rect_pools: whether square pooling kernels should be replaced with rectangular ones
include_top: whether the classifier head should be instantiated
num_classes: number of output classes
input_shape: shapes of the input tensor
"""
def __init__(
self,
num_blocks: List[int],
planes: List[int],
rect_pools: List[bool],
include_top: bool = False,
num_classes: int = 1000,
input_shape: Optional[Tuple[int, int, int]] = None,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
_layers = []
# Specify input_shape only for the first layer
kwargs = {"input_shape": input_shape}
for nb_blocks, out_chan, rect_pool in zip(num_blocks, planes, rect_pools):
for _ in range(nb_blocks):
_layers.extend(conv_sequence(out_chan, "relu", True, kernel_size=3, **kwargs)) # type: ignore[arg-type]
kwargs = {}
_layers.append(layers.MaxPooling2D((2, 1 if rect_pool else 2)))
if include_top:
_layers.extend([layers.GlobalAveragePooling2D(), layers.Dense(num_classes)])
super().__init__(_layers)
self.cfg = cfg
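# Illustrative sketch (not part of the original module): a toy two-stage VGG. The block counts,
# widths and input size below are arbitrary assumptions for the example.
def _demo_tiny_vgg():
    import tensorflow as tf

    model = VGG(
        num_blocks=[1, 1],
        planes=[16, 32],
        rect_pools=[False, True],
        include_top=True,
        num_classes=10,
        input_shape=(32, 32, 3),
    )
    # Returns the (1, 10) logits for a random input
    return model(tf.random.uniform((1, 32, 32, 3), maxval=1, dtype=tf.float32))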
def _vgg(
arch: str, pretrained: bool, num_blocks: List[int], planes: List[int], rect_pools: List[bool], **kwargs: Any
) -> VGG:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
_cfg["input_shape"] = kwargs["input_shape"]
kwargs.pop("classes")
# Build the model
model = VGG(num_blocks, planes, rect_pools, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def vgg16_bn_r(pretrained: bool = False, **kwargs: Any) -> VGG:
"""VGG-16 architecture as described in `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/pdf/1409.1556.pdf>`_, modified by adding batch normalization, rectangular pooling and a simpler
classification head.
>>> import tensorflow as tf
>>> from doctr.models import vgg16_bn_r
>>> model = vgg16_bn_r(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 512, 512, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
VGG feature extractor
"""
return _vgg(
"vgg16_bn_r", pretrained, [2, 2, 3, 3, 3], [64, 128, 256, 512, 512], [False, False, True, True, True], **kwargs
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional
from torch import nn
from torchvision.models import vgg as tv_vgg
from doctr.datasets import VOCABS
from ...utils import load_pretrained_params
__all__ = ["vgg16_bn_r"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"vgg16_bn_r": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 32),
"classes": list(VOCABS["french"]),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/vgg16_bn_r-d108c19c.pt&src=0",
},
}
def _vgg(
arch: str,
pretrained: bool,
tv_arch: str,
num_rect_pools: int = 3,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> tv_vgg.VGG:
kwargs["num_classes"] = kwargs.get("num_classes", len(default_cfgs[arch]["classes"]))
kwargs["classes"] = kwargs.get("classes", default_cfgs[arch]["classes"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["num_classes"] = kwargs["num_classes"]
_cfg["classes"] = kwargs["classes"]
kwargs.pop("classes")
# Build the model
model = tv_vgg.__dict__[tv_arch](**kwargs)
# List the MaxPool2d
pool_idcs = [idx for idx, m in enumerate(model.features) if isinstance(m, nn.MaxPool2d)]
# Replace their kernel with rectangular ones
for idx in pool_idcs[-num_rect_pools:]:
model.features[idx] = nn.MaxPool2d((2, 1))
# Patch average pool & classification head
model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
model.classifier = nn.Linear(512, kwargs["num_classes"])
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if kwargs["num_classes"] != len(default_cfgs[arch]["classes"]) else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
model.cfg = _cfg
return model
def vgg16_bn_r(pretrained: bool = False, **kwargs: Any) -> tv_vgg.VGG:
"""VGG-16 architecture as described in `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/pdf/1409.1556.pdf>`_, modified by adding batch normalization, rectangular pooling and a simpler
classification head.
>>> import torch
>>> from doctr.models import vgg16_bn_r
>>> model = vgg16_bn_r(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 512, 512), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained: boolean, True if model is pretrained
Returns:
VGG feature extractor
"""
return _vgg(
"vgg16_bn_r",
pretrained,
"vgg16_bn",
3,
ignore_keys=["classifier.weight", "classifier.bias"],
**kwargs,
)
|
from .barcode import *
from .face import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Tuple
import cv2
import numpy as np
__all__ = ["BarCodeDetector"]
class BarCodeDetector:
"""Implements a Bar-code detector.
For now, only horizontal (or with a small angle) bar-codes are supported
Args:
min_size: minimum relative size of a barcode on the page
canny_minval: lower bound for canny hysteresis
canny_maxval: upper-bound for canny hysteresis
"""
def __init__(self, min_size: float = 1 / 6, canny_minval: int = 50, canny_maxval: int = 150) -> None:
self.min_size = min_size
self.canny_minval = canny_minval
self.canny_maxval = canny_maxval
def __call__(
self,
img: np.ndarray,
) -> List[Tuple[float, float, float, float]]:
"""Detect Barcodes on the image
Args:
img: np image
Returns:
A list of tuples: [(xmin, ymin, xmax, ymax), ...] containing barcodes rel. coordinates
"""
# get image size and define parameters
height, width = img.shape[:2]
k = (1 + int(width / 512)) * 10 # spatial extension of kernels, 512 -> 20, 1024 -> 30, ...
min_w = int(width * self.min_size) # minimal size of a possible barcode
# Detect edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, self.canny_minval, self.canny_maxval, apertureSize=3)
# Horizontal dilation to aggregate bars of the potential barcode
# without aggregating text lines of the page vertically
edges = cv2.dilate(edges, np.ones((1, k), np.uint8))
# Instantiate a barcode-shaped kernel and erode to keep only vertical-bar structures
bar_code_kernel: np.ndarray = np.zeros((k, 3), np.uint8)
bar_code_kernel[..., [0, 2]] = 1
edges = cv2.erode(edges, bar_code_kernel, iterations=1)
# Opening to remove noise
edges = cv2.morphologyEx(edges, cv2.MORPH_OPEN, np.ones((k, k), np.uint8))
# Dilation to retrieve vertical length (lost at the first dilation)
edges = cv2.dilate(edges, np.ones((k, 1), np.uint8))
# Find contours, and keep the widest as barcodes
contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
barcodes = []
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
if w >= min_w:
barcodes.append((x / width, y / height, (x + w) / width, (y + h) / height))
return barcodes
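# Illustrative sketch (not part of the original module): running the detector on a page. A blank
# synthetic image is used so the example is self-contained; in practice the input would typically
# be a BGR page loaded with cv2.imread.
def _demo_barcode_detection():
    detector = BarCodeDetector(min_size=1 / 6)
    img = np.zeros((512, 512, 3), dtype=np.uint8)
    # Each detection is a (xmin, ymin, xmax, ymax) tuple in relative coordinates (empty here)
    return detector(img)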
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Tuple
import cv2
import numpy as np
from doctr.utils.repr import NestedObject
__all__ = ["FaceDetector"]
class FaceDetector(NestedObject):
"""Implements a face detector to detect profile pictures on resumes, IDS, driving licenses, passports...
Based on open CV CascadeClassifier (haarcascades)
Args:
n_faces: maximal number of faces to detect on a single image, default = 1
"""
def __init__(
self,
n_faces: int = 1,
) -> None:
self.n_faces = n_faces
# Instantiate classifier
self.detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
def extra_repr(self) -> str:
return f"n_faces={self.n_faces}"
def __call__(
self,
img: np.ndarray,
) -> List[Tuple[float, float, float, float]]:
"""Detect n_faces on the img
Args:
img: image to detect faces on
Returns:
A list of size n_faces, each face is a tuple of relative xmin, ymin, xmax, ymax
"""
height, width = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = self.detector.detectMultiScale(gray, 1.5, 3)
# If faces are detected, keep only the biggest ones
rel_faces = []
if len(faces) > 0:
x, y, w, h = sorted(faces, key=lambda x: x[2] + x[3])[-min(self.n_faces, len(faces))]
xmin, ymin, xmax, ymax = x / width, y / height, (x + w) / width, (y + h) / height
rel_faces.append((xmin, ymin, xmax, ymax))
return rel_faces
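# Illustrative sketch (not part of the original module): detecting the largest face on a page.
# A blank synthetic image is used so the example is self-contained; in practice the input would
# typically be a BGR page loaded with cv2.imread.
def _demo_face_detection():
    detector = FaceDetector(n_faces=1)
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    # Returns at most n_faces boxes as relative (xmin, ymin, xmax, ymax) tuples (empty here)
    return detector(img)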
|
from doctr.file_utils import is_tf_available
if is_tf_available():
from .tensorflow import *
else:
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict, List, Union
import numpy as np
import tensorflow as tf
from doctr.io.elements import Document
from doctr.models._utils import estimate_orientation, get_language, invert_data_structure
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.utils.geometry import rotate_boxes, rotate_image
from doctr.utils.repr import NestedObject
from .base import _KIEPredictor
__all__ = ["KIEPredictor"]
class KIEPredictor(NestedObject, _KIEPredictor):
"""Implements an object able to localize and identify text elements in a set of documents
Args:
det_predictor: detection module
reco_predictor: recognition module
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
straighten_pages: if True, estimates the page general orientation based on the median line orientation.
Then, rotates the page before passing it to the deep learning modules. The final predictions will be remapped
accordingly. Doing so will improve performance for documents with page-uniform rotations.
detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
page. Doing so will slightly increase the overall latency.
detect_language: if True, the language prediction will be added to the predictions for each
page. Doing so will slightly increase the overall latency.
kwargs: keyword args of `DocumentBuilder`
"""
_children_names = ["det_predictor", "reco_predictor", "doc_builder"]
def __init__(
self,
det_predictor: DetectionPredictor,
reco_predictor: RecognitionPredictor,
assume_straight_pages: bool = True,
straighten_pages: bool = False,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs: Any,
) -> None:
self.det_predictor = det_predictor
self.reco_predictor = reco_predictor
_KIEPredictor.__init__(
self, assume_straight_pages, straighten_pages, preserve_aspect_ratio, symmetric_pad, **kwargs
)
self.detect_orientation = detect_orientation
self.detect_language = detect_language
def __call__(
self,
pages: List[Union[np.ndarray, tf.Tensor]],
**kwargs: Any,
) -> Document:
# Dimension check
if any(page.ndim != 3 for page in pages):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
origin_page_shapes = [page.shape[:2] for page in pages]
# Detect document rotation and rotate pages
if self.detect_orientation:
origin_page_orientations = [estimate_orientation(page) for page in pages]
orientations = [
{"value": orientation_page, "confidence": 1.0} for orientation_page in origin_page_orientations
]
else:
orientations = None
if self.straighten_pages:
origin_page_orientations = (
origin_page_orientations if self.detect_orientation else [estimate_orientation(page) for page in pages]
)
pages = [rotate_image(page, -angle, expand=True) for page, angle in zip(pages, origin_page_orientations)]
# Localize text elements
loc_preds = self.det_predictor(pages, **kwargs)
dict_loc_preds: Dict[str, List[np.ndarray]] = invert_data_structure(loc_preds) # type: ignore[assignment]
# Rectify crops if aspect ratio
dict_loc_preds = {k: self._remove_padding(pages, loc_pred) for k, loc_pred in dict_loc_preds.items()}
# Crop images
crops = {}
for class_name in dict_loc_preds.keys():
crops[class_name], dict_loc_preds[class_name] = self._prepare_crops(
pages, dict_loc_preds[class_name], channels_last=True, assume_straight_pages=self.assume_straight_pages
)
# Rectify crop orientation
if not self.assume_straight_pages:
for class_name in dict_loc_preds.keys():
crops[class_name], dict_loc_preds[class_name] = self._rectify_crops(
crops[class_name], dict_loc_preds[class_name]
)
# Identify character sequences
word_preds = {
k: self.reco_predictor([crop for page_crops in crop_value for crop in page_crops], **kwargs)
for k, crop_value in crops.items()
}
boxes: Dict = {}
text_preds: Dict = {}
for class_name in dict_loc_preds.keys():
boxes[class_name], text_preds[class_name] = self._process_predictions(
dict_loc_preds[class_name], word_preds[class_name]
)
boxes_per_page: List[Dict] = invert_data_structure(boxes) # type: ignore[assignment]
text_preds_per_page: List[Dict] = invert_data_structure(text_preds) # type: ignore[assignment]
if self.detect_language:
languages = [get_language(self.get_text(text_pred)) for text_pred in text_preds_per_page]
languages_dict = [{"value": lang[0], "confidence": lang[1]} for lang in languages]
else:
languages_dict = None
# Rotate back pages and boxes while keeping original image size
if self.straighten_pages:
boxes_per_page = [
{
k: rotate_boxes(
page_boxes,
angle,
orig_shape=page.shape[:2] if isinstance(page, np.ndarray) else page.shape[-2:],
target_shape=mask, # type: ignore[arg-type]
)
for k, page_boxes in page_boxes_dict.items()
}
for page_boxes_dict, page, angle, mask in zip(
boxes_per_page, pages, origin_page_orientations, origin_page_shapes
)
]
out = self.doc_builder(
boxes_per_page,
text_preds_per_page,
origin_page_shapes, # type: ignore[arg-type]
orientations,
languages_dict,
)
return out
@staticmethod
def get_text(text_pred: Dict) -> str:
text = []
for value in text_pred.values():
text += [item[0] for item in value]
return " ".join(text)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict, List, Union
import numpy as np
import torch
from torch import nn
from doctr.io.elements import Document
from doctr.models._utils import estimate_orientation, get_language, invert_data_structure
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.utils.geometry import rotate_boxes, rotate_image
from .base import _KIEPredictor
__all__ = ["KIEPredictor"]
class KIEPredictor(nn.Module, _KIEPredictor):
"""Implements an object able to localize and identify text elements in a set of documents
Args:
det_predictor: detection module
reco_predictor: recognition module
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
straighten_pages: if True, estimates the page general orientation based on the median line orientation.
Then, rotates the page before passing it to the deep learning modules. The final predictions will be remapped
accordingly. Doing so will improve performance for documents with page-uniform rotations.
detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
page. Doing so will slightly increase the overall latency.
detect_language: if True, the language prediction will be added to the predictions for each
page. Doing so will slightly increase the overall latency.
kwargs: keyword args of `DocumentBuilder`
"""
def __init__(
self,
det_predictor: DetectionPredictor,
reco_predictor: RecognitionPredictor,
assume_straight_pages: bool = True,
straighten_pages: bool = False,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs: Any,
) -> None:
nn.Module.__init__(self)
self.det_predictor = det_predictor.eval() # type: ignore[attr-defined]
self.reco_predictor = reco_predictor.eval() # type: ignore[attr-defined]
_KIEPredictor.__init__(
self, assume_straight_pages, straighten_pages, preserve_aspect_ratio, symmetric_pad, **kwargs
)
self.detect_orientation = detect_orientation
self.detect_language = detect_language
@torch.no_grad()
def forward(
self,
pages: List[Union[np.ndarray, torch.Tensor]],
**kwargs: Any,
) -> Document:
# Dimension check
if any(page.ndim != 3 for page in pages):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
origin_page_shapes = [page.shape[:2] if isinstance(page, np.ndarray) else page.shape[-2:] for page in pages]
# Detect document rotation and rotate pages
if self.detect_orientation:
origin_page_orientations = [estimate_orientation(page) for page in pages] # type: ignore[arg-type]
orientations = [
{"value": orientation_page, "confidence": 1.0} for orientation_page in origin_page_orientations
]
else:
orientations = None
if self.straighten_pages:
origin_page_orientations = (
origin_page_orientations
if self.detect_orientation
else [estimate_orientation(page) for page in pages] # type: ignore[arg-type]
)
pages = [
rotate_image(page, -angle, expand=True) # type: ignore[arg-type]
for page, angle in zip(pages, origin_page_orientations)
]
# Localize text elements
loc_preds = self.det_predictor(pages, **kwargs)
dict_loc_preds: Dict[str, List[np.ndarray]] = invert_data_structure(loc_preds) # type: ignore[assignment]
# Check whether crop mode should be switched to channels first
channels_last = len(pages) == 0 or isinstance(pages[0], np.ndarray)
# Rectify crops if aspect ratio
dict_loc_preds = {
k: self._remove_padding(pages, loc_pred) for k, loc_pred in dict_loc_preds.items() # type: ignore[arg-type]
}
# Crop images
crops = {}
for class_name in dict_loc_preds.keys():
crops[class_name], dict_loc_preds[class_name] = self._prepare_crops(
pages, # type: ignore[arg-type]
dict_loc_preds[class_name],
channels_last=channels_last,
assume_straight_pages=self.assume_straight_pages,
)
# Rectify crop orientation
if not self.assume_straight_pages:
for class_name in dict_loc_preds.keys():
crops[class_name], dict_loc_preds[class_name] = self._rectify_crops(
crops[class_name], dict_loc_preds[class_name]
)
# Identify character sequences
word_preds = {
k: self.reco_predictor([crop for page_crops in crop_value for crop in page_crops], **kwargs)
for k, crop_value in crops.items()
}
boxes: Dict = {}
text_preds: Dict = {}
for class_name in dict_loc_preds.keys():
boxes[class_name], text_preds[class_name] = self._process_predictions(
dict_loc_preds[class_name], word_preds[class_name]
)
boxes_per_page: List[Dict] = invert_data_structure(boxes) # type: ignore[assignment]
text_preds_per_page: List[Dict] = invert_data_structure(text_preds) # type: ignore[assignment]
if self.detect_language:
languages = [get_language(self.get_text(text_pred)) for text_pred in text_preds_per_page]
languages_dict = [{"value": lang[0], "confidence": lang[1]} for lang in languages]
else:
languages_dict = None
# Rotate back pages and boxes while keeping original image size
if self.straighten_pages:
boxes_per_page = [
{
k: rotate_boxes(
page_boxes,
angle,
orig_shape=page.shape[:2]
if isinstance(page, np.ndarray)
else page.shape[1:], # type: ignore[arg-type]
target_shape=mask, # type: ignore[arg-type]
)
for k, page_boxes in page_boxes_dict.items()
}
for page_boxes_dict, page, angle, mask in zip(
boxes_per_page, pages, origin_page_orientations, origin_page_shapes
)
]
out = self.doc_builder(
boxes_per_page,
text_preds_per_page,
[page.shape[:2] if channels_last else page.shape[-2:] for page in pages], # type: ignore[misc]
orientations,
languages_dict,
)
return out
@staticmethod
def get_text(text_pred: Dict) -> str:
text = []
for value in text_pred.values():
text += [item[0] for item in value]
return " ".join(text)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Optional
from doctr.models.builder import KIEDocumentBuilder
from ..classification.predictor import CropOrientationPredictor
from ..predictor.base import _OCRPredictor
__all__ = ["_KIEPredictor"]
class _KIEPredictor(_OCRPredictor):
"""Implements an object able to localize and identify text elements in a set of documents
Args:
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
straighten_pages: if True, estimates the page general orientation based on the median line orientation.
Then, rotates the page before passing it to the deep learning modules. The final predictions will be remapped
accordingly. Doing so will improve performance for documents with page-uniform rotations.
preserve_aspect_ratio: if True, resize preserving the aspect ratio (with padding)
symmetric_pad: if True and preserve_aspect_ratio is True, pad the image symmetrically.
kwargs: keyword args of `DocumentBuilder`
"""
crop_orientation_predictor: Optional[CropOrientationPredictor]
def __init__(
self,
assume_straight_pages: bool = True,
straighten_pages: bool = False,
preserve_aspect_ratio: bool = True,
symmetric_pad: bool = True,
**kwargs: Any,
) -> None:
super().__init__(assume_straight_pages, straighten_pages, preserve_aspect_ratio, symmetric_pad, **kwargs)
self.doc_builder: KIEDocumentBuilder = KIEDocumentBuilder(**kwargs)
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import logging
import os
from typing import Any, Callable, List, Optional, Tuple, Union
from zipfile import ZipFile
import tensorflow as tf
import tf2onnx
from tensorflow.keras import Model, layers
from doctr.utils.data import download_from_url
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
__all__ = ["load_pretrained_params", "conv_sequence", "IntermediateLayerGetter", "export_model_to_onnx"]
def load_pretrained_params(
model: Model,
url: Optional[str] = None,
hash_prefix: Optional[str] = None,
overwrite: bool = False,
internal_name: str = "weights",
**kwargs: Any,
) -> None:
"""Load a set of parameters onto a model
>>> from doctr.models import load_pretrained_params
>>> load_pretrained_params(model, "https://yoursource.com/yourcheckpoint-yourhash.zip")
Args:
model: the keras model to be loaded
url: URL of the zipped set of parameters
hash_prefix: first characters of SHA256 expected hash
overwrite: should the zip extraction be enforced if the archive has already been extracted
internal_name: name of the ckpt files
"""
if url is None:
logging.warning("Invalid model URL, using default initialization.")
else:
archive_path = download_from_url(url, hash_prefix=hash_prefix, cache_subdir="models", **kwargs)
# Unzip the archive
params_path = archive_path.parent.joinpath(archive_path.stem)
if not params_path.is_dir() or overwrite:
with ZipFile(archive_path, "r") as f:
f.extractall(path=params_path)
# Load weights
model.load_weights(f"{params_path}{os.sep}{internal_name}")
def conv_sequence(
out_channels: int,
activation: Optional[Union[str, Callable]] = None,
bn: bool = False,
padding: str = "same",
kernel_initializer: str = "he_normal",
**kwargs: Any,
) -> List[layers.Layer]:
"""Builds a convolutional-based layer sequence
>>> from tensorflow.keras import Sequential
>>> from doctr.models import conv_sequence
>>> module = Sequential(conv_sequence(32, 'relu', True, kernel_size=3, input_shape=[224, 224, 3]))
Args:
out_channels: number of output channels
activation: activation to be used (default: no activation)
bn: should a batch normalization layer be added
padding: padding scheme
kernel_initializer: kernel initializer
Returns:
list of layers
"""
# No bias before Batch norm
kwargs["use_bias"] = kwargs.get("use_bias", not bn)
# Add activation directly to the conv if there is no BN
kwargs["activation"] = activation if not bn else None
conv_seq = [layers.Conv2D(out_channels, padding=padding, kernel_initializer=kernel_initializer, **kwargs)]
if bn:
conv_seq.append(layers.BatchNormalization())
if (isinstance(activation, str) or callable(activation)) and bn:
# Activation function can either be a string or a function ('relu' or tf.nn.relu)
conv_seq.append(layers.Activation(activation))
return conv_seq
class IntermediateLayerGetter(Model):
"""Implements an intermediate layer getter
>>> from tensorflow.keras.applications import ResNet50
>>> from doctr.models import IntermediateLayerGetter
>>> target_layers = ["conv2_block3_out", "conv3_block4_out", "conv4_block6_out", "conv5_block3_out"]
>>> feat_extractor = IntermediateLayerGetter(ResNet50(include_top=False, pooling=False), target_layers)
Args:
model: the model to extract feature maps from
layer_names: the list of layers to retrieve the feature map from
"""
def __init__(self, model: Model, layer_names: List[str]) -> None:
intermediate_fmaps = [model.get_layer(layer_name).get_output_at(0) for layer_name in layer_names]
super().__init__(model.input, outputs=intermediate_fmaps)
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
def export_model_to_onnx(
model: Model, model_name: str, dummy_input: List[tf.TensorSpec], **kwargs: Any
) -> Tuple[str, List[str]]:
"""Export model to ONNX format.
>>> import tensorflow as tf
>>> from doctr.models.classification import resnet18
    >>> from doctr.models.utils import export_model_to_onnx
>>> model = resnet18(pretrained=True, include_top=True)
>>> export_model_to_onnx(model, "my_model",
>>> dummy_input=[tf.TensorSpec([None, 32, 32, 3], tf.float32, name="input")])
Args:
model: the keras model to be exported
model_name: the name for the exported model
dummy_input: the dummy input to the model
kwargs: additional arguments to be passed to tf2onnx
Returns:
the path to the exported model and a list with the output layer names
"""
large_model = kwargs.get("large_model", False)
model_proto, _ = tf2onnx.convert.from_keras(
model,
        opset=14,  # minimum opset which supports all operators we use (v0.5.2)
input_signature=dummy_input,
output_path=f"{model_name}.zip" if large_model else f"{model_name}.onnx",
**kwargs,
)
# Get the output layer names
output = [n.name for n in model_proto.graph.output]
    # Models which are too large (weights > 2GB when converting to ONNX) need to be handled
    # with external tensor storage, where the graph and weights are stored separately in an archive
if large_model:
logging.info(f"Model exported to {model_name}.zip")
return f"{model_name}.zip", output
logging.info(f"Model exported to {model_name}.zip")
return f"{model_name}.onnx", output
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import logging
from typing import Any, List, Optional
import torch
from torch import nn
from doctr.utils.data import download_from_url
__all__ = ["load_pretrained_params", "conv_sequence_pt", "export_model_to_onnx"]
def load_pretrained_params(
model: nn.Module,
url: Optional[str] = None,
hash_prefix: Optional[str] = None,
overwrite: bool = False,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Load a set of parameters onto a model
>>> from doctr.models import load_pretrained_params
    >>> load_pretrained_params(model, "https://yoursource.com/yourcheckpoint-yourhash.pt")
Args:
model: the PyTorch model to be loaded
        url: URL of the set of parameters
hash_prefix: first characters of SHA256 expected hash
overwrite: should the zip extraction be enforced if the archive has already been extracted
ignore_keys: list of weights to be ignored from the state_dict
"""
if url is None:
logging.warning("Invalid model URL, using default initialization.")
else:
archive_path = download_from_url(url, hash_prefix=hash_prefix, cache_subdir="models", **kwargs)
# Read state_dict
state_dict = torch.load(archive_path, map_location="cpu")
# Remove weights from the state_dict
if ignore_keys is not None and len(ignore_keys) > 0:
for key in ignore_keys:
state_dict.pop(key)
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
if set(missing_keys) != set(ignore_keys) or len(unexpected_keys) > 0:
raise ValueError("unable to load state_dict, due to non-matching keys.")
else:
# Load weights
model.load_state_dict(state_dict)
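# Fine-tuning sketch (illustrative): when the checkpoint head does not match the new model
# (e.g. a different vocab size), the mismatched entries can be dropped with `ignore_keys`
# so that only the compatible weights are loaded. The key names below are hypothetical and
# depend on the architecture:
# >>> load_pretrained_params(model, url, ignore_keys=["classifier.weight", "classifier.bias"])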
def conv_sequence_pt(
in_channels: int,
out_channels: int,
relu: bool = False,
bn: bool = False,
**kwargs: Any,
) -> List[nn.Module]:
"""Builds a convolutional-based layer sequence
>>> from torch.nn import Sequential
    >>> from doctr.models.utils import conv_sequence_pt
    >>> module = Sequential(conv_sequence_pt(3, 32, True, True, kernel_size=3))
    Args:
        in_channels: number of input channels
        out_channels: number of output channels
relu: whether ReLU should be used
bn: should a batch normalization layer be added
Returns:
list of layers
"""
# No bias before Batch norm
kwargs["bias"] = kwargs.get("bias", not bn)
# Add activation directly to the conv if there is no BN
conv_seq: List[nn.Module] = [nn.Conv2d(in_channels, out_channels, **kwargs)]
if bn:
conv_seq.append(nn.BatchNorm2d(out_channels))
if relu:
conv_seq.append(nn.ReLU(inplace=True))
return conv_seq
def export_model_to_onnx(model: nn.Module, model_name: str, dummy_input: torch.Tensor, **kwargs: Any) -> str:
"""Export model to ONNX format.
>>> import torch
>>> from doctr.models.classification import resnet18
>>> from doctr.models.utils import export_model_to_onnx
>>> model = resnet18(pretrained=True)
>>> export_model_to_onnx(model, "my_model", dummy_input=torch.randn(1, 3, 32, 32))
Args:
model: the PyTorch model to be exported
model_name: the name for the exported model
dummy_input: the dummy input to the model
kwargs: additional arguments to be passed to torch.onnx.export
Returns:
the path to the exported model
"""
torch.onnx.export(
model,
dummy_input,
f"{model_name}.onnx",
input_names=["input"],
output_names=["logits"],
dynamic_axes={"input": {0: "batch_size"}, "logits": {0: "batch_size"}},
export_params=True,
        opset_version=14,  # minimum opset which supports all operators we use (v0.5.2)
verbose=False,
**kwargs,
)
logging.info(f"Model exported to {model_name}.onnx")
return f"{model_name}.onnx"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List
from doctr.file_utils import is_tf_available
from doctr.models.preprocessor import PreProcessor
from .. import recognition
from .predictor import RecognitionPredictor
__all__ = ["recognition_predictor"]
ARCHS: List[str] = [
"crnn_vgg16_bn",
"crnn_mobilenet_v3_small",
"crnn_mobilenet_v3_large",
"sar_resnet31",
"master",
"vitstr_small",
"vitstr_base",
]
def _predictor(arch: Any, pretrained: bool, **kwargs: Any) -> RecognitionPredictor:
if isinstance(arch, str):
if arch not in ARCHS:
raise ValueError(f"unknown architecture '{arch}'")
_model = recognition.__dict__[arch](
pretrained=pretrained, pretrained_backbone=kwargs.get("pretrained_backbone", True)
)
else:
if not isinstance(arch, (recognition.CRNN, recognition.SAR, recognition.MASTER, recognition.ViTSTR)):
raise ValueError(f"unknown architecture: {type(arch)}")
_model = arch
kwargs.pop("pretrained_backbone", None)
kwargs["mean"] = kwargs.get("mean", _model.cfg["mean"])
kwargs["std"] = kwargs.get("std", _model.cfg["std"])
kwargs["batch_size"] = kwargs.get("batch_size", 32)
input_shape = _model.cfg["input_shape"][:2] if is_tf_available() else _model.cfg["input_shape"][-2:]
predictor = RecognitionPredictor(PreProcessor(input_shape, preserve_aspect_ratio=True, **kwargs), _model)
return predictor
def recognition_predictor(arch: Any = "crnn_vgg16_bn", pretrained: bool = False, **kwargs: Any) -> RecognitionPredictor:
"""Text recognition architecture.
Example::
>>> import numpy as np
>>> from doctr.models import recognition_predictor
>>> model = recognition_predictor(pretrained=True)
>>> input_page = (255 * np.random.rand(32, 128, 3)).astype(np.uint8)
>>> out = model([input_page])
Args:
arch: name of the architecture or model itself to use (e.g. 'crnn_vgg16_bn')
pretrained: If True, returns a model pre-trained on our text recognition dataset
Returns:
Recognition predictor
"""
return _predictor(arch, pretrained, **kwargs)
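# Alternative usage sketch (illustrative): `arch` can also be an already-instantiated
# recognition model instead of a string, in which case its own cfg (mean/std, input_shape)
# drives the preprocessing:
# >>> from doctr.models.recognition import crnn_vgg16_bn
# >>> predictor = recognition_predictor(arch=crnn_vgg16_bn(pretrained=True))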
|
from .crnn import *
from .master import *
from .sar import *
from .vitstr import *
from .zoo import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Tuple
import numpy as np
from doctr.datasets import encode_sequences
from doctr.utils.repr import NestedObject
__all__ = ["RecognitionPostProcessor", "RecognitionModel"]
class RecognitionModel(NestedObject):
"""Implements abstract RecognitionModel class"""
vocab: str
max_length: int
def build_target(
self,
gts: List[str],
) -> Tuple[np.ndarray, List[int]]:
"""Encode a list of gts sequences into a np array and gives the corresponding*
sequence lengths.
Args:
gts: list of ground-truth labels
Returns:
A tuple of 2 tensors: Encoded labels and sequence lengths (for each entry of the batch)
"""
encoded = encode_sequences(sequences=gts, vocab=self.vocab, target_size=self.max_length, eos=len(self.vocab))
seq_len = [len(word) for word in gts]
return encoded, seq_len
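# Worked sketch (illustrative, hypothetical vocab): with vocab "abc" and max_length 4,
# build_target(["ab", "c"]) would map characters to their vocab indices and pad each row
# with the EOS index (len(vocab) == 3):
# encoded -> [[0, 1, 3, 3], [2, 3, 3, 3]], seq_len -> [2, 1]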
class RecognitionPostProcessor(NestedObject):
"""Abstract class to postprocess the raw output of the model
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __init__(
self,
vocab: str,
) -> None:
self.vocab = vocab
self._embedding = list(self.vocab) + ["<eos>"]
def extra_repr(self) -> str:
return f"vocab_size={len(self.vocab)}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List
from rapidfuzz.string_metric import levenshtein
__all__ = ["merge_strings", "merge_multi_strings"]
def merge_strings(a: str, b: str, dil_factor: float) -> str:
"""Merges 2 character sequences in the best way to maximize the alignment of their overlapping characters.
Args:
a: first char seq, suffix should be similar to b's prefix.
b: second char seq, prefix should be similar to a's suffix.
dil_factor: dilation factor of the boxes to overlap, should be > 1. This parameter is
            only used when the mother sequence is split on a character repetition
Returns:
A merged character sequence.
Example::
        >>> from doctr.models.recognition.utils import merge_strings
        >>> merge_strings('abcd', 'cdefgh', 1.4)
        'abcdefgh'
        >>> merge_strings('abcdi', 'cdefgh', 1.4)
'abcdefgh'
"""
seq_len = min(len(a), len(b))
if seq_len == 0: # One sequence is empty, return the other
        return b if len(a) == 0 else a
    # Initialize merging index and corresponding score (mean Levenshtein)
min_score, index = 1.0, 0 # No overlap, just concatenate
scores = [levenshtein(a[-i:], b[:i], processor=None) / i for i in range(1, seq_len + 1)]
# Edge case (split in the middle of char repetitions): if it starts with 2 or more 0
if len(scores) > 1 and (scores[0], scores[1]) == (0, 0):
# Compute n_overlap (number of overlapping chars, geometrically determined)
n_overlap = round(len(b) * (dil_factor - 1) / dil_factor)
# Find the number of consecutive zeros in the scores list
# Impossible to have a zero after a non-zero score in that case
n_zeros = sum(val == 0 for val in scores)
# Index is bounded by the geometrical overlap to avoid collapsing repetitions
min_score, index = 0, min(n_zeros, n_overlap)
else: # Common case: choose the min score index
for i, score in enumerate(scores):
if score < min_score:
min_score, index = score, i + 1 # Add one because first index is an overlap of 1 char
# Merge with correct overlap
if index == 0:
return a + b
return a[:-1] + b[index - 1 :]
def merge_multi_strings(seq_list: List[str], dil_factor: float) -> str:
"""Recursively merges consecutive string sequences with overlapping characters.
Args:
seq_list: list of sequences to merge. Sequences need to be ordered from left to right.
dil_factor: dilation factor of the boxes to overlap, should be > 1. This parameter is
            only used when the mother sequence is split on a character repetition
Returns:
A merged character sequence
Example::
        >>> from doctr.models.recognition.utils import merge_multi_strings
        >>> merge_multi_strings(['abc', 'bcdef', 'difghi', 'aijkl'], 1.4)
'abcdefghijkl'
"""
def _recursive_merge(a: str, seq_list: List[str], dil_factor: float) -> str:
# Recursive version of compute_overlap
if len(seq_list) == 1:
return merge_strings(a, seq_list[0], dil_factor)
return _recursive_merge(merge_strings(a, seq_list[0], dil_factor), seq_list[1:], dil_factor)
return _recursive_merge("", seq_list, dil_factor)
|