from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple, Union
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model, Sequential
from doctr.datasets import VOCABS
from ...classification import mobilenet_v3_large_r, mobilenet_v3_small_r, vgg16_bn_r
from ...utils.tensorflow import load_pretrained_params
from ..core import RecognitionModel, RecognitionPostProcessor
__all__ = ["CRNN", "crnn_vgg16_bn", "crnn_mobilenet_v3_small", "crnn_mobilenet_v3_large"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"crnn_vgg16_bn": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["legacy_french"],
"url": "https://doctr-static.mindee.com/models?id=v0.3.0/crnn_vgg16_bn-76b7f2c6.zip&src=0",
},
"crnn_mobilenet_v3_small": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["french"],
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/crnn_mobilenet_v3_small-7f36edec.zip&src=0",
},
"crnn_mobilenet_v3_large": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["french"],
"url": None,
},
}
class CTCPostProcessor(RecognitionPostProcessor):
"""
Postprocess raw prediction of the model (logits) to a list of words using CTC decoding
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __call__(
self,
logits: tf.Tensor,
beam_width: int = 1,
top_paths: int = 1,
) -> Union[List[Tuple[str, float]], List[Tuple[List[str], List[float]]]]:
"""
Performs decoding of raw output with CTC and decoding of CTC predictions
with label_to_idx mapping dictionary
Args:
logits: raw output of the model, shape BATCH_SIZE X SEQ_LEN X NUM_CLASSES + 1
beam_width: An int scalar >= 0 (beam search beam width).
top_paths: An int scalar >= 0, <= beam_width (controls output size).
Returns:
A list of length BATCH_SIZE: (word, confidence) tuples, or lists of those when top_paths > 1
"""
# Decode CTC
_decoded, _log_prob = tf.nn.ctc_beam_search_decoder(
tf.transpose(logits, perm=[1, 0, 2]),
tf.fill(tf.shape(logits)[:1], tf.shape(logits)[1]),
beam_width=beam_width,
top_paths=top_paths,
)
_decoded = tf.sparse.concat(
1,
[tf.sparse.expand_dims(dec, axis=1) for dec in _decoded],
expand_nonconcat_dims=True,
) # dim : batchsize x beamwidth x actual_max_len_predictions
out_idxs = tf.sparse.to_dense(_decoded, default_value=len(self.vocab))
# Map it to characters
_decoded_strings_pred = tf.strings.reduce_join(
inputs=tf.nn.embedding_lookup(tf.constant(self._embedding, dtype=tf.string), out_idxs),
axis=-1,
)
_decoded_strings_pred = tf.strings.split(_decoded_strings_pred, "<eos>")
decoded_strings_pred = tf.sparse.to_dense(_decoded_strings_pred.to_sparse(), default_value="not valid")[
:, :, 0
] # dim : batch_size x beam_width
if top_paths == 1:
probs = tf.math.exp(tf.squeeze(_log_prob, axis=1)) # dim : batchsize
decoded_strings_pred = tf.squeeze(decoded_strings_pred, axis=1)
word_values = [word.decode() for word in decoded_strings_pred.numpy().tolist()]
else:
probs = tf.math.exp(_log_prob) # dim : batchsize x beamwidth
word_values = [[word.decode() for word in words] for words in decoded_strings_pred.numpy().tolist()]
return list(zip(word_values, probs.numpy().tolist()))
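# Minimal usage sketch for the post-processor above, on dummy logits (names such as
# `dummy_logits` are hypothetical; assumes TensorFlow is installed):
# >>> import tensorflow as tf
# >>> from doctr.datasets import VOCABS
# >>> post_processor = CTCPostProcessor(vocab=VOCABS["french"])
# >>> dummy_logits = tf.random.normal((2, 32, len(VOCABS["french"]) + 1))  # B x T x (vocab + blank)
# >>> post_processor(dummy_logits, beam_width=2, top_paths=1)
# # -> a list of (word, confidence) tuples, one per image in the batch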
class CRNN(RecognitionModel, Model):
"""Implements a CRNN architecture as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary used for encoding
rnn_units: number of units in the LSTM layers
exportable: onnx exportable returns only logits
beam_width: beam width for beam search decoding
top_paths: number of top paths for beam search decoding
cfg: configuration dictionary
"""
_children_names: List[str] = ["feat_extractor", "decoder", "postprocessor"]
def __init__(
self,
feature_extractor: tf.keras.Model,
vocab: str,
rnn_units: int = 128,
exportable: bool = False,
beam_width: int = 1,
top_paths: int = 1,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
# Initialize kernels
h, w, c = feature_extractor.output_shape[1:]
super().__init__()
self.vocab = vocab
self.max_length = w
self.cfg = cfg
self.exportable = exportable
self.feat_extractor = feature_extractor
self.decoder = Sequential(
[
layers.Bidirectional(layers.LSTM(units=rnn_units, return_sequences=True)),
layers.Bidirectional(layers.LSTM(units=rnn_units, return_sequences=True)),
layers.Dense(units=len(vocab) + 1),
]
)
self.decoder.build(input_shape=(None, w, h * c))
self.postprocessor = CTCPostProcessor(vocab=vocab)
self.beam_width = beam_width
self.top_paths = top_paths
def compute_loss(
self,
model_output: tf.Tensor,
target: List[str],
) -> tf.Tensor:
"""Compute CTC loss for the model.
Args:
model_output: predicted logits of the model
target: list of ground-truth labels
Returns:
The loss of the model on the batch
"""
gt, seq_len = self.build_target(target)
batch_len = model_output.shape[0]
input_length = tf.fill((batch_len,), model_output.shape[1])
ctc_loss = tf.nn.ctc_loss(
gt, model_output, seq_len, input_length, logits_time_major=False, blank_index=len(self.vocab)
)
return ctc_loss
def call(
self,
x: tf.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
beam_width: int = 1,
top_paths: int = 1,
**kwargs: Any,
) -> Dict[str, Any]:
if kwargs.get("training", False) and target is None:
raise ValueError("Need to provide labels during training")
features = self.feat_extractor(x, **kwargs)
# B x H x W x C --> B x W x H x C
transposed_feat = tf.transpose(features, perm=[0, 2, 1, 3])
w, h, c = transposed_feat.get_shape().as_list()[1:]
# B x W x H x C --> B x W x H * C
features_seq = tf.reshape(transposed_feat, shape=(-1, w, h * c))
logits = self.decoder(features_seq, **kwargs)
out: Dict[str, tf.Tensor] = {}
if self.exportable:
out["logits"] = logits
return out
if return_model_output:
out["out_map"] = logits
if target is None or return_preds:
# Post-process predictions into words
out["preds"] = self.postprocessor(logits, beam_width=beam_width, top_paths=top_paths)
if target is not None:
out["loss"] = self.compute_loss(logits, target)
return out
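# Minimal training-style sketch for the `call` method above (hypothetical tensors and
# labels; assumes the label characters belong to the configured vocab):
# >>> import tensorflow as tf
# >>> from doctr.models import crnn_vgg16_bn
# >>> model = crnn_vgg16_bn(pretrained=False)
# >>> x = tf.random.uniform(shape=[2, 32, 128, 3], maxval=1, dtype=tf.float32)
# >>> out = model(x, target=["hello", "world"], return_preds=True)
# >>> out["loss"], out["preds"]  # per-sample CTC loss and (word, confidence) tuples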
def _crnn(
arch: str,
pretrained: bool,
backbone_fn,
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> CRNN:
pretrained_backbone = pretrained_backbone and not pretrained
kwargs["vocab"] = kwargs.get("vocab", default_cfgs[arch]["vocab"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["vocab"] = kwargs["vocab"]
_cfg["input_shape"] = input_shape or default_cfgs[arch]["input_shape"]
feat_extractor = backbone_fn(
input_shape=_cfg["input_shape"],
include_top=False,
pretrained=pretrained_backbone,
)
# Build the model
model = CRNN(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, _cfg["url"])
return model
def crnn_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> CRNN:
"""CRNN with a VGG-16 backbone as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import crnn_vgg16_bn
>>> model = crnn_vgg16_bn(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _crnn("crnn_vgg16_bn", pretrained, vgg16_bn_r, **kwargs)
def crnn_mobilenet_v3_small(pretrained: bool = False, **kwargs: Any) -> CRNN:
"""CRNN with a MobileNet V3 Small backbone as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import crnn_mobilenet_v3_small
>>> model = crnn_mobilenet_v3_small(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _crnn("crnn_mobilenet_v3_small", pretrained, mobilenet_v3_small_r, **kwargs)
def crnn_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> CRNN:
"""CRNN with a MobileNet V3 Large backbone as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import crnn_mobilenet_v3_large
>>> model = crnn_mobilenet_v3_large(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _crnn("crnn_mobilenet_v3_large", pretrained, mobilenet_v3_large_r, **kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from itertools import groupby
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from doctr.datasets import VOCABS, decode_sequence
from ...classification import mobilenet_v3_large_r, mobilenet_v3_small_r, vgg16_bn_r
from ...utils.pytorch import load_pretrained_params
from ..core import RecognitionModel, RecognitionPostProcessor
__all__ = ["CRNN", "crnn_vgg16_bn", "crnn_mobilenet_v3_small", "crnn_mobilenet_v3_large"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"crnn_vgg16_bn": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["legacy_french"],
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/crnn_vgg16_bn-9762b0b0.pt&src=0",
},
"crnn_mobilenet_v3_small": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["french"],
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/crnn_mobilenet_v3_small_pt-3b919a02.pt&src=0",
},
"crnn_mobilenet_v3_large": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["french"],
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/crnn_mobilenet_v3_large_pt-f5259ec2.pt&src=0",
},
}
class CTCPostProcessor(RecognitionPostProcessor):
"""
Postprocess raw prediction of the model (logits) to a list of words using CTC decoding
Args:
vocab: string containing the ordered sequence of supported characters
"""
@staticmethod
def ctc_best_path(
logits: torch.Tensor,
vocab: str = VOCABS["french"],
blank: int = 0,
) -> List[Tuple[str, float]]:
"""Implements best path decoding as shown by Graves (Dissertation, p63), highly inspired from
`CTCDecoder <https://github.com/githubharald/CTCDecoder>`_.
Args:
logits: model output, shape: N x T x C
vocab: vocabulary to use
blank: index of blank label
Returns:
A list of tuples: (word, confidence)
"""
# Gather the most confident characters, and assign the smallest conf among those to the sequence prob
probs = F.softmax(logits, dim=-1).max(dim=-1).values.min(dim=1).values
# collapse best path (using itertools.groupby), map to chars, join char list to string
words = [
decode_sequence([k for k, _ in groupby(seq.tolist()) if k != blank], vocab)
for seq in torch.argmax(logits, dim=-1)
]
return list(zip(words, probs.tolist()))
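# A toy illustration of the best-path collapse performed above with `groupby`
# (hypothetical indices; the blank index is assumed to be 2 here):
# >>> from itertools import groupby
# >>> raw = [0, 0, 2, 1, 1, 2, 2]             # per-timestep argmax indices
# >>> [k for k, _ in groupby(raw) if k != 2]  # repeats collapsed, blanks dropped
# [0, 1]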
def __call__(self, logits: torch.Tensor) -> List[Tuple[str, float]]:
"""
Performs decoding of raw output with CTC and decoding of CTC predictions
with label_to_idx mapping dictionary
Args:
logits: raw output of the model, shape (N, seq_len, vocab_size + 1)
Returns:
A list of tuples: (word, confidence)
"""
# Decode CTC
return self.ctc_best_path(logits=logits, vocab=self.vocab, blank=len(self.vocab))
class CRNN(RecognitionModel, nn.Module):
"""Implements a CRNN architecture as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary used for encoding
rnn_units: number of units in the LSTM layers
exportable: onnx exportable returns only logits
cfg: configuration dictionary
"""
_children_names: List[str] = ["feat_extractor", "decoder", "linear", "postprocessor"]
def __init__(
self,
feature_extractor: nn.Module,
vocab: str,
rnn_units: int = 128,
input_shape: Tuple[int, int, int] = (3, 32, 128),
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.vocab = vocab
self.cfg = cfg
self.max_length = 32
self.exportable = exportable
self.feat_extractor = feature_extractor
# Resolve the input_size of the LSTM
self.feat_extractor.eval()
with torch.no_grad():
out_shape = self.feat_extractor(torch.zeros((1, *input_shape))).shape
lstm_in = out_shape[1] * out_shape[2]
# Switch back to original mode
self.feat_extractor.train()
self.decoder = nn.LSTM(
input_size=lstm_in,
hidden_size=rnn_units,
batch_first=True,
num_layers=2,
bidirectional=True,
)
# features units = 2 * rnn_units because bidirectional layers
self.linear = nn.Linear(in_features=2 * rnn_units, out_features=len(vocab) + 1)
self.postprocessor = CTCPostProcessor(vocab=vocab)
for n, m in self.named_modules():
# Don't override the initialization of the backbone
if n.startswith("feat_extractor."):
continue
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
def compute_loss(
self,
model_output: torch.Tensor,
target: List[str],
) -> torch.Tensor:
"""Compute CTC loss for the model.
Args:
model_output: predicted logits of the model
target: list of ground-truth labels
Returns:
The loss of the model on the batch
"""
gt, seq_len = self.build_target(target)
batch_len = model_output.shape[0]
input_length = model_output.shape[1] * torch.ones(size=(batch_len,), dtype=torch.int32)
# N x T x C -> T x N x C
logits = model_output.permute(1, 0, 2)
probs = F.log_softmax(logits, dim=-1)
ctc_loss = F.ctc_loss(
probs,
torch.from_numpy(gt),
input_length,
torch.tensor(seq_len, dtype=torch.int),
len(self.vocab),
zero_infinity=True,
)
return ctc_loss
def forward(
self,
x: torch.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
) -> Dict[str, Any]:
if self.training and target is None:
raise ValueError("Need to provide labels during training")
features = self.feat_extractor(x)
# B x C x H x W --> B x C*H x W --> B x W x C*H
c, h, w = features.shape[1], features.shape[2], features.shape[3]
features_seq = torch.reshape(features, shape=(-1, h * c, w))
features_seq = torch.transpose(features_seq, 1, 2)
logits, _ = self.decoder(features_seq)
logits = self.linear(logits)
out: Dict[str, Any] = {}
if self.exportable:
out["logits"] = logits
return out
if return_model_output:
out["out_map"] = logits
if target is None or return_preds:
# Post-process predictions into words
out["preds"] = self.postprocessor(logits)
if target is not None:
out["loss"] = self.compute_loss(logits, target)
return out
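# Minimal training-style sketch for the `forward` method above (hypothetical tensors
# and labels; assumes the label characters belong to the configured vocab):
# >>> import torch
# >>> from doctr.models import crnn_vgg16_bn
# >>> model = crnn_vgg16_bn(pretrained=False)
# >>> x = torch.rand(2, 3, 32, 128)
# >>> out = model(x, target=["hello", "world"], return_preds=True)
# >>> out["loss"], out["preds"]  # CTC loss and a list of (word, confidence) tuples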
def _crnn(
arch: str,
pretrained: bool,
backbone_fn: Callable[[Any], nn.Module],
pretrained_backbone: bool = True,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> CRNN:
pretrained_backbone = pretrained_backbone and not pretrained
# Feature extractor
feat_extractor = backbone_fn(pretrained=pretrained_backbone).features # type: ignore[call-arg]
kwargs["vocab"] = kwargs.get("vocab", default_cfgs[arch]["vocab"])
kwargs["input_shape"] = kwargs.get("input_shape", default_cfgs[arch]["input_shape"])
_cfg = deepcopy(default_cfgs[arch])
_cfg["vocab"] = kwargs["vocab"]
_cfg["input_shape"] = kwargs["input_shape"]
# Build the model
model = CRNN(feat_extractor, cfg=_cfg, **kwargs) # type: ignore[arg-type]
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if _cfg["vocab"] != default_cfgs[arch]["vocab"] else None
load_pretrained_params(model, _cfg["url"], ignore_keys=_ignore_keys)
return model
def crnn_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> CRNN:
"""CRNN with a VGG-16 backbone as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
>>> import torch
>>> from doctr.models import crnn_vgg16_bn
>>> model = crnn_vgg16_bn(pretrained=True)
>>> input_tensor = torch.rand(1, 3, 32, 128)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _crnn("crnn_vgg16_bn", pretrained, vgg16_bn_r, ignore_keys=["linear.weight", "linear.bias"], **kwargs)
def crnn_mobilenet_v3_small(pretrained: bool = False, **kwargs: Any) -> CRNN:
"""CRNN with a MobileNet V3 Small backbone as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
>>> import torch
>>> from doctr.models import crnn_mobilenet_v3_small
>>> model = crnn_mobilenet_v3_small(pretrained=True)
>>> input_tensor = torch.rand(1, 3, 32, 128)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _crnn(
"crnn_mobilenet_v3_small",
pretrained,
mobilenet_v3_small_r,
ignore_keys=["linear.weight", "linear.bias"],
**kwargs,
)
def crnn_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> CRNN:
"""CRNN with a MobileNet V3 Large backbone as described in `"An End-to-End Trainable Neural Network for Image-based
Sequence Recognition and Its Application to Scene Text Recognition" <https://arxiv.org/pdf/1507.05717.pdf>`_.
>>> import torch
>>> from doctr.models import crnn_mobilenet_v3_large
>>> model = crnn_mobilenet_v3_large(pretrained=True)
>>> input_tensor = torch.rand(1, 3, 32, 128)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _crnn(
"crnn_mobilenet_v3_large",
pretrained,
mobilenet_v3_large_r,
ignore_keys=["linear.weight", "linear.bias"],
**kwargs,
)
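# Note on fine-tuning with a different vocab: when `vocab` differs from the default
# configuration, the builders above pass `ignore_keys=["linear.weight", "linear.bias"]`
# so the classification head of the checkpoint is discarded before loading.
# A hypothetical digits-only setup (sketch only; assumes network access for the weights):
# >>> model = crnn_vgg16_bn(pretrained=True, vocab="0123456789")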
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import Model, layers
from doctr.datasets import VOCABS
from ...classification import vit_b, vit_s
from ...utils.tensorflow import load_pretrained_params
from .base import _ViTSTR, _ViTSTRPostProcessor
__all__ = ["ViTSTR", "vitstr_small", "vitstr_base"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"vitstr_small": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["french"],
"url": None,
},
"vitstr_base": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["french"],
"url": None,
},
}
class ViTSTR(_ViTSTR, Model):
"""Implements a ViTSTR architecture as described in `"Vision Transformer for Fast and
Efficient Scene Text Recognition" <https://arxiv.org/pdf/2105.08582.pdf>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary used for encoding
embedding_units: number of embedding units
max_length: maximum word length handled by the model
dropout_prob: dropout probability for the encoder and decoder
input_shape: input shape of the image
exportable: onnx exportable returns only logits
cfg: dictionary containing information about the model
"""
_children_names: List[str] = ["feat_extractor", "postprocessor"]
def __init__(
self,
feature_extractor,
vocab: str,
embedding_units: int,
max_length: int = 25,
dropout_prob: float = 0.0,
input_shape: Tuple[int, int, int] = (32, 128, 3), # different from paper
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.vocab = vocab
self.exportable = exportable
self.cfg = cfg
# NOTE: different from the paper, which also uses the EOS token as the PAD token
self.max_length = max_length + 3 # Add 1 step for EOS, 1 for SOS, 1 for PAD
self.feat_extractor = feature_extractor
self.head = layers.Dense(len(self.vocab) + 3, name="head")
self.postprocessor = ViTSTRPostProcessor(vocab=self.vocab)
@staticmethod
def compute_loss(
model_output: tf.Tensor,
gt: tf.Tensor,
seq_len: List[int],
) -> tf.Tensor:
"""Compute categorical cross-entropy loss for the model.
Sequences are masked after the EOS character.
Args:
model_output: predicted logits of the model
gt: the encoded tensor with gt labels
seq_len: lengths of each gt word inside the batch
Returns:
The loss of the model on the batch
"""
# Input length : number of steps
input_len = tf.shape(model_output)[1]
# Add one for the additional <eos> token (the sos disappears in the shift!)
seq_len = tf.cast(seq_len, tf.int32) + 1
# One-hot gt labels
oh_gt = tf.one_hot(gt, depth=model_output.shape[2])
# Compute loss: don't forget to shift gt! Otherwise the model learns to output the gt[t-1]!
# The "masked" first gt char is <sos>. Delete last logit of the model output.
cce = tf.nn.softmax_cross_entropy_with_logits(oh_gt[:, 1:, :], model_output[:, :-1, :])
# Compute mask
mask_values = tf.zeros_like(cce)
mask_2d = tf.sequence_mask(seq_len, input_len - 1) # delete the last mask timestep as well
masked_loss = tf.where(mask_2d, cce, mask_values)
ce_loss = tf.math.divide(tf.reduce_sum(masked_loss, axis=1), tf.cast(seq_len, model_output.dtype))
return tf.expand_dims(ce_loss, axis=1)
def call(
self,
x: tf.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
features = self.feat_extractor(x, **kwargs) # (batch_size, patches_seqlen, d_model)
if target is not None:
gt, seq_len = self.build_target(target)
seq_len = tf.cast(seq_len, tf.int32)
if kwargs.get("training", False) and target is None:
raise ValueError("Need to provide labels during training")
features = features[:, : self.max_length + 1] # add 1 for unused cls token (ViT)
# (batch_size, max_length + 1, d_model)
B, N, E = features.shape
features = tf.reshape(features, (B * N, E))
logits = tf.reshape(self.head(features), (B, N, len(self.vocab) + 3)) # (batch_size, max_length + 1, vocab + 3)
decoded_features = logits[:, 1:] # remove cls_token
out: Dict[str, tf.Tensor] = {}
if self.exportable:
out["logits"] = decoded_features
return out
if return_model_output:
out["out_map"] = decoded_features
if target is None or return_preds:
# Post-process predictions into words
out["preds"] = self.postprocessor(decoded_features)
if target is not None:
out["loss"] = self.compute_loss(decoded_features, gt, seq_len)
return out
class ViTSTRPostProcessor(_ViTSTRPostProcessor):
"""Post processor for ViTSTR architecture
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __call__(
self,
logits: tf.Tensor,
) -> List[Tuple[str, float]]:
# compute pred with argmax for attention models
out_idxs = tf.math.argmax(logits, axis=2)
# N x L
probs = tf.gather(tf.nn.softmax(logits, axis=-1), out_idxs, axis=-1, batch_dims=2)
# Take the minimum confidence of the sequence
probs = tf.math.reduce_min(probs, axis=1)
# decode raw output of the model with tf_label_to_idx
out_idxs = tf.cast(out_idxs, dtype="int32")
embedding = tf.constant(self._embedding, dtype=tf.string)
decoded_strings_pred = tf.strings.reduce_join(inputs=tf.nn.embedding_lookup(embedding, out_idxs), axis=-1)
decoded_strings_pred = tf.strings.split(decoded_strings_pred, "<eos>")
decoded_strings_pred = tf.sparse.to_dense(decoded_strings_pred.to_sparse(), default_value="not valid")[:, 0]
word_values = [word.decode() for word in decoded_strings_pred.numpy().tolist()]
return list(zip(word_values, probs.numpy().tolist()))
def _vitstr(
arch: str,
pretrained: bool,
backbone_fn,
pretrained_backbone: bool = False, # NOTE: training from scratch without a pretrained backbone works better
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> ViTSTR:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = input_shape or _cfg["input_shape"]
_cfg["vocab"] = kwargs.get("vocab", _cfg["vocab"])
kwargs["vocab"] = _cfg["vocab"]
# Feature extractor
feat_extractor = backbone_fn(
pretrained=pretrained_backbone,
input_shape=_cfg["input_shape"],
include_top=False,
)
# Build the model
model = ViTSTR(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def vitstr_small(pretrained: bool = False, **kwargs: Any) -> ViTSTR:
"""ViTSTR-Small as described in `"Vision Transformer for Fast and Efficient Scene Text Recognition"
<https://arxiv.org/pdf/2105.08582.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import vitstr_small
>>> model = vitstr_small(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _vitstr(
"vitstr_small",
pretrained,
vit_s,
embedding_units=384,
**kwargs,
)
def vitstr_base(pretrained: bool = False, **kwargs: Any) -> ViTSTR:
"""ViTSTR-Base as described in `"Vision Transformer for Fast and Efficient Scene Text Recognition"
<https://arxiv.org/pdf/2105.08582.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import vitstr_base
>>> model = vitstr_base(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _vitstr(
"vitstr_base",
pretrained,
vit_b,
embedding_units=768,
**kwargs,
)
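# Minimal training-style sketch for the ViTSTR model above (hypothetical tensors and
# labels; the label characters are assumed to be part of the configured vocab):
# >>> import tensorflow as tf
# >>> from doctr.models import vitstr_small
# >>> model = vitstr_small(pretrained=False)
# >>> x = tf.random.uniform(shape=[2, 32, 128, 3], maxval=1, dtype=tf.float32)
# >>> out = model(x, target=["cat", "dog"], return_preds=True)
# >>> out["loss"], out["preds"]  # masked cross-entropy loss and (word, confidence) tuples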
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models._utils import IntermediateLayerGetter
from doctr.datasets import VOCABS
from ...classification import vit_b, vit_s
from ...utils.pytorch import load_pretrained_params
from .base import _ViTSTR, _ViTSTRPostProcessor
__all__ = ["ViTSTR", "vitstr_small", "vitstr_base"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"vitstr_small": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["french"],
"url": None,
},
"vitstr_base": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["french"],
"url": None,
},
}
class ViTSTR(_ViTSTR, nn.Module):
"""Implements a ViTSTR architecture as described in `"Vision Transformer for Fast and
Efficient Scene Text Recognition" <https://arxiv.org/pdf/2105.08582.pdf>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary used for encoding
embedding_units: number of embedding units
max_length: maximum word length handled by the model
input_shape: input shape of the image
exportable: onnx exportable returns only logits
cfg: dictionary containing information about the model
"""
def __init__(
self,
feature_extractor,
vocab: str,
embedding_units: int,
max_length: int = 25,
input_shape: Tuple[int, int, int] = (3, 32, 128), # different from paper
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.vocab = vocab
self.exportable = exportable
self.cfg = cfg
# NOTE: different from the paper, which also uses the EOS token as the PAD token
self.max_length = max_length + 3 # Add 1 step for EOS, 1 for SOS, 1 for PAD
self.feat_extractor = feature_extractor
self.head = nn.Linear(embedding_units, len(self.vocab) + 3)
self.postprocessor = ViTSTRPostProcessor(vocab=self.vocab)
def forward(
self,
x: torch.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
) -> Dict[str, Any]:
features = self.feat_extractor(x)["features"] # (batch_size, patches_seqlen, d_model)
if target is not None:
_gt, _seq_len = self.build_target(target)
gt, seq_len = torch.from_numpy(_gt).to(dtype=torch.long), torch.tensor(_seq_len)
gt, seq_len = gt.to(x.device), seq_len.to(x.device)
if self.training and target is None:
raise ValueError("Need to provide labels during training")
# borrowed from : https://github.com/baudm/parseq/blob/main/strhub/models/vitstr/model.py
features = features[:, : self.max_length + 1] # add 1 for unused cls token (ViT)
# (batch_size, max_length + 1, d_model)
B, N, E = features.size()
features = features.reshape(B * N, E)
logits = self.head(features).view(B, N, len(self.vocab) + 3) # (batch_size, max_length + 1, vocab + 3)
decoded_features = logits[:, 1:] # remove cls_token
out: Dict[str, Any] = {}
if self.exportable:
out["logits"] = decoded_features
return out
if return_model_output:
out["out_map"] = decoded_features
if target is None or return_preds:
# Post-process predictions into words
out["preds"] = self.postprocessor(decoded_features)
if target is not None:
out["loss"] = self.compute_loss(decoded_features, gt, seq_len)
return out
@staticmethod
def compute_loss(
model_output: torch.Tensor,
gt: torch.Tensor,
seq_len: torch.Tensor,
) -> torch.Tensor:
"""Compute categorical cross-entropy loss for the model.
Sequences are masked after the EOS character.
Args:
model_output: predicted logits of the model
gt: the encoded tensor with gt labels
seq_len: lengths of each gt word inside the batch
Returns:
The loss of the model on the batch
"""
# Input length : number of steps
input_len = model_output.shape[1]
# Add one for the additional <eos> token (the sos disappears in the shift!)
seq_len = seq_len + 1
# Compute loss: don't forget to shift gt! Otherwise the model learns to output the gt[t-1]!
# The "masked" first gt char is <sos>. Delete last logit of the model output.
cce = F.cross_entropy(model_output[:, :-1, :].permute(0, 2, 1), gt[:, 1:], reduction="none")
# Compute mask, remove 1 timestep here as well
mask_2d = torch.arange(input_len - 1, device=model_output.device)[None, :] >= seq_len[:, None]
cce[mask_2d] = 0
ce_loss = cce.sum(1) / seq_len.to(dtype=model_output.dtype)
return ce_loss.mean()
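# A sketch of what the shift/masking above amounts to for a single 3-character word
# (layout follows `build_target`, which prepends <sos>, appends <eos> and pads):
#   gt      = [<sos>, c1, c2, c3, <eos>, <pad>, ..., <pad>]
#   targets = gt[:, 1:]                 -> [c1, c2, c3, <eos>, <pad>, ...]
#   logits  = model_output[:, :-1, :]   aligned with those targets
#   seq_len = 3 + 1 = 4                 -> only the first 4 aligned steps contribute to
#                                          the loss; the <pad> positions are zeroed by `mask_2d`.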
class ViTSTRPostProcessor(_ViTSTRPostProcessor):
"""Post processor for ViTSTR architecture
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __call__(
self,
logits: torch.Tensor,
) -> List[Tuple[str, float]]:
# compute pred with argmax for attention models
out_idxs = logits.argmax(-1)
# N x L
probs = torch.gather(torch.softmax(logits, -1), -1, out_idxs.unsqueeze(-1)).squeeze(-1)
# Take the minimum confidence of the sequence
probs = probs.min(dim=1).values.detach().cpu()
# Manual decoding
word_values = [
"".join(self._embedding[idx] for idx in encoded_seq).split("<eos>")[0]
for encoded_seq in out_idxs.cpu().numpy()
]
return list(zip(word_values, probs.numpy().tolist()))
def _vitstr(
arch: str,
pretrained: bool,
backbone_fn: Callable[[bool], nn.Module],
layer: str,
pretrained_backbone: bool = False, # NOTE: training from scratch without a pretrained backbone works better
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> ViTSTR:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["vocab"] = kwargs.get("vocab", _cfg["vocab"])
_cfg["input_shape"] = kwargs.get("input_shape", _cfg["input_shape"])
kwargs["vocab"] = _cfg["vocab"]
kwargs["input_shape"] = _cfg["input_shape"]
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(pretrained_backbone, input_shape=_cfg["input_shape"]), # type: ignore[call-arg]
{layer: "features"},
)
# Build the model
model = ViTSTR(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if _cfg["vocab"] != default_cfgs[arch]["vocab"] else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
return model
def vitstr_small(pretrained: bool = False, **kwargs: Any) -> ViTSTR:
"""ViTSTR-Small as described in `"Vision Transformer for Fast and Efficient Scene Text Recognition"
<https://arxiv.org/pdf/2105.08582.pdf>`_.
>>> import torch
>>> from doctr.models import vitstr_small
>>> model = vitstr_small(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 32, 128))
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _vitstr(
"vitstr_small",
pretrained,
vit_s,
"1",
embedding_units=384,
ignore_keys=["head.weight", "head.bias"],
**kwargs,
)
def vitstr_base(pretrained: bool = False, **kwargs: Any) -> ViTSTR:
"""ViTSTR-Base as described in `"Vision Transformer for Fast and Efficient Scene Text Recognition"
<https://arxiv.org/pdf/2105.08582.pdf>`_.
>>> import torch
>>> from doctr.models import vitstr_base
>>> model = vitstr_base(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 32, 128))
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _vitstr(
"vitstr_base",
pretrained,
vit_b,
"1",
embedding_units=768,
ignore_keys=["head.weight", "head.bias"],
**kwargs,
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Tuple
import numpy as np
from ....datasets import encode_sequences
from ..core import RecognitionPostProcessor
class _ViTSTR:
vocab: str
max_length: int
def build_target(
self,
gts: List[str],
) -> Tuple[np.ndarray, List[int]]:
"""Encode a list of gts sequences into a np array and gives the corresponding*
sequence lengths.
Args:
gts: list of ground-truth labels
Returns:
A tuple of 2 tensors: Encoded labels and sequence lengths (for each entry of the batch)
"""
encoded = encode_sequences(
sequences=gts,
vocab=self.vocab,
target_size=self.max_length,
eos=len(self.vocab),
sos=len(self.vocab) + 1,
pad=len(self.vocab) + 2,
)
seq_len = [len(word) for word in gts]
return encoded, seq_len
class _ViTSTRPostProcessor(RecognitionPostProcessor):
"""Abstract class to postprocess the raw output of the model
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __init__(
self,
vocab: str,
) -> None:
super().__init__(vocab)
self._embedding = list(vocab) + ["<eos>", "<sos>", "<pad>"]
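# Sketch of `build_target` on a hypothetical recognizer `reco` that exposes this mixin:
# >>> encoded, seq_len = reco.build_target(["cat", "dog"])
# `encoded` is an int array of shape (2, reco.max_length) holding character indices plus
# the special symbols <eos> = len(vocab), <sos> = len(vocab) + 1, <pad> = len(vocab) + 2,
# and `seq_len` is simply [3, 3] (the raw word lengths).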
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import Model, layers
from doctr.datasets import VOCABS
from doctr.models.classification import magc_resnet31
from doctr.models.modules.transformer import Decoder, PositionalEncoding
from ...utils.tensorflow import load_pretrained_params
from .base import _MASTER, _MASTERPostProcessor
__all__ = ["MASTER", "master"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"master": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["french"],
"url": None,
},
}
class MASTER(_MASTER, Model):
"""Implements MASTER as described in paper: <https://arxiv.org/pdf/1910.02562.pdf>`_.
Implementation based on the official TF implementation: <https://github.com/jiangxiluning/MASTER-TF>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary (without EOS, SOS, PAD)
d_model: d parameter for the transformer decoder
dff: depth of the pointwise feed-forward layer
num_heads: number of heads for the multi-head attention module
num_layers: number of decoder layers to stack
max_length: maximum length of character sequence handled by the model
dropout: dropout probability of the decoder
input_shape: size of the image inputs
exportable: onnx exportable returns only logits
cfg: dictionary containing information about the model
"""
def __init__(
self,
feature_extractor: tf.keras.Model,
vocab: str,
d_model: int = 512,
dff: int = 2048,
num_heads: int = 8, # number of heads in the transformer decoder
num_layers: int = 3,
max_length: int = 50,
dropout: float = 0.2,
input_shape: Tuple[int, int, int] = (32, 128, 3), # different from the paper
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.exportable = exportable
self.max_length = max_length
self.d_model = d_model
self.vocab = vocab
self.cfg = cfg
self.vocab_size = len(vocab)
self.feat_extractor = feature_extractor
self.positional_encoding = PositionalEncoding(self.d_model, dropout, max_len=input_shape[0] * input_shape[1])
self.decoder = Decoder(
num_layers=num_layers,
d_model=self.d_model,
num_heads=num_heads,
vocab_size=self.vocab_size + 3, # EOS, SOS, PAD
dff=dff,
dropout=dropout,
maximum_position_encoding=self.max_length,
)
self.linear = layers.Dense(self.vocab_size + 3, kernel_initializer=tf.initializers.he_uniform())
self.postprocessor = MASTERPostProcessor(vocab=self.vocab)
@tf.function
def make_source_and_target_mask(self, source: tf.Tensor, target: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
# [1, 1, 1, ..., 0, 0, 0] -> 0 is masked
# (N, 1, 1, max_length)
target_pad_mask = tf.cast(tf.math.not_equal(target, self.vocab_size + 2), dtype=tf.uint8)
target_pad_mask = target_pad_mask[:, tf.newaxis, tf.newaxis, :]
target_length = target.shape[1]
# sub mask filled diagonal with 1 = see 0 = masked (max_length, max_length)
target_sub_mask = tf.linalg.band_part(tf.ones((target_length, target_length)), -1, 0)
# source mask filled with ones (max_length, positional_encoded_seq_len)
source_mask = tf.ones((target_length, source.shape[1]))
# combine the two masks into one boolean mask where False is masked (N, 1, max_length, max_length)
target_mask = tf.math.logical_and(
tf.cast(target_sub_mask, dtype=tf.bool), tf.cast(target_pad_mask, dtype=tf.bool)
)
return source_mask, target_mask
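# For reference, the causal part of the target mask built above is just a lower
# triangular matrix, e.g. for a hypothetical length of 3:
# >>> tf.linalg.band_part(tf.ones((3, 3)), -1, 0)
# [[1., 0., 0.],
#  [1., 1., 0.],
#  [1., 1., 1.]]
# so position t may only attend to positions <= t, and padded positions are further
# removed by the logical AND with `target_pad_mask`.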
@staticmethod
def compute_loss(
model_output: tf.Tensor,
gt: tf.Tensor,
seq_len: List[int],
) -> tf.Tensor:
"""Compute categorical cross-entropy loss for the model.
Sequences are masked after the EOS character.
Args:
gt: the encoded tensor with gt labels
model_output: predicted logits of the model
seq_len: lengths of each gt word inside the batch
Returns:
The loss of the model on the batch
"""
# Input length : number of timesteps
input_len = tf.shape(model_output)[1]
# Add one for the additional <eos> token (the sos disappears in the shift!)
seq_len = tf.cast(seq_len, tf.int32) + 1
# One-hot gt labels
oh_gt = tf.one_hot(gt, depth=model_output.shape[2])
# Compute loss: don't forget to shift gt! Otherwise the model learns to output the gt[t-1]!
# The "masked" first gt char is <sos>. Delete last logit of the model output.
cce = tf.nn.softmax_cross_entropy_with_logits(oh_gt[:, 1:, :], model_output[:, :-1, :])
# Compute mask
mask_values = tf.zeros_like(cce)
mask_2d = tf.sequence_mask(seq_len, input_len - 1) # delete the last mask timestep as well
masked_loss = tf.where(mask_2d, cce, mask_values)
ce_loss = tf.math.divide(tf.reduce_sum(masked_loss, axis=1), tf.cast(seq_len, model_output.dtype))
return tf.expand_dims(ce_loss, axis=1)
def call(
self,
x: tf.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
"""Call function for training
Args:
x: images
target: list of str labels
return_model_output: if True, return logits
return_preds: if True, decode logits
Return:
A dictionnary containing eventually loss, logits and predictions.
"""
# Encode
feature = self.feat_extractor(x, **kwargs)
b, h, w, c = feature.get_shape()
# (N, H, W, C) --> (N, H * W, C)
feature = tf.reshape(feature, shape=(b, h * w, c))
# add positional encoding to features
encoded = self.positional_encoding(feature, **kwargs)
out: Dict[str, tf.Tensor] = {}
if kwargs.get("training", False) and target is None:
raise ValueError("Need to provide labels during training")
if target is not None:
# Compute target: tensor of gts and sequence lengths
gt, seq_len = self.build_target(target)
# Compute decoder masks
source_mask, target_mask = self.make_source_and_target_mask(encoded, gt)
# Compute logits
output = self.decoder(gt, encoded, source_mask, target_mask, **kwargs)
logits = self.linear(output, **kwargs)
else:
logits = self.decode(encoded, **kwargs)
if self.exportable:
out["logits"] = logits
return out
if target is not None:
out["loss"] = self.compute_loss(logits, gt, seq_len)
if return_model_output:
out["out_map"] = logits
if return_preds:
out["preds"] = self.postprocessor(logits)
return out
@tf.function
def decode(self, encoded: tf.Tensor, **kwargs: Any) -> tf.Tensor:
"""Decode function for prediction
Args:
encoded: encoded features
Returns:
The raw decoder logits, one step per position of the decoded sequence
"""
b = encoded.shape[0]
start_symbol = tf.constant(self.vocab_size + 1, dtype=tf.int32) # SOS
padding_symbol = tf.constant(self.vocab_size + 2, dtype=tf.int32) # PAD
ys = tf.fill(dims=(b, self.max_length - 1), value=padding_symbol)
start_vector = tf.fill(dims=(b, 1), value=start_symbol)
ys = tf.concat([start_vector, ys], axis=-1)
# Final dimension include EOS/SOS/PAD
for i in range(self.max_length - 1):
source_mask, target_mask = self.make_source_and_target_mask(encoded, ys)
output = self.decoder(ys, encoded, source_mask, target_mask, **kwargs)
logits = self.linear(output, **kwargs)
prob = tf.nn.softmax(logits, axis=-1)
next_token = tf.argmax(prob, axis=-1, output_type=ys.dtype)
# update ys with the next token and ignore the first token (SOS)
i_mesh, j_mesh = tf.meshgrid(tf.range(b), tf.range(self.max_length), indexing="ij")
indices = tf.stack([i_mesh[:, i + 1], j_mesh[:, i + 1]], axis=1)
ys = tf.tensor_scatter_nd_update(ys, indices, next_token[:, i])
# Shape (N, max_length, vocab_size + 3)
return logits
class MASTERPostProcessor(_MASTERPostProcessor):
"""Post processor for MASTER architectures
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __call__(
self,
logits: tf.Tensor,
) -> List[Tuple[str, float]]:
# compute pred with argmax for attention models
out_idxs = tf.math.argmax(logits, axis=2)
# N x L
probs = tf.gather(tf.nn.softmax(logits, axis=-1), out_idxs, axis=-1, batch_dims=2)
# Take the minimum confidence of the sequence
probs = tf.math.reduce_min(probs, axis=1)
# decode raw output of the model with tf_label_to_idx
out_idxs = tf.cast(out_idxs, dtype="int32")
embedding = tf.constant(self._embedding, dtype=tf.string)
decoded_strings_pred = tf.strings.reduce_join(inputs=tf.nn.embedding_lookup(embedding, out_idxs), axis=-1)
decoded_strings_pred = tf.strings.split(decoded_strings_pred, "<eos>")
decoded_strings_pred = tf.sparse.to_dense(decoded_strings_pred.to_sparse(), default_value="not valid")[:, 0]
word_values = [word.decode() for word in decoded_strings_pred.numpy().tolist()]
return list(zip(word_values, probs.numpy().tolist()))
def _master(arch: str, pretrained: bool, backbone_fn, pretrained_backbone: bool = True, **kwargs: Any) -> MASTER:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = kwargs.get("input_shape", _cfg["input_shape"])
_cfg["vocab"] = kwargs.get("vocab", _cfg["vocab"])
kwargs["vocab"] = _cfg["vocab"]
kwargs["input_shape"] = _cfg["input_shape"]
# Build the model
model = MASTER(
backbone_fn(pretrained=pretrained_backbone, input_shape=_cfg["input_shape"], include_top=False),
cfg=_cfg,
**kwargs,
)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def master(pretrained: bool = False, **kwargs: Any) -> MASTER:
"""MASTER as described in paper: <https://arxiv.org/pdf/1910.02562.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import master
>>> model = master(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _master("master", pretrained, magc_resnet31, **kwargs)
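# Minimal sketch of the two execution paths of `MASTER.call` above (hypothetical tensors):
# >>> import tensorflow as tf
# >>> from doctr.models import master
# >>> model = master(pretrained=False)
# >>> x = tf.random.uniform(shape=[2, 32, 128, 3], maxval=1, dtype=tf.float32)
# >>> model(x, target=["cat", "dog"])["loss"]   # teacher-forced decoding + loss
# >>> model(x, return_preds=True)["preds"]      # autoregressive greedy decoding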
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models._utils import IntermediateLayerGetter
from doctr.datasets import VOCABS
from doctr.models.classification import magc_resnet31
from doctr.models.modules.transformer import Decoder, PositionalEncoding
from ...utils.pytorch import load_pretrained_params
from .base import _MASTER, _MASTERPostProcessor
__all__ = ["MASTER", "master"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"master": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["french"],
"url": None,
},
}
class MASTER(_MASTER, nn.Module):
"""Implements MASTER as described in paper: <https://arxiv.org/pdf/1910.02562.pdf>`_.
Implementation based on the official Pytorch implementation: <https://github.com/wenwenyu/MASTER-pytorch>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary (without EOS, SOS, PAD)
d_model: d parameter for the transformer decoder
dff: depth of the pointwise feed-forward layer
num_heads: number of heads for the multi-head attention module
num_layers: number of decoder layers to stack
max_length: maximum length of character sequence handled by the model
dropout: dropout probability of the decoder
input_shape: size of the image inputs
exportable: onnx exportable returns only logits
cfg: dictionary containing information about the model
"""
def __init__(
self,
feature_extractor: nn.Module,
vocab: str,
d_model: int = 512,
dff: int = 2048,
num_heads: int = 8, # number of heads in the transformer decoder
num_layers: int = 3,
max_length: int = 50,
dropout: float = 0.2,
input_shape: Tuple[int, int, int] = (3, 32, 128), # different from the paper
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.exportable = exportable
self.max_length = max_length
self.d_model = d_model
self.vocab = vocab
self.cfg = cfg
self.vocab_size = len(vocab)
self.feat_extractor = feature_extractor
self.positional_encoding = PositionalEncoding(self.d_model, dropout, max_len=input_shape[1] * input_shape[2])
self.decoder = Decoder(
num_layers=num_layers,
d_model=self.d_model,
num_heads=num_heads,
vocab_size=self.vocab_size + 3, # EOS, SOS, PAD
dff=dff,
dropout=dropout,
maximum_position_encoding=self.max_length,
)
self.linear = nn.Linear(self.d_model, self.vocab_size + 3)
self.postprocessor = MASTERPostProcessor(vocab=self.vocab)
for n, m in self.named_modules():
# Don't override the initialization of the backbone
if n.startswith("feat_extractor."):
continue
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def make_source_and_target_mask(
self, source: torch.Tensor, target: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# borrowed and slightly modified from https://github.com/wenwenyu/MASTER-pytorch
# NOTE: nn.TransformerDecoder takes the inverse from this implementation
# [True, True, True, ..., False, False, False] -> False is masked
target_pad_mask = (target != self.vocab_size + 2).unsqueeze(1).unsqueeze(1) # (N, 1, 1, max_length)
target_length = target.size(1)
# sub mask filled diagonal with True = see and False = masked (max_length, max_length)
# NOTE: onnxruntime tril/triu works only with float currently (onnxruntime 1.11.1 - opset 14)
target_sub_mask = torch.tril(torch.ones((target_length, target_length), device=source.device), diagonal=0).to(
dtype=torch.bool
)
# source mask filled with ones (max_length, positional_encoded_seq_len)
source_mask = torch.ones((target_length, source.size(1)), dtype=torch.uint8, device=source.device)
# combine the two masks into one (N, 1, max_length, max_length)
target_mask = target_pad_mask & target_sub_mask
return source_mask, target_mask.int()
@staticmethod
def compute_loss(
model_output: torch.Tensor,
gt: torch.Tensor,
seq_len: torch.Tensor,
) -> torch.Tensor:
"""Compute categorical cross-entropy loss for the model.
Sequences are masked after the EOS character.
Args:
gt: the encoded tensor with gt labels
model_output: predicted logits of the model
seq_len: lengths of each gt word inside the batch
Returns:
The loss of the model on the batch
"""
# Input length : number of timesteps
input_len = model_output.shape[1]
# Add one for the additional <eos> token (the sos disappears in the shift!)
seq_len = seq_len + 1
# Compute loss: don't forget to shift gt! Otherwise the model learns to output the gt[t-1]!
# The "masked" first gt char is <sos>. Delete last logit of the model output.
cce = F.cross_entropy(model_output[:, :-1, :].permute(0, 2, 1), gt[:, 1:], reduction="none")
# Compute mask, remove 1 timestep here as well
mask_2d = torch.arange(input_len - 1, device=model_output.device)[None, :] >= seq_len[:, None]
cce[mask_2d] = 0
ce_loss = cce.sum(1) / seq_len.to(dtype=model_output.dtype)
return ce_loss.mean()
def forward(
self,
x: torch.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
) -> Dict[str, Any]:
"""Call function for training
Args:
x: images
target: list of str labels
return_model_output: if True, return logits
return_preds: if True, decode logits
Returns:
A dictionnary containing eventually loss, logits and predictions.
"""
# Encode
features = self.feat_extractor(x)["features"]
b, c, h, w = features.shape
# (N, C, H, W) --> (N, H * W, C)
features = features.view(b, c, h * w).permute((0, 2, 1))
# add positional encoding to features
encoded = self.positional_encoding(features)
out: Dict[str, Any] = {}
if self.training and target is None:
raise ValueError("Need to provide labels during training")
if target is not None:
# Compute target: tensor of gts and sequence lengths
_gt, _seq_len = self.build_target(target)
gt, seq_len = torch.from_numpy(_gt).to(dtype=torch.long), torch.tensor(_seq_len)
gt, seq_len = gt.to(x.device), seq_len.to(x.device)
# Compute source mask and target mask
source_mask, target_mask = self.make_source_and_target_mask(encoded, gt)
output = self.decoder(gt, encoded, source_mask, target_mask)
# Compute logits
logits = self.linear(output)
else:
logits = self.decode(encoded)
if self.exportable:
out["logits"] = logits
return out
if target is not None:
out["loss"] = self.compute_loss(logits, gt, seq_len)
if return_model_output:
out["out_map"] = logits
if return_preds:
out["preds"] = self.postprocessor(logits)
return out
def decode(self, encoded: torch.Tensor) -> torch.Tensor:
"""Decode function for prediction
Args:
encoded: input tensor
Returns:
The raw decoder logits, one step per position of the decoded sequence
"""
b = encoded.size(0)
# Padding symbol + SOS at the beginning
ys = torch.full((b, self.max_length), self.vocab_size + 2, dtype=torch.long, device=encoded.device) # pad
ys[:, 0] = self.vocab_size + 1 # sos
# Final dimension include EOS/SOS/PAD
for i in range(self.max_length - 1):
source_mask, target_mask = self.make_source_and_target_mask(encoded, ys)
output = self.decoder(ys, encoded, source_mask, target_mask)
logits = self.linear(output)
prob = torch.softmax(logits, dim=-1)
next_token = torch.max(prob, dim=-1).indices
# update ys with the next token and ignore the first token (SOS)
ys[:, i + 1] = next_token[:, i]
# Shape (N, max_length, vocab_size + 3)
return logits
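# Trace of the greedy loop above for a hypothetical budget of 4 positions
# (SOS = vocab_size + 1, PAD = vocab_size + 2):
#   ys starts as [SOS, PAD, PAD, PAD]
#   step i=0: run the decoder, write the argmax of position 0 into ys[:, 1]
#   step i=1: run the decoder, write the argmax of position 1 into ys[:, 2]
#   step i=2: run the decoder, write the argmax of position 2 into ys[:, 3]
# and the logits of the final iteration are returned for post-processing.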
class MASTERPostProcessor(_MASTERPostProcessor):
"""Post processor for MASTER architectures"""
def __call__(
self,
logits: torch.Tensor,
) -> List[Tuple[str, float]]:
# compute pred with argmax for attention models
out_idxs = logits.argmax(-1)
# N x L
probs = torch.gather(torch.softmax(logits, -1), -1, out_idxs.unsqueeze(-1)).squeeze(-1)
# Take the minimum confidence of the sequence
probs = probs.min(dim=1).values.detach().cpu()
# Manual decoding
word_values = [
"".join(self._embedding[idx] for idx in encoded_seq).split("<eos>")[0]
for encoded_seq in out_idxs.cpu().numpy()
]
return list(zip(word_values, probs.numpy().tolist()))
def _master(
arch: str,
pretrained: bool,
backbone_fn: Callable[[bool], nn.Module],
layer: str,
pretrained_backbone: bool = True,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> MASTER:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = kwargs.get("input_shape", _cfg["input_shape"])
_cfg["vocab"] = kwargs.get("vocab", _cfg["vocab"])
kwargs["vocab"] = _cfg["vocab"]
kwargs["input_shape"] = _cfg["input_shape"]
# Build the model
feat_extractor = IntermediateLayerGetter(
backbone_fn(pretrained_backbone),
{layer: "features"},
)
model = MASTER(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if _cfg["vocab"] != default_cfgs[arch]["vocab"] else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
return model
def master(pretrained: bool = False, **kwargs: Any) -> MASTER:
"""MASTER as described in paper: <https://arxiv.org/pdf/1910.02562.pdf>`_.
>>> import torch
>>> from doctr.models import master
>>> model = master(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 32, 128))
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _master(
"master",
pretrained,
magc_resnet31,
"10",
ignore_keys=[
"decoder.embed.weight",
"linear.weight",
"linear.bias",
],
**kwargs,
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Tuple
import numpy as np
from ....datasets import encode_sequences
from ..core import RecognitionPostProcessor
class _MASTER:
vocab: str
max_length: int
def build_target(
self,
gts: List[str],
) -> Tuple[np.ndarray, List[int]]:
"""Encode a list of gts sequences into a np array and gives the corresponding*
sequence lengths.
Args:
gts: list of ground-truth labels
Returns:
A tuple of 2 tensors: Encoded labels and sequence lengths (for each entry of the batch)
"""
encoded = encode_sequences(
sequences=gts,
vocab=self.vocab,
target_size=self.max_length,
eos=len(self.vocab),
sos=len(self.vocab) + 1,
pad=len(self.vocab) + 2,
)
seq_len = [len(word) for word in gts]
return encoded, seq_len
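# Usage sketch (illustrative only): how a model mixing in `_MASTER` encodes ground-truth strings.
# The toy vocab and max_length below are assumptions for demonstration.
class _ToyMaster(_MASTER):  # pragma: no cover
    vocab = "abc"
    max_length = 10
def _example_build_target():  # pragma: no cover
    encoded, seq_len = _ToyMaster().build_target(["ab", "cab"])
    # encoded: (2, 10) array padded with len(vocab) + 2, seq_len: [2, 3]
    return encoded, seq_len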
class _MASTERPostProcessor(RecognitionPostProcessor):
"""Abstract class to postprocess the raw output of the model
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __init__(
self,
vocab: str,
) -> None:
super().__init__(vocab)
self._embedding = list(vocab) + ["<eos>"] + ["<sos>"] + ["<pad>"]
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import Model, Sequential, layers
from doctr.datasets import VOCABS
from doctr.utils.repr import NestedObject
from ...classification import resnet31
from ...utils.tensorflow import load_pretrained_params
from ..core import RecognitionModel, RecognitionPostProcessor
__all__ = ["SAR", "sar_resnet31"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"sar_resnet31": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (32, 128, 3),
"vocab": VOCABS["french"],
"url": None,
},
}
class SAREncoder(layers.Layer, NestedObject):
"""Implements encoder module of the SAR model
Args:
rnn_units: number of hidden rnn units
dropout_prob: dropout probability
"""
def __init__(self, rnn_units: int, dropout_prob: float = 0.0) -> None:
super().__init__()
self.rnn = Sequential(
[
layers.LSTM(units=rnn_units, return_sequences=True, recurrent_dropout=dropout_prob),
layers.LSTM(units=rnn_units, return_sequences=False, recurrent_dropout=dropout_prob),
]
)
def call(
self,
x: tf.Tensor,
**kwargs: Any,
) -> tf.Tensor:
# (N, C)
return self.rnn(x, **kwargs)
class AttentionModule(layers.Layer, NestedObject):
"""Implements attention module of the SAR model
Args:
attention_units: number of hidden attention units
"""
def __init__(self, attention_units: int) -> None:
super().__init__()
self.hidden_state_projector = layers.Conv2D(
attention_units,
1,
strides=1,
use_bias=False,
padding="same",
kernel_initializer="he_normal",
)
self.features_projector = layers.Conv2D(
attention_units,
3,
strides=1,
use_bias=True,
padding="same",
kernel_initializer="he_normal",
)
self.attention_projector = layers.Conv2D(
1,
1,
strides=1,
use_bias=False,
padding="same",
kernel_initializer="he_normal",
)
self.flatten = layers.Flatten()
def call(
self,
features: tf.Tensor,
hidden_state: tf.Tensor,
**kwargs: Any,
) -> tf.Tensor:
[H, W] = features.get_shape().as_list()[1:3]
# shape (N, H, W, vgg_units) -> (N, H, W, attention_units)
features_projection = self.features_projector(features, **kwargs)
# shape (N, 1, 1, rnn_units) -> (N, 1, 1, attention_units)
hidden_state = tf.expand_dims(tf.expand_dims(hidden_state, axis=1), axis=1)
hidden_state_projection = self.hidden_state_projector(hidden_state, **kwargs)
projection = tf.math.tanh(hidden_state_projection + features_projection)
# shape (N, H, W, attention_units) -> (N, H, W, 1)
attention = self.attention_projector(projection, **kwargs)
# shape (N, H, W, 1) -> (N, H * W)
attention = self.flatten(attention)
attention = tf.nn.softmax(attention)
# shape (N, H * W) -> (N, H, W, 1)
attention_map = tf.reshape(attention, [-1, H, W, 1])
glimpse = tf.math.multiply(features, attention_map)
        # shape (N, H, W, C) -> (N, C)
return tf.reduce_sum(glimpse, axis=[1, 2])
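# Usage sketch (illustrative only): attention pooling over a dummy feature map. The shapes below
# mirror a resnet31 feature map for a 32x128 input and are assumptions for demonstration.
def _example_attention_module():  # pragma: no cover
    module = AttentionModule(attention_units=512)
    features = tf.random.uniform((2, 4, 32, 512))  # (N, H, W, C)
    hidden_state = tf.random.uniform((2, 512))  # (N, rnn_units)
    return module(features, hidden_state)  # glimpse of shape (N, C)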
class SARDecoder(layers.Layer, NestedObject):
"""Implements decoder module of the SAR model
Args:
rnn_units: number of hidden units in recurrent cells
max_length: maximum length of a sequence
vocab_size: number of classes in the model alphabet
embedding_units: number of hidden embedding units
attention_units: number of hidden attention units
num_decoder_cells: number of LSTMCell layers to stack
dropout_prob: dropout probability
"""
def __init__(
self,
rnn_units: int,
max_length: int,
vocab_size: int,
embedding_units: int,
attention_units: int,
num_decoder_cells: int = 2,
dropout_prob: float = 0.0,
) -> None:
super().__init__()
self.vocab_size = vocab_size
self.max_length = max_length
self.embed = layers.Dense(embedding_units, use_bias=False)
self.embed_tgt = layers.Embedding(embedding_units, self.vocab_size + 1)
self.lstm_cells = layers.StackedRNNCells(
[layers.LSTMCell(rnn_units, implementation=1) for _ in range(num_decoder_cells)]
)
self.attention_module = AttentionModule(attention_units)
self.output_dense = layers.Dense(self.vocab_size + 1, use_bias=True)
self.dropout = layers.Dropout(dropout_prob)
def call(
self,
features: tf.Tensor,
holistic: tf.Tensor,
gt: Optional[tf.Tensor] = None,
**kwargs: Any,
) -> tf.Tensor:
if gt is not None:
gt_embedding = self.embed_tgt(gt, **kwargs)
logits_list: List[tf.Tensor] = []
for t in range(self.max_length + 1): # 32
if t == 0:
# step to init the first states of the LSTMCell
states = self.lstm_cells.get_initial_state(
inputs=None, batch_size=features.shape[0], dtype=features.dtype
)
prev_symbol = holistic
elif t == 1:
# step to init a 'blank' sequence of length vocab_size + 1 filled with zeros
# (N, vocab_size + 1) --> (N, embedding_units)
prev_symbol = tf.zeros([features.shape[0], self.vocab_size + 1])
prev_symbol = self.embed(prev_symbol, **kwargs)
else:
if gt is not None:
# (N, embedding_units) -2 because of <bos> and <eos> (same)
prev_symbol = self.embed(gt_embedding[:, t - 2], **kwargs)
else:
# -1 to start at timestep where prev_symbol was initialized
index = tf.argmax(logits_list[t - 1], axis=-1)
# update prev_symbol with ones at the index of the previous logit vector
# (N, embedding_units)
index = tf.ones_like(index)
prev_symbol = tf.scatter_nd(
tf.expand_dims(index, axis=1),
prev_symbol,
tf.constant([features.shape[0], features.shape[-1]], dtype=tf.int64),
)
# (N, C), (N, C) take the last hidden state and cell state from current timestep
_, states = self.lstm_cells(prev_symbol, states, **kwargs)
# states = (hidden_state, cell_state)
hidden_state = states[0][0]
# (N, H, W, C), (N, C) --> (N, C)
glimpse = self.attention_module(features, hidden_state, **kwargs)
# (N, C), (N, C) --> (N, 2 * C)
logits = tf.concat([hidden_state, glimpse], axis=1)
logits = self.dropout(logits, **kwargs)
# (N, vocab_size + 1)
logits_list.append(self.output_dense(logits, **kwargs))
# (max_length + 1, N, vocab_size + 1) --> (N, max_length + 1, vocab_size + 1)
return tf.transpose(tf.stack(logits_list[1:]), (1, 0, 2))
class SAR(Model, RecognitionModel):
"""Implements a SAR architecture as described in `"Show, Attend and Read:A Simple and Strong Baseline for
Irregular Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary used for encoding
rnn_units: number of hidden units in both encoder and decoder LSTM
embedding_units: number of embedding units
attention_units: number of hidden units in attention module
max_length: maximum word length handled by the model
num_decoder_cells: number of LSTMCell layers to stack
dropout_prob: dropout probability for the encoder and decoder
exportable: onnx exportable returns only logits
cfg: dictionary containing information about the model
"""
_children_names: List[str] = ["feat_extractor", "encoder", "decoder", "postprocessor"]
def __init__(
self,
feature_extractor,
vocab: str,
rnn_units: int = 512,
embedding_units: int = 512,
attention_units: int = 512,
max_length: int = 30,
num_decoder_cells: int = 2,
dropout_prob: float = 0.0,
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.vocab = vocab
self.exportable = exportable
self.cfg = cfg
self.max_length = max_length + 1 # Add 1 timestep for EOS after the longest word
self.feat_extractor = feature_extractor
self.encoder = SAREncoder(rnn_units, dropout_prob)
self.decoder = SARDecoder(
rnn_units,
self.max_length,
len(vocab),
embedding_units,
attention_units,
num_decoder_cells,
dropout_prob,
)
self.postprocessor = SARPostProcessor(vocab=vocab)
@staticmethod
def compute_loss(
model_output: tf.Tensor,
gt: tf.Tensor,
seq_len: tf.Tensor,
) -> tf.Tensor:
"""Compute categorical cross-entropy loss for the model.
Sequences are masked after the EOS character.
Args:
            model_output: predicted logits of the model
            gt: the encoded tensor with gt labels
seq_len: lengths of each gt word inside the batch
Returns:
The loss of the model on the batch
"""
# Input length : number of timesteps
input_len = tf.shape(model_output)[1]
# Add one for additional <eos> token
seq_len = seq_len + 1
# One-hot gt labels
oh_gt = tf.one_hot(gt, depth=model_output.shape[2])
# Compute loss
cce = tf.nn.softmax_cross_entropy_with_logits(oh_gt, model_output)
# Compute mask
mask_values = tf.zeros_like(cce)
mask_2d = tf.sequence_mask(seq_len, input_len)
masked_loss = tf.where(mask_2d, cce, mask_values)
ce_loss = tf.math.divide(tf.reduce_sum(masked_loss, axis=1), tf.cast(seq_len, model_output.dtype))
return tf.expand_dims(ce_loss, axis=1)
def call(
self,
x: tf.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
features = self.feat_extractor(x, **kwargs)
# vertical max pooling --> (N, C, W)
pooled_features = tf.reduce_max(features, axis=1)
# holistic (N, C)
encoded = self.encoder(pooled_features, **kwargs)
if target is not None:
gt, seq_len = self.build_target(target)
seq_len = tf.cast(seq_len, tf.int32)
if kwargs.get("training", False) and target is None:
raise ValueError("Need to provide labels during training for teacher forcing")
decoded_features = self.decoder(features, encoded, gt=None if target is None else gt, **kwargs)
out: Dict[str, tf.Tensor] = {}
if self.exportable:
out["logits"] = decoded_features
return out
if return_model_output:
out["out_map"] = decoded_features
if target is None or return_preds:
# Post-process boxes
out["preds"] = self.postprocessor(decoded_features)
if target is not None:
out["loss"] = self.compute_loss(decoded_features, gt, seq_len)
return out
class SARPostProcessor(RecognitionPostProcessor):
"""Post processor for SAR architectures
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __call__(
self,
logits: tf.Tensor,
) -> List[Tuple[str, float]]:
# compute pred with argmax for attention models
out_idxs = tf.math.argmax(logits, axis=2)
# N x L
probs = tf.gather(tf.nn.softmax(logits, axis=-1), out_idxs, axis=-1, batch_dims=2)
# Take the minimum confidence of the sequence
probs = tf.math.reduce_min(probs, axis=1)
# decode raw output of the model with tf_label_to_idx
out_idxs = tf.cast(out_idxs, dtype="int32")
embedding = tf.constant(self._embedding, dtype=tf.string)
decoded_strings_pred = tf.strings.reduce_join(inputs=tf.nn.embedding_lookup(embedding, out_idxs), axis=-1)
decoded_strings_pred = tf.strings.split(decoded_strings_pred, "<eos>")
decoded_strings_pred = tf.sparse.to_dense(decoded_strings_pred.to_sparse(), default_value="not valid")[:, 0]
word_values = [word.decode() for word in decoded_strings_pred.numpy().tolist()]
return list(zip(word_values, probs.numpy().tolist()))
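# Usage sketch (illustrative only): decoding dummy SAR logits. The toy vocab and shapes below are
# assumptions for demonstration.
def _example_sar_postprocessor():  # pragma: no cover
    vocab = "abc"
    postprocessor = SARPostProcessor(vocab=vocab)
    logits = tf.random.uniform((2, 31, len(vocab) + 1))  # (N, max_length + 1, vocab_size + 1)
    return postprocessor(logits)  # list of (word, confidence) pairs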
def _sar(
arch: str,
pretrained: bool,
backbone_fn,
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> SAR:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = input_shape or _cfg["input_shape"]
_cfg["vocab"] = kwargs.get("vocab", _cfg["vocab"])
# Feature extractor
feat_extractor = backbone_fn(
pretrained=pretrained_backbone,
input_shape=_cfg["input_shape"],
include_top=False,
)
kwargs["vocab"] = _cfg["vocab"]
# Build the model
model = SAR(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def sar_resnet31(pretrained: bool = False, **kwargs: Any) -> SAR:
"""SAR with a resnet-31 feature extractor as described in `"Show, Attend and Read:A Simple and Strong
Baseline for Irregular Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import sar_resnet31
>>> model = sar_resnet31(pretrained=False)
>>> input_tensor = tf.random.uniform(shape=[1, 64, 256, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _sar("sar_resnet31", pretrained, resnet31, **kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models._utils import IntermediateLayerGetter
from doctr.datasets import VOCABS
from ...classification import resnet31
from ...utils.pytorch import load_pretrained_params
from ..core import RecognitionModel, RecognitionPostProcessor
__all__ = ["SAR", "sar_resnet31"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"sar_resnet31": {
"mean": (0.694, 0.695, 0.693),
"std": (0.299, 0.296, 0.301),
"input_shape": (3, 32, 128),
"vocab": VOCABS["french"],
"url": None,
},
}
class SAREncoder(nn.Module):
def __init__(self, in_feats: int, rnn_units: int, dropout_prob: float = 0.0) -> None:
super().__init__()
self.rnn = nn.LSTM(in_feats, rnn_units, 2, batch_first=True, dropout=dropout_prob)
self.linear = nn.Linear(rnn_units, rnn_units)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# (N, L, C) --> (N, T, C)
encoded = self.rnn(x)[0]
# (N, C)
return self.linear(encoded[:, -1, :])
class AttentionModule(nn.Module):
def __init__(self, feat_chans: int, state_chans: int, attention_units: int) -> None:
super().__init__()
self.feat_conv = nn.Conv2d(feat_chans, attention_units, kernel_size=3, padding=1)
# No need to add another bias since both tensors are summed together
self.state_conv = nn.Conv2d(state_chans, attention_units, kernel_size=1, bias=False)
self.attention_projector = nn.Conv2d(attention_units, 1, kernel_size=1, bias=False)
def forward(
self,
features: torch.Tensor, # (N, C, H, W)
hidden_state: torch.Tensor, # (N, C)
) -> torch.Tensor:
H_f, W_f = features.shape[2:]
# (N, feat_chans, H, W) --> (N, attention_units, H, W)
feat_projection = self.feat_conv(features)
# (N, state_chans, 1, 1) --> (N, attention_units, 1, 1)
hidden_state = hidden_state.view(hidden_state.size(0), hidden_state.size(1), 1, 1)
state_projection = self.state_conv(hidden_state)
state_projection = state_projection.expand(-1, -1, H_f, W_f)
# (N, attention_units, 1, 1) --> (N, attention_units, H_f, W_f)
attention_weights = torch.tanh(feat_projection + state_projection)
# (N, attention_units, H_f, W_f) --> (N, 1, H_f, W_f)
attention_weights = self.attention_projector(attention_weights)
B, C, H, W = attention_weights.size()
# (N, H, W) --> (N, 1, H, W)
attention_weights = torch.softmax(attention_weights.view(B, -1), dim=-1).view(B, C, H, W)
# fuse features and attention weights (N, C)
return (features * attention_weights).sum(dim=(2, 3))
class SARDecoder(nn.Module):
"""Implements decoder module of the SAR model
Args:
rnn_units: number of hidden units in recurrent cells
max_length: maximum length of a sequence
vocab_size: number of classes in the model alphabet
embedding_units: number of hidden embedding units
attention_units: number of hidden attention units
"""
def __init__(
self,
rnn_units: int,
max_length: int,
vocab_size: int,
embedding_units: int,
attention_units: int,
feat_chans: int = 512,
dropout_prob: float = 0.0,
) -> None:
super().__init__()
self.vocab_size = vocab_size
self.max_length = max_length
self.embed = nn.Linear(self.vocab_size + 1, embedding_units)
self.embed_tgt = nn.Embedding(embedding_units, self.vocab_size + 1)
self.attention_module = AttentionModule(feat_chans, rnn_units, attention_units)
self.lstm_cell = nn.LSTMCell(rnn_units, rnn_units)
self.output_dense = nn.Linear(2 * rnn_units, self.vocab_size + 1)
self.dropout = nn.Dropout(dropout_prob)
def forward(
self,
features: torch.Tensor, # (N, C, H, W)
holistic: torch.Tensor, # (N, C)
gt: Optional[torch.Tensor] = None, # (N, L)
) -> torch.Tensor:
if gt is not None:
gt_embedding = self.embed_tgt(gt)
logits_list: List[torch.Tensor] = []
for t in range(self.max_length + 1): # 32
if t == 0:
# step to init the first states of the LSTMCell
hidden_state_init = cell_state_init = torch.zeros(
features.size(0), features.size(1), device=features.device
)
hidden_state, cell_state = hidden_state_init, cell_state_init
prev_symbol = holistic
elif t == 1:
# step to init a 'blank' sequence of length vocab_size + 1 filled with zeros
# (N, vocab_size + 1) --> (N, embedding_units)
prev_symbol = torch.zeros(features.size(0), self.vocab_size + 1, device=features.device)
prev_symbol = self.embed(prev_symbol)
else:
if gt is not None:
# (N, embedding_units) -2 because of <bos> and <eos> (same)
prev_symbol = self.embed(gt_embedding[:, t - 2])
else:
# -1 to start at timestep where prev_symbol was initialized
index = logits_list[t - 1].argmax(-1)
# update prev_symbol with ones at the index of the previous logit vector
# (N, embedding_units)
prev_symbol = prev_symbol.scatter_(1, index.unsqueeze(1), 1)
# (N, C), (N, C) take the last hidden state and cell state from current timestep
hidden_state_init, cell_state_init = self.lstm_cell(prev_symbol, (hidden_state_init, cell_state_init))
hidden_state, cell_state = self.lstm_cell(hidden_state_init, (hidden_state, cell_state))
# (N, C, H, W), (N, C) --> (N, C)
glimpse = self.attention_module(features, hidden_state)
# (N, C), (N, C) --> (N, 2 * C)
logits = torch.cat([hidden_state, glimpse], dim=1)
logits = self.dropout(logits)
# (N, vocab_size + 1)
logits_list.append(self.output_dense(logits))
# (max_length + 1, N, vocab_size + 1) --> (N, max_length + 1, vocab_size + 1)
return torch.stack(logits_list[1:]).permute(1, 0, 2)
class SAR(nn.Module, RecognitionModel):
"""Implements a SAR architecture as described in `"Show, Attend and Read:A Simple and Strong Baseline for
Irregular Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
Args:
feature_extractor: the backbone serving as feature extractor
vocab: vocabulary used for encoding
rnn_units: number of hidden units in both encoder and decoder LSTM
embedding_units: number of embedding units
attention_units: number of hidden units in attention module
max_length: maximum word length handled by the model
dropout_prob: dropout probability of the encoder LSTM
exportable: onnx exportable returns only logits
cfg: dictionary containing information about the model
"""
def __init__(
self,
feature_extractor,
vocab: str,
rnn_units: int = 512,
embedding_units: int = 512,
attention_units: int = 512,
max_length: int = 30,
dropout_prob: float = 0.0,
input_shape: Tuple[int, int, int] = (3, 32, 128),
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.vocab = vocab
self.exportable = exportable
self.cfg = cfg
self.max_length = max_length + 1 # Add 1 timestep for EOS after the longest word
self.feat_extractor = feature_extractor
# Size the LSTM
self.feat_extractor.eval()
with torch.no_grad():
out_shape = self.feat_extractor(torch.zeros((1, *input_shape)))["features"].shape
# Switch back to original mode
self.feat_extractor.train()
self.encoder = SAREncoder(out_shape[1], rnn_units, dropout_prob)
self.decoder = SARDecoder(
rnn_units,
self.max_length,
len(self.vocab),
embedding_units,
attention_units,
dropout_prob=dropout_prob,
)
self.postprocessor = SARPostProcessor(vocab=vocab)
def forward(
self,
x: torch.Tensor,
target: Optional[List[str]] = None,
return_model_output: bool = False,
return_preds: bool = False,
) -> Dict[str, Any]:
features = self.feat_extractor(x)["features"]
# NOTE: use max instead of functional max_pool2d which leads to ONNX incompatibility (kernel_size)
# Vertical max pooling (N, C, H, W) --> (N, C, W)
pooled_features = features.max(dim=-2).values
# (N, W, C)
pooled_features = pooled_features.permute(0, 2, 1).contiguous()
# (N, C)
encoded = self.encoder(pooled_features)
if target is not None:
_gt, _seq_len = self.build_target(target)
gt, seq_len = torch.from_numpy(_gt).to(dtype=torch.long), torch.tensor(_seq_len)
gt, seq_len = gt.to(x.device), seq_len.to(x.device)
if self.training and target is None:
raise ValueError("Need to provide labels during training for teacher forcing")
decoded_features = self.decoder(features, encoded, gt=None if target is None else gt)
out: Dict[str, Any] = {}
if self.exportable:
out["logits"] = decoded_features
return out
if return_model_output:
out["out_map"] = decoded_features
if target is None or return_preds:
# Post-process boxes
out["preds"] = self.postprocessor(decoded_features)
if target is not None:
out["loss"] = self.compute_loss(decoded_features, gt, seq_len)
return out
@staticmethod
def compute_loss(
model_output: torch.Tensor,
gt: torch.Tensor,
seq_len: torch.Tensor,
) -> torch.Tensor:
"""Compute categorical cross-entropy loss for the model.
Sequences are masked after the EOS character.
Args:
model_output: predicted logits of the model
gt: the encoded tensor with gt labels
seq_len: lengths of each gt word inside the batch
Returns:
The loss of the model on the batch
"""
# Input length : number of timesteps
input_len = model_output.shape[1]
# Add one for additional <eos> token
seq_len = seq_len + 1
# Compute loss
# (N, L, vocab_size + 1)
cce = F.cross_entropy(model_output.permute(0, 2, 1), gt, reduction="none")
mask_2d = torch.arange(input_len, device=model_output.device)[None, :] >= seq_len[:, None]
cce[mask_2d] = 0
ce_loss = cce.sum(1) / seq_len.to(dtype=model_output.dtype)
return ce_loss.mean()
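# Usage sketch (illustrative only): the loss masks every timestep after the <eos> position. The
# shapes below assume max_length + 1 = 31 decoding steps and a toy vocab of 3 characters.
def _example_sar_loss():  # pragma: no cover
    vocab_size = 3
    model_output = torch.rand(2, 31, vocab_size + 1)  # (N, L, vocab_size + 1)
    gt = torch.randint(0, vocab_size + 1, (2, 31))  # encoded targets
    seq_len = torch.tensor([5, 12])  # true word lengths (before <eos>)
    return SAR.compute_loss(model_output, gt, seq_len)  # scalar loss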
class SARPostProcessor(RecognitionPostProcessor):
"""Post processor for SAR architectures
Args:
vocab: string containing the ordered sequence of supported characters
"""
def __call__(
self,
logits: torch.Tensor,
) -> List[Tuple[str, float]]:
# compute pred with argmax for attention models
out_idxs = logits.argmax(-1)
# N x L
probs = torch.gather(torch.softmax(logits, -1), -1, out_idxs.unsqueeze(-1)).squeeze(-1)
# Take the minimum confidence of the sequence
probs = probs.min(dim=1).values.detach().cpu()
# Manual decoding
word_values = [
"".join(self._embedding[idx] for idx in encoded_seq).split("<eos>")[0]
for encoded_seq in out_idxs.detach().cpu().numpy()
]
return list(zip(word_values, probs.numpy().tolist()))
def _sar(
arch: str,
pretrained: bool,
backbone_fn: Callable[[bool], nn.Module],
layer: str,
pretrained_backbone: bool = True,
ignore_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> SAR:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["vocab"] = kwargs.get("vocab", _cfg["vocab"])
_cfg["input_shape"] = kwargs.get("input_shape", _cfg["input_shape"])
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(pretrained_backbone),
{layer: "features"},
)
kwargs["vocab"] = _cfg["vocab"]
kwargs["input_shape"] = _cfg["input_shape"]
# Build the model
model = SAR(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
# The number of classes is not the same as the number of classes in the pretrained model =>
# remove the last layer weights
_ignore_keys = ignore_keys if _cfg["vocab"] != default_cfgs[arch]["vocab"] else None
load_pretrained_params(model, default_cfgs[arch]["url"], ignore_keys=_ignore_keys)
return model
def sar_resnet31(pretrained: bool = False, **kwargs: Any) -> SAR:
"""SAR with a resnet-31 feature extractor as described in `"Show, Attend and Read:A Simple and Strong
Baseline for Irregular Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
>>> import torch
>>> from doctr.models import sar_resnet31
>>> model = sar_resnet31(pretrained=False)
>>> input_tensor = torch.rand((1, 3, 32, 128))
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text recognition dataset
Returns:
text recognition architecture
"""
return _sar(
"sar_resnet31",
pretrained,
resnet31,
"10",
ignore_keys=[
"decoder.embed.weight",
"decoder.embed_tgt.weight",
"decoder.output_dense.weight",
"decoder.output_dense.bias",
],
**kwargs,
)
|
from doctr.file_utils import is_tf_available
if is_tf_available():
from .tensorflow import *
else:
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List, Tuple, Union
import numpy as np
import tensorflow as tf
from doctr.models.preprocessor import PreProcessor
from doctr.utils.repr import NestedObject
from ..core import RecognitionModel
from ._utils import remap_preds, split_crops
__all__ = ["RecognitionPredictor"]
class RecognitionPredictor(NestedObject):
"""Implements an object able to identify character sequences in images
Args:
pre_processor: transform inputs for easier batched model inference
        model: core recognition architecture
        split_wide_crops: whether to use crop splitting for high aspect ratio crops
"""
_children_names: List[str] = ["pre_processor", "model"]
def __init__(
self,
pre_processor: PreProcessor,
model: RecognitionModel,
split_wide_crops: bool = True,
) -> None:
super().__init__()
self.pre_processor = pre_processor
self.model = model
self.split_wide_crops = split_wide_crops
self.critical_ar = 8 # Critical aspect ratio
self.dil_factor = 1.4 # Dilation factor to overlap the crops
self.target_ar = 6 # Target aspect ratio
def __call__(
self,
crops: List[Union[np.ndarray, tf.Tensor]],
**kwargs: Any,
) -> List[Tuple[str, float]]:
if len(crops) == 0:
return []
# Dimension check
if any(crop.ndim != 3 for crop in crops):
raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
# Split crops that are too wide
remapped = False
if self.split_wide_crops:
new_crops, crop_map, remapped = split_crops(crops, self.critical_ar, self.target_ar, self.dil_factor)
if remapped:
crops = new_crops
# Resize & batch them
processed_batches = self.pre_processor(crops)
# Forward it
raw = [
self.model(batch, return_preds=True, training=False, **kwargs)["preds"] # type: ignore[operator]
for batch in processed_batches
]
# Process outputs
out = [charseq for batch in raw for charseq in batch]
# Remap crops
if self.split_wide_crops and remapped:
out = remap_preds(out, crop_map, self.dil_factor)
return out
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List, Sequence, Tuple, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
from ._utils import remap_preds, split_crops
__all__ = ["RecognitionPredictor"]
class RecognitionPredictor(nn.Module):
"""Implements an object able to identify character sequences in images
Args:
pre_processor: transform inputs for easier batched model inference
        model: core recognition architecture
        split_wide_crops: whether to use crop splitting for high aspect ratio crops
"""
def __init__(
self,
pre_processor: PreProcessor,
model: nn.Module,
split_wide_crops: bool = True,
) -> None:
super().__init__()
self.pre_processor = pre_processor
self.model = model.eval()
self.split_wide_crops = split_wide_crops
self.critical_ar = 8 # Critical aspect ratio
self.dil_factor = 1.4 # Dilation factor to overlap the crops
self.target_ar = 6 # Target aspect ratio
@torch.no_grad()
def forward(
self,
crops: Sequence[Union[np.ndarray, torch.Tensor]],
**kwargs: Any,
) -> List[Tuple[str, float]]:
if len(crops) == 0:
return []
# Dimension check
if any(crop.ndim != 3 for crop in crops):
raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
# Split crops that are too wide
remapped = False
if self.split_wide_crops:
new_crops, crop_map, remapped = split_crops(
crops, # type: ignore[arg-type]
self.critical_ar,
self.target_ar,
self.dil_factor,
isinstance(crops[0], np.ndarray),
)
if remapped:
crops = new_crops
# Resize & batch them
processed_batches = self.pre_processor(crops)
# Forward it
_device = next(self.model.parameters()).device
raw = [
self.model(batch.to(device=_device), return_preds=True, **kwargs)["preds"] for batch in processed_batches
]
# Process outputs
out = [charseq for batch in raw for charseq in batch]
# Remap crops
if self.split_wide_crops and remapped:
out = remap_preds(out, crop_map, self.dil_factor)
return out
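# Usage sketch (illustrative only): this predictor is normally built through the `recognition_predictor`
# zoo helper, which wires the pre-processor and model together. Shapes below are assumptions.
def _example_recognition_predictor():  # pragma: no cover
    from doctr.models import recognition_predictor
    predictor = recognition_predictor("crnn_vgg16_bn", pretrained=False)
    crop = (255 * np.random.rand(32, 128, 3)).astype(np.uint8)  # a single word crop (H, W, 3)
    return predictor([crop])  # list of (word, confidence) pairs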
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List, Tuple, Union
import numpy as np
from ..utils import merge_multi_strings
__all__ = ["split_crops", "remap_preds"]
def split_crops(
crops: List[np.ndarray],
max_ratio: float,
target_ratio: int,
dilation: float,
channels_last: bool = True,
) -> Tuple[List[np.ndarray], List[Union[int, Tuple[int, int]]], bool]:
"""Chunk crops horizontally to match a given aspect ratio
Args:
crops: list of numpy array of shape (H, W, 3) if channels_last or (3, H, W) otherwise
max_ratio: the maximum aspect ratio that won't trigger the chunk
target_ratio: when crops are chunked, they will be chunked to match this aspect ratio
dilation: the width dilation of final chunks (to provide some overlaps)
channels_last: whether the numpy array has dimensions in channels last order
Returns:
a tuple with the new crops, their mapping, and a boolean specifying whether any remap is required
"""
_remap_required = False
crop_map: List[Union[int, Tuple[int, int]]] = []
new_crops: List[np.ndarray] = []
for crop in crops:
h, w = crop.shape[:2] if channels_last else crop.shape[-2:]
aspect_ratio = w / h
if aspect_ratio > max_ratio:
# Determine the number of crops, reference aspect ratio = 4 = 128 / 32
num_subcrops = int(aspect_ratio // target_ratio)
# Find the new widths, additional dilation factor to overlap crops
width = dilation * w / num_subcrops
centers = [(w / num_subcrops) * (1 / 2 + idx) for idx in range(num_subcrops)]
# Get the crops
if channels_last:
_crops = [
crop[:, max(0, int(round(center - width / 2))) : min(w - 1, int(round(center + width / 2))), :]
for center in centers
]
else:
_crops = [
crop[:, :, max(0, int(round(center - width / 2))) : min(w - 1, int(round(center + width / 2)))]
for center in centers
]
# Avoid sending zero-sized crops
_crops = [crop for crop in _crops if all(s > 0 for s in crop.shape)]
# Record the slice of crops
crop_map.append((len(new_crops), len(new_crops) + len(_crops)))
new_crops.extend(_crops)
# At least one crop will require merging
_remap_required = True
else:
crop_map.append(len(new_crops))
new_crops.append(crop)
return new_crops, crop_map, _remap_required
def remap_preds(
preds: List[Tuple[str, float]], crop_map: List[Union[int, Tuple[int, int]]], dilation: float
) -> List[Tuple[str, float]]:
remapped_out = []
for _idx in crop_map:
# Crop hasn't been split
if isinstance(_idx, int):
remapped_out.append(preds[_idx])
else:
# unzip
vals, probs = zip(*preds[_idx[0] : _idx[1]])
# Merge the string values
remapped_out.append((merge_multi_strings(vals, dilation), min(probs))) # type: ignore[arg-type]
return remapped_out
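# Usage sketch (illustrative only): a very wide crop is chunked into overlapping sub-crops while a
# regular crop passes through untouched. Shapes and thresholds below are assumptions for demonstration.
def _example_split_crops():  # pragma: no cover
    wide = np.zeros((32, 512, 3), dtype=np.uint8)  # aspect ratio 16 > max_ratio
    regular = np.zeros((32, 128, 3), dtype=np.uint8)  # aspect ratio 4, left as-is
    new_crops, crop_map, remapped = split_crops([wide, regular], max_ratio=8, target_ratio=6, dilation=1.4)
    # crop_map holds a (start, end) slice for the split crop and a plain index for the regular one
    return new_crops, crop_map, remapped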
|
from .transformer import *
from .vision_transformer import *
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Any, Tuple
import tensorflow as tf
from tensorflow.keras import layers
from doctr.utils.repr import NestedObject
__all__ = ["PatchEmbedding"]
class PatchEmbedding(layers.Layer, NestedObject):
"""Compute 2D patch embeddings with cls token and positional encoding"""
def __init__(self, input_shape: Tuple[int, int, int], embed_dim: int) -> None:
super().__init__()
height, width, _ = input_shape
# calculate patch size
# NOTE: this is different from the original implementation
self.patch_size = (height // (height // 8), width // (width // 8))
self.grid_size = (self.patch_size[0], self.patch_size[1])
self.num_patches = self.patch_size[0] * self.patch_size[1]
self.cls_token = self.add_weight(shape=(1, 1, embed_dim), initializer="zeros", trainable=True, name="cls_token")
self.positions = self.add_weight(
shape=(1, self.num_patches + 1, embed_dim),
initializer="zeros",
trainable=True,
name="positions",
)
self.proj = layers.Dense(embed_dim, kernel_initializer="he_normal", name="projection")
def interpolate_pos_encoding(self, embeddings: tf.Tensor, height: int, width: int) -> tf.Tensor:
"""
100 % borrowed from:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/modeling_tf_vit.py
        This method allows interpolating the pre-trained position encodings, so the model can be used on
        higher-resolution images.
Source:
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py
"""
seq_len, dim = embeddings.shape[1:]
num_patches = seq_len - 1
num_positions = self.positions.shape[1] - 1
if num_patches == num_positions and height == width:
return self.positions
class_pos_embed = self.positions[:, :1]
patch_pos_embed = self.positions[:, 1:]
h0 = height // self.patch_size[0]
w0 = width // self.patch_size[1]
patch_pos_embed = tf.image.resize(
images=tf.reshape(
patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
),
size=(h0, w0),
method="bilinear",
)
shape = patch_pos_embed.shape
assert h0 == shape[-3], "height of interpolated patch embedding doesn't match"
assert w0 == shape[-2], "width of interpolated patch embedding doesn't match"
patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim))
return tf.concat(values=(class_pos_embed, patch_pos_embed), axis=1)
def call(self, x: tf.Tensor, **kwargs: Any) -> tf.Tensor:
B, H, W, C = x.shape
assert H % self.patch_size[0] == 0, "Image height must be divisible by patch height"
assert W % self.patch_size[1] == 0, "Image width must be divisible by patch width"
# patchify image without convolution
# adapted from:
# https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial15/Vision_Transformer.html
        # NOTE: tf.image.extract_patches has no ONNX support and Conv2D with padding=valid consumes too much memory
patches = tf.reshape(
x, (B, H // self.patch_size[0], self.patch_size[0], W // self.patch_size[1], self.patch_size[1], C)
)
patches = tf.transpose(a=patches, perm=(0, 1, 3, 2, 4, 5))
# (B, H', W', C, ph, pw) -> (B, H'*W', C*ph*pw)
patches = tf.reshape(tensor=patches, shape=(B, -1, self.patch_size[0] * self.patch_size[1] * C))
patches = self.proj(patches, **kwargs) # (batch_size, num_patches, d_model)
cls_tokens = tf.repeat(self.cls_token, B, axis=0) # (batch_size, 1, d_model)
        # concatenate cls_tokens to patches
embeddings = tf.concat([cls_tokens, patches], axis=1) # (batch_size, num_patches + 1, d_model)
# add positions to embeddings
embeddings += self.interpolate_pos_encoding(embeddings, H, W) # (batch_size, num_patches + 1, d_model)
return embeddings
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Tuple
import torch
from torch import nn
__all__ = ["PatchEmbedding"]
class PatchEmbedding(nn.Module):
"""Compute 2D patch embeddings with cls token and positional encoding"""
def __init__(self, input_shape: Tuple[int, int, int], embed_dim: int) -> None:
super().__init__()
channels, height, width = input_shape
# calculate patch size
# NOTE: this is different from the original implementation
self.patch_size = (height // (height // 8), width // (width // 8))
self.grid_size = (self.patch_size[0], self.patch_size[1])
self.num_patches = self.patch_size[0] * self.patch_size[1]
self.cls_token = nn.Parameter(torch.randn(1, 1, embed_dim))
self.positions = nn.Parameter(torch.randn(1, self.num_patches + 1, embed_dim))
self.proj = nn.Linear((channels * self.patch_size[0] * self.patch_size[1]), embed_dim)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
100 % borrowed from:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/modeling_vit.py
        This method allows interpolating the pre-trained position encodings, so the model can be used on
        higher-resolution images.
Source:
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.positions.shape[1] - 1
if num_patches == num_positions and height == width:
return self.positions
class_pos_embed = self.positions[:, 0]
patch_pos_embed = self.positions[:, 1:]
dim = embeddings.shape[-1]
h0 = float(height // self.patch_size[0])
w0 = float(width // self.patch_size[1])
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
h0, w0 = h0 + 0.1, w0 + 0.1
patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
scale_factor=(h0 / math.sqrt(num_positions), w0 / math.sqrt(num_positions)),
mode="bilinear",
align_corners=False,
recompute_scale_factor=True,
)
assert int(h0) == patch_pos_embed.shape[-2], "height of interpolated patch embedding doesn't match"
assert int(w0) == patch_pos_embed.shape[-1], "width of interpolated patch embedding doesn't match"
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
assert H % self.patch_size[0] == 0, "Image height must be divisible by patch height"
assert W % self.patch_size[1] == 0, "Image width must be divisible by patch width"
# patchify image without convolution
# adapted from:
# https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial15/Vision_Transformer.html
# NOTE: patchify with Conv2d works only with padding="valid" correctly on smaller images
# and has currently no ONNX support so we use this workaround
x = x.reshape(
B, C, (H // self.patch_size[0]), self.patch_size[0], (W // self.patch_size[1]), self.patch_size[1]
)
# (B, H', W', C, ph, pw) -> (B, H'*W', C*ph*pw)
patches = x.permute(0, 2, 4, 1, 3, 5).flatten(1, 2).flatten(2, 4)
patches = self.proj(patches) # (batch_size, num_patches, d_model)
cls_tokens = self.cls_token.expand(B, -1, -1) # (batch_size, 1, d_model)
        # concatenate cls_tokens to patches
embeddings = torch.cat([cls_tokens, patches], dim=1) # (batch_size, num_patches + 1, d_model)
# add positions to embeddings
embeddings += self.interpolate_pos_encoding(embeddings, H, W) # (batch_size, num_patches + 1, d_model)
return embeddings
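# Usage sketch (illustrative only): patchifying a dummy 32x128 crop. With this resolution the layer
# builds 8x8 patches, so the output sequence is 4 * 16 patches plus the cls token.
def _example_patch_embedding():  # pragma: no cover
    embedder = PatchEmbedding(input_shape=(3, 32, 128), embed_dim=384)
    x = torch.rand(1, 3, 32, 128)
    return embedder(x)  # (1, 4 * 16 + 1, 384)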
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Any, Callable, Optional, Tuple
import tensorflow as tf
from tensorflow.keras import layers
from doctr.utils.repr import NestedObject
__all__ = ["Decoder", "PositionalEncoding", "EncoderBlock"]
tf.config.run_functions_eagerly(True)
class PositionalEncoding(layers.Layer, NestedObject):
"""Compute positional encoding"""
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000) -> None:
super(PositionalEncoding, self).__init__()
self.dropout = layers.Dropout(rate=dropout)
# Compute the positional encodings once in log space.
pe = tf.Variable(tf.zeros((max_len, d_model)))
position = tf.cast(
tf.expand_dims(tf.experimental.numpy.arange(start=0, stop=max_len), axis=1), dtype=tf.float32
)
div_term = tf.math.exp(
tf.cast(tf.experimental.numpy.arange(start=0, stop=d_model, step=2), dtype=tf.float32)
* -(math.log(10000.0) / d_model)
)
pe = pe.numpy()
pe[:, 0::2] = tf.math.sin(position * div_term)
pe[:, 1::2] = tf.math.cos(position * div_term)
self.pe = tf.expand_dims(tf.convert_to_tensor(pe), axis=0)
def call(
self,
x: tf.Tensor,
**kwargs: Any,
) -> tf.Tensor:
"""
Args:
x: embeddings (batch, max_len, d_model)
**kwargs: additional arguments
Returns:
positional embeddings (batch, max_len, d_model)
"""
if x.dtype == tf.float16: # amp fix: cast to half
x = x + tf.cast(self.pe[:, : x.shape[1]], dtype=tf.half)
else:
x = x + self.pe[:, : x.shape[1]]
return self.dropout(x, **kwargs)
@tf.function
def scaled_dot_product_attention(
query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, mask: Optional[tf.Tensor] = None
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Scaled Dot-Product Attention"""
scores = tf.matmul(query, tf.transpose(key, perm=[0, 1, 3, 2])) / math.sqrt(query.shape[-1])
if mask is not None:
# NOTE: to ensure the ONNX compatibility, tf.where works only with bool type condition
scores = tf.where(mask == False, float("-inf"), scores) # noqa: E712
p_attn = tf.nn.softmax(scores, axis=-1)
return tf.matmul(p_attn, value), p_attn
class PositionwiseFeedForward(layers.Layer, NestedObject):
"""Position-wise Feed-Forward Network"""
def __init__(
        self, d_model: int, ffd: int, dropout: float = 0.1, activation_fct: Callable[[Any], Any] = layers.ReLU()
) -> None:
super(PositionwiseFeedForward, self).__init__()
self.activation_fct = activation_fct
self.first_linear = layers.Dense(ffd, kernel_initializer=tf.initializers.he_uniform())
self.sec_linear = layers.Dense(d_model, kernel_initializer=tf.initializers.he_uniform())
self.dropout = layers.Dropout(rate=dropout)
def call(self, x: tf.Tensor, **kwargs: Any) -> tf.Tensor:
x = self.first_linear(x, **kwargs)
x = self.activation_fct(x)
x = self.dropout(x, **kwargs)
x = self.sec_linear(x, **kwargs)
x = self.dropout(x, **kwargs)
return x
class MultiHeadAttention(layers.Layer, NestedObject):
"""Multi-Head Attention"""
def __init__(self, num_heads: int, d_model: int, dropout: float = 0.1) -> None:
super().__init__()
assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
self.d_k = d_model // num_heads
self.num_heads = num_heads
self.linear_layers = [layers.Dense(d_model, kernel_initializer=tf.initializers.he_uniform()) for _ in range(3)]
self.output_linear = layers.Dense(d_model, kernel_initializer=tf.initializers.he_uniform())
def call(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
        mask: Optional[tf.Tensor] = None,
**kwargs: Any,
) -> tf.Tensor:
batch_size = query.shape[0]
# linear projections of Q, K, V
query, key, value = [
tf.transpose(
tf.reshape(linear(x, **kwargs), shape=[batch_size, -1, self.num_heads, self.d_k]), perm=[0, 2, 1, 3]
)
for linear, x in zip(self.linear_layers, (query, key, value))
]
# apply attention on all the projected vectors in batch
x, attn = scaled_dot_product_attention(query, key, value, mask=mask)
# Concat attention heads
x = tf.transpose(x, perm=[0, 2, 1, 3])
x = tf.reshape(x, shape=[batch_size, -1, self.num_heads * self.d_k])
return self.output_linear(x, **kwargs)
class EncoderBlock(layers.Layer, NestedObject):
"""Transformer Encoder Block"""
def __init__(
self,
num_layers: int,
num_heads: int,
d_model: int,
dff: int, # hidden dimension of the feedforward network
dropout: float,
activation_fct: Callable[[Any], Any] = layers.ReLU(),
) -> None:
super().__init__()
self.num_layers = num_layers
self.layer_norm_input = layers.LayerNormalization(epsilon=1e-5)
self.layer_norm_attention = layers.LayerNormalization(epsilon=1e-5)
self.layer_norm_output = layers.LayerNormalization(epsilon=1e-5)
self.dropout = layers.Dropout(rate=dropout)
self.attention = [MultiHeadAttention(num_heads, d_model, dropout) for _ in range(self.num_layers)]
self.position_feed_forward = [
PositionwiseFeedForward(d_model, dff, dropout, activation_fct) for _ in range(self.num_layers)
]
def call(self, x: tf.Tensor, mask: Optional[tf.Tensor] = None, **kwargs: Any) -> tf.Tensor:
output = x
for i in range(self.num_layers):
normed_output = self.layer_norm_input(output, **kwargs)
output = output + self.dropout(
self.attention[i](normed_output, normed_output, normed_output, mask, **kwargs),
**kwargs,
)
normed_output = self.layer_norm_attention(output, **kwargs)
output = output + self.dropout(self.position_feed_forward[i](normed_output, **kwargs), **kwargs)
# (batch_size, seq_len, d_model)
return self.layer_norm_output(output, **kwargs)
class Decoder(layers.Layer, NestedObject):
"""Transformer Decoder"""
def __init__(
self,
num_layers: int,
num_heads: int,
d_model: int,
vocab_size: int,
dropout: float = 0.2,
dff: int = 2048, # hidden dimension of the feedforward network
maximum_position_encoding: int = 50,
) -> None:
super(Decoder, self).__init__()
self.num_layers = num_layers
self.d_model = d_model
self.layer_norm_input = layers.LayerNormalization(epsilon=1e-5)
self.layer_norm_masked_attention = layers.LayerNormalization(epsilon=1e-5)
self.layer_norm_attention = layers.LayerNormalization(epsilon=1e-5)
self.layer_norm_output = layers.LayerNormalization(epsilon=1e-5)
self.dropout = layers.Dropout(rate=dropout)
self.embed = layers.Embedding(vocab_size, d_model)
self.positional_encoding = PositionalEncoding(d_model, dropout, maximum_position_encoding)
self.attention = [MultiHeadAttention(num_heads, d_model, dropout) for _ in range(self.num_layers)]
self.source_attention = [MultiHeadAttention(num_heads, d_model, dropout) for _ in range(self.num_layers)]
self.position_feed_forward = [PositionwiseFeedForward(d_model, dff, dropout) for _ in range(self.num_layers)]
def call(
self,
tgt: tf.Tensor,
memory: tf.Tensor,
source_mask: Optional[tf.Tensor] = None,
target_mask: Optional[tf.Tensor] = None,
**kwargs: Any,
) -> tf.Tensor:
tgt = self.embed(tgt, **kwargs) * math.sqrt(self.d_model)
pos_enc_tgt = self.positional_encoding(tgt, **kwargs)
output = pos_enc_tgt
for i in range(self.num_layers):
normed_output = self.layer_norm_input(output, **kwargs)
output = output + self.dropout(
self.attention[i](normed_output, normed_output, normed_output, target_mask, **kwargs),
**kwargs,
)
normed_output = self.layer_norm_masked_attention(output, **kwargs)
output = output + self.dropout(
self.source_attention[i](normed_output, memory, memory, source_mask, **kwargs),
**kwargs,
)
normed_output = self.layer_norm_attention(output, **kwargs)
output = output + self.dropout(self.position_feed_forward[i](normed_output, **kwargs), **kwargs)
# (batch_size, seq_len, d_model)
return self.layer_norm_output(output, **kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# This module 'transformer.py' is inspired by https://github.com/wenwenyu/MASTER-pytorch and Decoder is borrowed
import math
from typing import Any, Callable, Optional, Tuple
import torch
from torch import nn
__all__ = ["Decoder", "PositionalEncoding", "EncoderBlock"]
class PositionalEncoding(nn.Module):
"""Compute positional encoding"""
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000) -> None:
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe.unsqueeze(0))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x: embeddings (batch, max_len, d_model)
Returns:
positional embeddings (batch, max_len, d_model)
"""
x = x + self.pe[:, : x.size(1)] # type: ignore
return self.dropout(x)
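# Usage sketch (illustrative only): adding positional information to dummy token embeddings.
def _example_positional_encoding():  # pragma: no cover
    pos_enc = PositionalEncoding(d_model=512, dropout=0.0, max_len=50)
    embeddings = torch.zeros(2, 10, 512)  # (batch, seq_len, d_model)
    return pos_enc(embeddings)  # same shape, with sin/cos positional terms added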
def scaled_dot_product_attention(
query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Scaled Dot-Product Attention"""
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query.size(-1))
if mask is not None:
# NOTE: to ensure the ONNX compatibility, masked_fill works only with int equal condition
scores = scores.masked_fill(mask == 0, float("-inf"))
p_attn = torch.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
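# Usage sketch (illustrative only): single-head attention with a causal (lower-triangular) mask, so
# each position only attends to itself and earlier positions. Shapes are assumptions.
def _example_causal_attention():  # pragma: no cover
    q = k = v = torch.rand(2, 5, 64)  # (batch, seq_len, head_dim)
    causal_mask = torch.tril(torch.ones(5, 5, dtype=torch.bool))
    context, attn = scaled_dot_product_attention(q, k, v, mask=causal_mask)
    return context, attn  # (2, 5, 64), (2, 5, 5)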
class PositionwiseFeedForward(nn.Sequential):
"""Position-wise Feed-Forward Network"""
def __init__(
self, d_model: int, ffd: int, dropout: float = 0.1, activation_fct: Callable[[Any], Any] = nn.ReLU()
) -> None:
super().__init__( # type: ignore[call-overload]
nn.Linear(d_model, ffd),
activation_fct,
nn.Dropout(p=dropout),
nn.Linear(ffd, d_model),
nn.Dropout(p=dropout),
)
class MultiHeadAttention(nn.Module):
"""Multi-Head Attention"""
def __init__(self, num_heads: int, d_model: int, dropout: float = 0.1) -> None:
super().__init__()
assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
self.d_k = d_model // num_heads
self.num_heads = num_heads
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
    def forward(
        self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
batch_size = query.size(0)
# linear projections of Q, K, V
query, key, value = [
linear(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
for linear, x in zip(self.linear_layers, (query, key, value))
]
# apply attention on all the projected vectors in batch
x, attn = scaled_dot_product_attention(query, key, value, mask=mask)
# Concat attention heads
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.d_k)
return self.output_linear(x)
class EncoderBlock(nn.Module):
"""Transformer Encoder Block"""
def __init__(
self,
num_layers: int,
num_heads: int,
d_model: int,
dff: int, # hidden dimension of the feedforward network
dropout: float,
activation_fct: Callable[[Any], Any] = nn.ReLU(),
) -> None:
super().__init__()
self.num_layers = num_layers
self.layer_norm_input = nn.LayerNorm(d_model, eps=1e-5)
self.layer_norm_attention = nn.LayerNorm(d_model, eps=1e-5)
self.layer_norm_output = nn.LayerNorm(d_model, eps=1e-5)
self.dropout = nn.Dropout(dropout)
self.attention = nn.ModuleList(
[MultiHeadAttention(num_heads, d_model, dropout) for _ in range(self.num_layers)]
)
self.position_feed_forward = nn.ModuleList(
[PositionwiseFeedForward(d_model, dff, dropout, activation_fct) for _ in range(self.num_layers)]
)
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
output = x
for i in range(self.num_layers):
normed_output = self.layer_norm_input(output)
output = output + self.dropout(self.attention[i](normed_output, normed_output, normed_output, mask))
normed_output = self.layer_norm_attention(output)
output = output + self.dropout(self.position_feed_forward[i](normed_output))
# (batch_size, seq_len, d_model)
return self.layer_norm_output(output)
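# Usage sketch (illustrative only): a small pre-norm encoder stack on dummy embeddings. The
# hyper-parameters below are arbitrary and much smaller than those used by the actual models.
def _example_encoder_block():  # pragma: no cover
    encoder = EncoderBlock(num_layers=2, num_heads=4, d_model=64, dff=128, dropout=0.1)
    x = torch.rand(2, 10, 64)  # (batch, seq_len, d_model)
    return encoder(x)  # (batch, seq_len, d_model)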
class Decoder(nn.Module):
"""Transformer Decoder"""
def __init__(
self,
num_layers: int,
num_heads: int,
d_model: int,
vocab_size: int,
dropout: float = 0.2,
dff: int = 2048, # hidden dimension of the feedforward network
maximum_position_encoding: int = 50,
) -> None:
super(Decoder, self).__init__()
self.num_layers = num_layers
self.d_model = d_model
self.layer_norm_input = nn.LayerNorm(d_model, eps=1e-5)
self.layer_norm_masked_attention = nn.LayerNorm(d_model, eps=1e-5)
self.layer_norm_attention = nn.LayerNorm(d_model, eps=1e-5)
self.layer_norm_output = nn.LayerNorm(d_model, eps=1e-5)
self.dropout = nn.Dropout(dropout)
self.embed = nn.Embedding(vocab_size, d_model)
self.positional_encoding = PositionalEncoding(d_model, dropout, maximum_position_encoding)
self.attention = nn.ModuleList(
[MultiHeadAttention(num_heads, d_model, dropout) for _ in range(self.num_layers)]
)
self.source_attention = nn.ModuleList(
[MultiHeadAttention(num_heads, d_model, dropout) for _ in range(self.num_layers)]
)
self.position_feed_forward = nn.ModuleList(
[PositionwiseFeedForward(d_model, dff, dropout) for _ in range(self.num_layers)]
)
def forward(
self,
tgt: torch.Tensor,
memory: torch.Tensor,
source_mask: Optional[torch.Tensor] = None,
target_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
tgt = self.embed(tgt) * math.sqrt(self.d_model)
pos_enc_tgt = self.positional_encoding(tgt)
output = pos_enc_tgt
for i in range(self.num_layers):
normed_output = self.layer_norm_input(output)
output = output + self.dropout(self.attention[i](normed_output, normed_output, normed_output, target_mask))
normed_output = self.layer_norm_masked_attention(output)
output = output + self.dropout(self.source_attention[i](normed_output, memory, memory, source_mask))
normed_output = self.layer_norm_attention(output)
output = output + self.dropout(self.position_feed_forward[i](normed_output))
# (batch_size, seq_len, d_model)
return self.layer_norm_output(output)
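# Usage sketch (illustrative only): running the decoder with a causal target mask over dummy token
# indices and encoder memory. All sizes below are assumptions for demonstration.
def _example_decoder():  # pragma: no cover
    vocab_size = 10
    decoder = Decoder(num_layers=2, num_heads=4, d_model=64, vocab_size=vocab_size, dropout=0.1, dff=128)
    tgt = torch.randint(0, vocab_size, (2, 7))  # (batch, target_len) token indices
    memory = torch.rand(2, 20, 64)  # (batch, source_len, d_model) from the encoder
    target_mask = torch.tril(torch.ones(7, 7, dtype=torch.bool))  # causal mask
    return decoder(tgt, memory, source_mask=None, target_mask=target_mask)  # (2, 7, 64)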
|
from doctr.file_utils import is_tf_available
if is_tf_available():
from .tensorflow import *
else:
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List, Union
import numpy as np
import tensorflow as tf
from doctr.io.elements import Document
from doctr.models._utils import estimate_orientation, get_language
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.utils.geometry import rotate_boxes, rotate_image
from doctr.utils.repr import NestedObject
from .base import _OCRPredictor
__all__ = ["OCRPredictor"]
class OCRPredictor(NestedObject, _OCRPredictor):
"""Implements an object able to localize and identify text elements in a set of documents
Args:
det_predictor: detection module
reco_predictor: recognition module
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
straighten_pages: if True, estimates the page general orientation based on the median line orientation.
Then, rotates page before passing it to the deep learning modules. The final predictions will be remapped
            accordingly. Doing so will improve performance for documents with page-uniform rotations.
detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
page. Doing so will slightly deteriorate the overall latency.
detect_language: if True, the language prediction will be added to the predictions for each
page. Doing so will slightly deteriorate the overall latency.
kwargs: keyword args of `DocumentBuilder`
"""
_children_names = ["det_predictor", "reco_predictor", "doc_builder"]
def __init__(
self,
det_predictor: DetectionPredictor,
reco_predictor: RecognitionPredictor,
assume_straight_pages: bool = True,
straighten_pages: bool = False,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs: Any,
) -> None:
self.det_predictor = det_predictor
self.reco_predictor = reco_predictor
_OCRPredictor.__init__(
self, assume_straight_pages, straighten_pages, preserve_aspect_ratio, symmetric_pad, **kwargs
)
self.detect_orientation = detect_orientation
self.detect_language = detect_language
def __call__(
self,
pages: List[Union[np.ndarray, tf.Tensor]],
**kwargs: Any,
) -> Document:
# Dimension check
if any(page.ndim != 3 for page in pages):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
origin_page_shapes = [page.shape[:2] for page in pages]
# Detect document rotation and rotate pages
if self.detect_orientation:
origin_page_orientations = [estimate_orientation(page) for page in pages]
orientations = [
{"value": orientation_page, "confidence": 1.0} for orientation_page in origin_page_orientations
]
else:
orientations = None
if self.straighten_pages:
origin_page_orientations = (
origin_page_orientations if self.detect_orientation else [estimate_orientation(page) for page in pages]
)
pages = [rotate_image(page, -angle, expand=True) for page, angle in zip(pages, origin_page_orientations)]
# Localize text elements
loc_preds_dict = self.det_predictor(pages, **kwargs)
assert all(
len(loc_pred) == 1 for loc_pred in loc_preds_dict
), "Detection Model in ocr_predictor should output only one class"
loc_preds: List[np.ndarray] = [list(loc_pred.values())[0] for loc_pred in loc_preds_dict]
# Rectify crops if aspect ratio
loc_preds = self._remove_padding(pages, loc_preds)
# Crop images
crops, loc_preds = self._prepare_crops(
pages, loc_preds, channels_last=True, assume_straight_pages=self.assume_straight_pages
)
# Rectify crop orientation
if not self.assume_straight_pages:
crops, loc_preds = self._rectify_crops(crops, loc_preds)
# Identify character sequences
word_preds = self.reco_predictor([crop for page_crops in crops for crop in page_crops], **kwargs)
boxes, text_preds = self._process_predictions(loc_preds, word_preds)
if self.detect_language:
languages = [get_language(" ".join([item[0] for item in text_pred])) for text_pred in text_preds]
languages_dict = [{"value": lang[0], "confidence": lang[1]} for lang in languages]
else:
languages_dict = None
# Rotate back pages and boxes while keeping original image size
if self.straighten_pages:
boxes = [
rotate_boxes(
page_boxes,
angle,
orig_shape=page.shape[:2] if isinstance(page, np.ndarray) else page.shape[-2:],
target_shape=mask, # type: ignore[arg-type]
)
for page_boxes, page, angle, mask in zip(boxes, pages, origin_page_orientations, origin_page_shapes)
]
out = self.doc_builder(
boxes,
text_preds,
origin_page_shapes, # type: ignore[arg-type]
orientations,
languages_dict,
)
return out
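if __name__ == "__main__":
    # Minimal usage sketch (assumptions: the `detection_predictor` and `recognition_predictor`
    # zoo helpers are importable from doctr.models and pretrained weights can be downloaded).
    # It only illustrates how the two sub-predictors are chained by OCRPredictor on a dummy page.
    from doctr.models import detection_predictor, recognition_predictor
    det = detection_predictor("db_resnet50", pretrained=True)
    reco = recognition_predictor("crnn_vgg16_bn", pretrained=True)
    predictor = OCRPredictor(det, reco, assume_straight_pages=True)
    page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)  # H x W x C uint8 page
    document = predictor([page])
    print(document.render())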
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List, Union
import numpy as np
import torch
from torch import nn
from doctr.io.elements import Document
from doctr.models._utils import estimate_orientation, get_language
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.utils.geometry import rotate_boxes, rotate_image
from .base import _OCRPredictor
__all__ = ["OCRPredictor"]
class OCRPredictor(nn.Module, _OCRPredictor):
"""Implements an object able to localize and identify text elements in a set of documents
Args:
det_predictor: detection module
reco_predictor: recognition module
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
straighten_pages: if True, estimates the page general orientation based on the median line orientation.
Then, rotates page before passing it to the deep learning modules. The final predictions will be remapped
            accordingly. Doing so will improve performance for documents with page-uniform rotations.
detect_orientation: if True, the estimated general page orientation will be added to the predictions for each
page. Doing so will slightly deteriorate the overall latency.
detect_language: if True, the language prediction will be added to the predictions for each
page. Doing so will slightly deteriorate the overall latency.
kwargs: keyword args of `DocumentBuilder`
"""
def __init__(
self,
det_predictor: DetectionPredictor,
reco_predictor: RecognitionPredictor,
assume_straight_pages: bool = True,
straighten_pages: bool = False,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = True,
detect_orientation: bool = False,
detect_language: bool = False,
**kwargs: Any,
) -> None:
nn.Module.__init__(self)
self.det_predictor = det_predictor.eval() # type: ignore[attr-defined]
self.reco_predictor = reco_predictor.eval() # type: ignore[attr-defined]
_OCRPredictor.__init__(
self, assume_straight_pages, straighten_pages, preserve_aspect_ratio, symmetric_pad, **kwargs
)
self.detect_orientation = detect_orientation
self.detect_language = detect_language
@torch.no_grad()
def forward(
self,
pages: List[Union[np.ndarray, torch.Tensor]],
**kwargs: Any,
) -> Document:
# Dimension check
if any(page.ndim != 3 for page in pages):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
origin_page_shapes = [page.shape[:2] if isinstance(page, np.ndarray) else page.shape[-2:] for page in pages]
# Detect document rotation and rotate pages
if self.detect_orientation:
origin_page_orientations = [estimate_orientation(page) for page in pages] # type: ignore[arg-type]
orientations = [
{"value": orientation_page, "confidence": 1.0} for orientation_page in origin_page_orientations
]
else:
orientations = None
if self.straighten_pages:
origin_page_orientations = (
origin_page_orientations
if self.detect_orientation
else [estimate_orientation(page) for page in pages] # type: ignore[arg-type]
)
pages = [
rotate_image(page, -angle, expand=True) # type: ignore[arg-type]
for page, angle in zip(pages, origin_page_orientations)
]
# Localize text elements
loc_preds = self.det_predictor(pages, **kwargs)
assert all(
len(loc_pred) == 1 for loc_pred in loc_preds
), "Detection Model in ocr_predictor should output only one class"
loc_preds = [list(loc_pred.values())[0] for loc_pred in loc_preds]
# Check whether crop mode should be switched to channels first
channels_last = len(pages) == 0 or isinstance(pages[0], np.ndarray)
# Rectify crops if aspect ratio
loc_preds = self._remove_padding(pages, loc_preds) # type: ignore[arg-type]
# Crop images
crops, loc_preds = self._prepare_crops(
pages, # type: ignore[arg-type]
loc_preds,
channels_last=channels_last,
assume_straight_pages=self.assume_straight_pages,
)
# Rectify crop orientation
if not self.assume_straight_pages:
crops, loc_preds = self._rectify_crops(crops, loc_preds)
# Identify character sequences
word_preds = self.reco_predictor([crop for page_crops in crops for crop in page_crops], **kwargs)
boxes, text_preds = self._process_predictions(loc_preds, word_preds)
if self.detect_language:
languages = [get_language(" ".join([item[0] for item in text_pred])) for text_pred in text_preds]
languages_dict = [{"value": lang[0], "confidence": lang[1]} for lang in languages]
else:
languages_dict = None
# Rotate back pages and boxes while keeping original image size
if self.straighten_pages:
boxes = [
rotate_boxes(
page_boxes,
angle,
orig_shape=page.shape[:2]
if isinstance(page, np.ndarray)
else page.shape[1:], # type: ignore[arg-type]
target_shape=mask, # type: ignore[arg-type]
)
for page_boxes, page, angle, mask in zip(boxes, pages, origin_page_orientations, origin_page_shapes)
]
out = self.doc_builder(
boxes,
text_preds,
[page.shape[:2] if channels_last else page.shape[-2:] for page in pages], # type: ignore[misc]
orientations,
languages_dict,
)
return out
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List, Optional, Tuple
import numpy as np
from doctr.models.builder import DocumentBuilder
from doctr.utils.geometry import extract_crops, extract_rcrops
from .._utils import rectify_crops, rectify_loc_preds
from ..classification import crop_orientation_predictor
from ..classification.predictor import CropOrientationPredictor
__all__ = ["_OCRPredictor"]
class _OCRPredictor:
"""Implements an object able to localize and identify text elements in a set of documents
Args:
assume_straight_pages: if True, speeds up the inference by assuming you only pass straight pages
without rotated textual elements.
straighten_pages: if True, estimates the page general orientation based on the median line orientation.
Then, rotates page before passing it to the deep learning modules. The final predictions will be remapped
            accordingly. Doing so will improve performance for documents with page-uniform rotations.
        preserve_aspect_ratio: if True, resize preserving the aspect ratio (with padding)
        symmetric_pad: if True and preserve_aspect_ratio is True, pad the image symmetrically.
kwargs: keyword args of `DocumentBuilder`
"""
crop_orientation_predictor: Optional[CropOrientationPredictor]
def __init__(
self,
assume_straight_pages: bool = True,
straighten_pages: bool = False,
preserve_aspect_ratio: bool = True,
symmetric_pad: bool = True,
**kwargs: Any,
) -> None:
self.assume_straight_pages = assume_straight_pages
self.straighten_pages = straighten_pages
self.crop_orientation_predictor = None if assume_straight_pages else crop_orientation_predictor(pretrained=True)
self.doc_builder = DocumentBuilder(**kwargs)
self.preserve_aspect_ratio = preserve_aspect_ratio
self.symmetric_pad = symmetric_pad
@staticmethod
def _generate_crops(
pages: List[np.ndarray],
loc_preds: List[np.ndarray],
channels_last: bool,
assume_straight_pages: bool = False,
) -> List[List[np.ndarray]]:
extraction_fn = extract_crops if assume_straight_pages else extract_rcrops
crops = [
extraction_fn(page, _boxes[:, :4], channels_last=channels_last) # type: ignore[operator]
for page, _boxes in zip(pages, loc_preds)
]
return crops
@staticmethod
def _prepare_crops(
pages: List[np.ndarray],
loc_preds: List[np.ndarray],
channels_last: bool,
assume_straight_pages: bool = False,
) -> Tuple[List[List[np.ndarray]], List[np.ndarray]]:
crops = _OCRPredictor._generate_crops(pages, loc_preds, channels_last, assume_straight_pages)
# Avoid sending zero-sized crops
is_kept = [[all(s > 0 for s in crop.shape) for crop in page_crops] for page_crops in crops]
crops = [
[crop for crop, _kept in zip(page_crops, page_kept) if _kept]
for page_crops, page_kept in zip(crops, is_kept)
]
loc_preds = [_boxes[_kept] for _boxes, _kept in zip(loc_preds, is_kept)]
return crops, loc_preds
def _rectify_crops(
self,
crops: List[List[np.ndarray]],
loc_preds: List[np.ndarray],
) -> Tuple[List[List[np.ndarray]], List[np.ndarray]]:
# Work at a page level
orientations = [self.crop_orientation_predictor(page_crops) for page_crops in crops] # type: ignore[misc]
rect_crops = [rectify_crops(page_crops, orientation) for page_crops, orientation in zip(crops, orientations)]
rect_loc_preds = [
rectify_loc_preds(page_loc_preds, orientation) if len(page_loc_preds) > 0 else page_loc_preds
for page_loc_preds, orientation in zip(loc_preds, orientations)
]
return rect_crops, rect_loc_preds # type: ignore[return-value]
def _remove_padding(
self,
pages: List[np.ndarray],
loc_preds: List[np.ndarray],
) -> List[np.ndarray]:
if self.preserve_aspect_ratio:
# Rectify loc_preds to remove padding
rectified_preds = []
for page, loc_pred in zip(pages, loc_preds):
h, w = page.shape[0], page.shape[1]
if h > w:
# y unchanged, dilate x coord
if self.symmetric_pad:
if self.assume_straight_pages:
loc_pred[:, [0, 2]] = np.clip((loc_pred[:, [0, 2]] - 0.5) * h / w + 0.5, 0, 1)
else:
loc_pred[:, :, 0] = np.clip((loc_pred[:, :, 0] - 0.5) * h / w + 0.5, 0, 1)
else:
if self.assume_straight_pages:
loc_pred[:, [0, 2]] *= h / w
else:
loc_pred[:, :, 0] *= h / w
elif w > h:
# x unchanged, dilate y coord
if self.symmetric_pad:
if self.assume_straight_pages:
loc_pred[:, [1, 3]] = np.clip((loc_pred[:, [1, 3]] - 0.5) * w / h + 0.5, 0, 1)
else:
loc_pred[:, :, 1] = np.clip((loc_pred[:, :, 1] - 0.5) * w / h + 0.5, 0, 1)
else:
if self.assume_straight_pages:
loc_pred[:, [1, 3]] *= w / h
else:
loc_pred[:, :, 1] *= w / h
rectified_preds.append(loc_pred)
return rectified_preds
return loc_preds
@staticmethod
def _process_predictions(
loc_preds: List[np.ndarray],
word_preds: List[Tuple[str, float]],
) -> Tuple[List[np.ndarray], List[List[Tuple[str, float]]]]:
text_preds = []
if len(loc_preds) > 0:
# Text
_idx = 0
for page_boxes in loc_preds:
text_preds.append(word_preds[_idx : _idx + page_boxes.shape[0]])
_idx += page_boxes.shape[0]
return loc_preds, text_preds
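if __name__ == "__main__":
    # Quick numeric sketch of `_remove_padding` (illustrative values): on a portrait page
    # (h > w) that was resized with symmetric padding, the relative x-coordinates are dilated
    # around 0.5 by a factor h / w and clipped back to [0, 1].
    predictor = _OCRPredictor(assume_straight_pages=True, preserve_aspect_ratio=True)
    page = np.zeros((200, 100, 3), dtype=np.uint8)  # h=200, w=100
    boxes = np.array([[0.4, 0.1, 0.6, 0.2, 0.9]], dtype=np.float32)  # xmin, ymin, xmax, ymax, score
    (rectified,) = predictor._remove_padding([page], [boxes])
    print(rectified)  # x-coords become 0.3 and 0.7: (x - 0.5) * 200 / 100 + 0.5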
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Any, List, Tuple, Union
import numpy as np
import tensorflow as tf
from doctr.transforms import Normalize, Resize
from doctr.utils.multithreading import multithread_exec
from doctr.utils.repr import NestedObject
__all__ = ["PreProcessor"]
class PreProcessor(NestedObject):
"""Implements an abstract preprocessor object which performs casting, resizing, batching and normalization.
Args:
output_size: expected size of each page in format (H, W)
batch_size: the size of page batches
mean: mean value of the training distribution by channel
std: standard deviation of the training distribution by channel
"""
_children_names: List[str] = ["resize", "normalize"]
def __init__(
self,
output_size: Tuple[int, int],
batch_size: int,
mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
std: Tuple[float, float, float] = (1.0, 1.0, 1.0),
fp16: bool = False,
**kwargs: Any,
) -> None:
self.batch_size = batch_size
self.resize = Resize(output_size, **kwargs)
# Perform the division by 255 at the same time
self.normalize = Normalize(mean, std)
def batch_inputs(self, samples: List[tf.Tensor]) -> List[tf.Tensor]:
"""Gather samples into batches for inference purposes
Args:
samples: list of samples (tf.Tensor)
Returns:
list of batched samples
"""
num_batches = int(math.ceil(len(samples) / self.batch_size))
batches = [
tf.stack(samples[idx * self.batch_size : min((idx + 1) * self.batch_size, len(samples))], axis=0)
for idx in range(int(num_batches))
]
return batches
def sample_transforms(self, x: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
if x.ndim != 3:
raise AssertionError("expected list of 3D Tensors")
if isinstance(x, np.ndarray):
if x.dtype not in (np.uint8, np.float32):
raise TypeError("unsupported data type for numpy.ndarray")
x = tf.convert_to_tensor(x)
elif x.dtype not in (tf.uint8, tf.float16, tf.float32):
raise TypeError("unsupported data type for torch.Tensor")
# Data type & 255 division
if x.dtype == tf.uint8:
x = tf.image.convert_image_dtype(x, dtype=tf.float32)
# Resizing
x = self.resize(x)
return x
def __call__(self, x: Union[tf.Tensor, np.ndarray, List[Union[tf.Tensor, np.ndarray]]]) -> List[tf.Tensor]:
"""Prepare document data for model forwarding
Args:
x: list of images (np.array) or tensors (already resized and batched)
Returns:
list of page batches
"""
# Input type check
if isinstance(x, (np.ndarray, tf.Tensor)):
if x.ndim != 4:
raise AssertionError("expected 4D Tensor")
if isinstance(x, np.ndarray):
if x.dtype not in (np.uint8, np.float32):
raise TypeError("unsupported data type for numpy.ndarray")
x = tf.convert_to_tensor(x)
elif x.dtype not in (tf.uint8, tf.float16, tf.float32):
raise TypeError("unsupported data type for torch.Tensor")
# Data type & 255 division
if x.dtype == tf.uint8:
x = tf.image.convert_image_dtype(x, dtype=tf.float32)
# Resizing
if (x.shape[1], x.shape[2]) != self.resize.output_size:
x = tf.image.resize(x, self.resize.output_size, method=self.resize.method)
batches = [x]
elif isinstance(x, list) and all(isinstance(sample, (np.ndarray, tf.Tensor)) for sample in x):
# Sample transform (to tensor, resize)
samples = list(multithread_exec(self.sample_transforms, x))
# Batching
batches = self.batch_inputs(samples)
else:
raise TypeError(f"invalid input type: {type(x)}")
# Batch transforms (normalize)
batches = list(multithread_exec(self.normalize, batches))
return batches
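if __name__ == "__main__":
    # Sketch of the list-of-pages path (shapes are illustrative): variable-sized uint8 pages
    # are cast to float32, resized to `output_size` and grouped into batches of `batch_size`
    # before normalization.
    processor = PreProcessor(output_size=(1024, 1024), batch_size=2)
    pages = [(255 * np.random.rand(600, 800, 3)).astype(np.uint8) for _ in range(3)]
    batches = processor(pages)
    print([batch.shape for batch in batches])  # two batches: (2, 1024, 1024, 3) and (1, 1024, 1024, 3)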
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from torch import nn
from torchvision.transforms import functional as F
from torchvision.transforms import transforms as T
from doctr.transforms import Resize
from doctr.utils.multithreading import multithread_exec
__all__ = ["PreProcessor"]
class PreProcessor(nn.Module):
"""Implements an abstract preprocessor object which performs casting, resizing, batching and normalization.
Args:
output_size: expected size of each page in format (H, W)
batch_size: the size of page batches
mean: mean value of the training distribution by channel
std: standard deviation of the training distribution by channel
"""
def __init__(
self,
output_size: Tuple[int, int],
batch_size: int,
mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
std: Tuple[float, float, float] = (1.0, 1.0, 1.0),
fp16: bool = False,
**kwargs: Any,
) -> None:
super().__init__()
self.batch_size = batch_size
self.resize: T.Resize = Resize(output_size, **kwargs)
# Perform the division by 255 at the same time
self.normalize = T.Normalize(mean, std)
def batch_inputs(self, samples: List[torch.Tensor]) -> List[torch.Tensor]:
"""Gather samples into batches for inference purposes
Args:
samples: list of samples of shape (C, H, W)
Returns:
list of batched samples (*, C, H, W)
"""
num_batches = int(math.ceil(len(samples) / self.batch_size))
batches = [
torch.stack(samples[idx * self.batch_size : min((idx + 1) * self.batch_size, len(samples))], dim=0)
for idx in range(int(num_batches))
]
return batches
def sample_transforms(self, x: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:
if x.ndim != 3:
raise AssertionError("expected list of 3D Tensors")
if isinstance(x, np.ndarray):
if x.dtype not in (np.uint8, np.float32):
raise TypeError("unsupported data type for numpy.ndarray")
x = torch.from_numpy(x.copy()).permute(2, 0, 1)
elif x.dtype not in (torch.uint8, torch.float16, torch.float32):
raise TypeError("unsupported data type for torch.Tensor")
# Resizing
x = self.resize(x)
# Data type
if x.dtype == torch.uint8:
x = x.to(dtype=torch.float32).div(255).clip(0, 1) # type: ignore[union-attr]
else:
x = x.to(dtype=torch.float32) # type: ignore[union-attr]
return x
def __call__(self, x: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]]) -> List[torch.Tensor]:
"""Prepare document data for model forwarding
Args:
x: list of images (np.array) or tensors (already resized and batched)
Returns:
list of page batches
"""
# Input type check
if isinstance(x, (np.ndarray, torch.Tensor)):
if x.ndim != 4:
raise AssertionError("expected 4D Tensor")
if isinstance(x, np.ndarray):
if x.dtype not in (np.uint8, np.float32):
raise TypeError("unsupported data type for numpy.ndarray")
x = torch.from_numpy(x.copy()).permute(0, 3, 1, 2)
elif x.dtype not in (torch.uint8, torch.float16, torch.float32):
raise TypeError("unsupported data type for torch.Tensor")
# Resizing
if x.shape[-2] != self.resize.size[0] or x.shape[-1] != self.resize.size[1]:
x = F.resize(x, self.resize.size, interpolation=self.resize.interpolation)
# Data type
if x.dtype == torch.uint8: # type: ignore[union-attr]
x = x.to(dtype=torch.float32).div(255).clip(0, 1) # type: ignore[union-attr]
else:
x = x.to(dtype=torch.float32) # type: ignore[union-attr]
batches = [x]
elif isinstance(x, list) and all(isinstance(sample, (np.ndarray, torch.Tensor)) for sample in x):
# Sample transform (to tensor, resize)
samples = list(multithread_exec(self.sample_transforms, x))
# Batching
batches = self.batch_inputs(samples)
else:
raise TypeError(f"invalid input type: {type(x)}")
# Batch transforms (normalize)
batches = list(multithread_exec(self.normalize, batches))
return batches
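if __name__ == "__main__":
    # Sketch of the pre-batched path (shapes are illustrative): a 4D uint8 tensor in
    # (N, C, H, W) is resized if needed, scaled to [0, 1] as float32 and normalized,
    # and comes back as a single batch.
    processor = PreProcessor(output_size=(1024, 1024), batch_size=2)
    batch = torch.randint(0, 255, (4, 3, 512, 512), dtype=torch.uint8)
    (out,) = processor(batch)
    print(out.shape, out.dtype)  # torch.Size([4, 3, 1024, 1024]) torch.float32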
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List
from doctr.file_utils import is_tf_available, is_torch_available
from .. import detection
from ..preprocessor import PreProcessor
from .predictor import DetectionPredictor
__all__ = ["detection_predictor"]
ARCHS: List[str]
ROT_ARCHS: List[str]
if is_tf_available():
ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18", "linknet_resnet34", "linknet_resnet50"]
ROT_ARCHS = ["linknet_resnet18_rotation"]
elif is_torch_available():
ARCHS = [
"db_resnet34",
"db_resnet50",
"db_mobilenet_v3_large",
"linknet_resnet18",
"linknet_resnet34",
"linknet_resnet50",
]
ROT_ARCHS = ["db_resnet50_rotation"]
def _predictor(arch: Any, pretrained: bool, assume_straight_pages: bool = True, **kwargs: Any) -> DetectionPredictor:
if isinstance(arch, str):
if arch not in ARCHS + ROT_ARCHS:
raise ValueError(f"unknown architecture '{arch}'")
if arch not in ROT_ARCHS and not assume_straight_pages:
raise AssertionError(
"You are trying to use a model trained on straight pages while not assuming"
" your pages are straight. If you have only straight documents, don't pass"
" assume_straight_pages=False, otherwise you should use one of these archs:"
f"{ROT_ARCHS}"
)
_model = detection.__dict__[arch](
pretrained=pretrained,
pretrained_backbone=kwargs.get("pretrained_backbone", True),
assume_straight_pages=assume_straight_pages,
)
else:
if not isinstance(arch, (detection.DBNet, detection.LinkNet)):
raise ValueError(f"unknown architecture: {type(arch)}")
_model = arch
_model.assume_straight_pages = assume_straight_pages
kwargs.pop("pretrained_backbone", None)
kwargs["mean"] = kwargs.get("mean", _model.cfg["mean"])
kwargs["std"] = kwargs.get("std", _model.cfg["std"])
kwargs["batch_size"] = kwargs.get("batch_size", 1)
predictor = DetectionPredictor(
PreProcessor(_model.cfg["input_shape"][:-1] if is_tf_available() else _model.cfg["input_shape"][1:], **kwargs),
_model,
)
return predictor
def detection_predictor(
arch: Any = "db_resnet50", pretrained: bool = False, assume_straight_pages: bool = True, **kwargs: Any
) -> DetectionPredictor:
"""Text detection architecture.
>>> import numpy as np
>>> from doctr.models import detection_predictor
>>> model = detection_predictor(arch='db_resnet50', pretrained=True)
>>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
>>> out = model([input_page])
Args:
arch: name of the architecture or model itself to use (e.g. 'db_resnet50')
pretrained: If True, returns a model pre-trained on our text detection dataset
assume_straight_pages: If True, fit straight boxes to the page
Returns:
Detection predictor
"""
return _predictor(arch, pretrained, assume_straight_pages, **kwargs)
|
from .differentiable_binarization import *
from .linknet import *
from .zoo import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List
import cv2
import numpy as np
from doctr.utils.repr import NestedObject
__all__ = ["DetectionPostProcessor"]
class DetectionPostProcessor(NestedObject):
"""Abstract class to postprocess the raw output of the model
Args:
box_thresh (float): minimal objectness score to consider a box
bin_thresh (float): threshold to apply to segmentation raw heatmap
        assume_straight_pages (bool): if True, fit straight boxes only
"""
def __init__(self, box_thresh: float = 0.5, bin_thresh: float = 0.5, assume_straight_pages: bool = True) -> None:
self.box_thresh = box_thresh
self.bin_thresh = bin_thresh
self.assume_straight_pages = assume_straight_pages
self._opening_kernel: np.ndarray = np.ones((3, 3), dtype=np.uint8)
def extra_repr(self) -> str:
return f"bin_thresh={self.bin_thresh}, box_thresh={self.box_thresh}"
@staticmethod
def box_score(pred: np.ndarray, points: np.ndarray, assume_straight_pages: bool = True) -> float:
"""Compute the confidence score for a polygon : mean of the p values on the polygon
Args:
pred (np.ndarray): p map returned by the model
Returns:
polygon objectness
"""
h, w = pred.shape[:2]
if assume_straight_pages:
xmin = np.clip(np.floor(points[:, 0].min()).astype(np.int32), 0, w - 1)
xmax = np.clip(np.ceil(points[:, 0].max()).astype(np.int32), 0, w - 1)
ymin = np.clip(np.floor(points[:, 1].min()).astype(np.int32), 0, h - 1)
ymax = np.clip(np.ceil(points[:, 1].max()).astype(np.int32), 0, h - 1)
return pred[ymin : ymax + 1, xmin : xmax + 1].mean()
else:
mask: np.ndarray = np.zeros((h, w), np.int32)
cv2.fillPoly(mask, [points.astype(np.int32)], 1.0)
product = pred * mask
return np.sum(product) / np.count_nonzero(product)
def bitmap_to_boxes(
self,
pred: np.ndarray,
bitmap: np.ndarray,
) -> np.ndarray:
raise NotImplementedError
def __call__(
self,
proba_map,
) -> List[List[np.ndarray]]:
"""Performs postprocessing for a list of model outputs
Args:
proba_map: probability map of shape (N, H, W, C)
Returns:
            list of N class predictions (for each input sample), where each class prediction is a list of C tensors
            of shape (*, 5) or (*, 6)
"""
if proba_map.ndim != 4:
raise AssertionError(f"arg `proba_map` is expected to be 4-dimensional, got {proba_map.ndim}.")
# Erosion + dilation on the binary map
bin_map = [
[
cv2.morphologyEx(bmap[..., idx], cv2.MORPH_OPEN, self._opening_kernel)
for idx in range(proba_map.shape[-1])
]
for bmap in (proba_map >= self.bin_thresh).astype(np.uint8)
]
return [
[self.bitmap_to_boxes(pmaps[..., idx], bmaps[idx]) for idx in range(proba_map.shape[-1])]
for pmaps, bmaps in zip(proba_map, bin_map)
]
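if __name__ == "__main__":
    # Sketch of the scoring rule on a toy probability map (values are illustrative):
    # with straight pages, the objectness of a box is the mean probability over the
    # axis-aligned patch it covers.
    proba = np.zeros((8, 8), dtype=np.float32)
    proba[2:5, 2:6] = 0.8
    pts = np.array([[2, 2], [6, 2], [6, 5], [2, 5]], dtype=np.float32)
    score = DetectionPostProcessor.box_score(proba, pts, assume_straight_pages=True)
    print(score)  # 0.48: 12 of the 20 covered cells carry probability 0.8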
|
from doctr.file_utils import is_tf_available
if is_tf_available():
from .tensorflow import *
else:
from .pytorch import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import tensorflow as tf
__all__ = ["erode", "dilate"]
def erode(x: tf.Tensor, kernel_size: int) -> tf.Tensor:
"""Performs erosion on a given tensor
Args:
x: boolean tensor of shape (N, H, W, C)
kernel_size: the size of the kernel to use for erosion
Returns:
the eroded tensor
"""
return 1 - tf.nn.max_pool2d(1 - x, kernel_size, strides=1, padding="SAME")
def dilate(x: tf.Tensor, kernel_size: int) -> tf.Tensor:
"""Performs dilation on a given tensor
Args:
x: boolean tensor of shape (N, H, W, C)
kernel_size: the size of the kernel to use for dilation
Returns:
the dilated tensor
"""
return tf.nn.max_pool2d(x, kernel_size, strides=1, padding="SAME")
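if __name__ == "__main__":
    # Tiny sketch: erosion is implemented as 1 - max_pool(1 - x), so a lone foreground
    # pixel disappears after a 3x3 erosion, and erode followed by dilate acts as a
    # morphological opening that removes speckle noise.
    import numpy as np
    binary = np.zeros((1, 5, 5, 1), dtype=np.float32)
    binary[0, 2, 2, 0] = 1.0  # a single "on" pixel
    x = tf.convert_to_tensor(binary)
    print(float(tf.reduce_sum(erode(x, 3))))            # 0.0: the lone pixel is eroded away
    print(float(tf.reduce_sum(dilate(erode(x, 3), 3))))  # 0.0: opening leaves the map empty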
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from torch import Tensor
from torch.nn.functional import max_pool2d
__all__ = ["erode", "dilate"]
def erode(x: Tensor, kernel_size: int) -> Tensor:
"""Performs erosion on a given tensor
Args:
x: boolean tensor of shape (N, C, H, W)
kernel_size: the size of the kernel to use for erosion
Returns:
the eroded tensor
"""
_pad = (kernel_size - 1) // 2
return 1 - max_pool2d(1 - x, kernel_size, stride=1, padding=_pad)
def dilate(x: Tensor, kernel_size: int) -> Tensor:
"""Performs dilation on a given tensor
Args:
x: boolean tensor of shape (N, C, H, W)
kernel_size: the size of the kernel to use for dilation
Returns:
the dilated tensor
"""
_pad = (kernel_size - 1) // 2
return max_pool2d(x, kernel_size, stride=1, padding=_pad)
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications import ResNet50
from doctr.file_utils import CLASS_NAME
from doctr.models.utils import IntermediateLayerGetter, conv_sequence, load_pretrained_params
from doctr.utils.repr import NestedObject
from ...classification import mobilenet_v3_large
from .base import DBPostProcessor, _DBNet
__all__ = ["DBNet", "db_resnet50", "db_mobilenet_v3_large"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"db_resnet50": {
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"input_shape": (1024, 1024, 3),
"url": "https://doctr-static.mindee.com/models?id=v0.2.0/db_resnet50-adcafc63.zip&src=0",
},
"db_mobilenet_v3_large": {
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"input_shape": (1024, 1024, 3),
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/db_mobilenet_v3_large-8c16d5bf.zip&src=0",
},
}
class FeaturePyramidNetwork(layers.Layer, NestedObject):
"""Feature Pyramid Network as described in `"Feature Pyramid Networks for Object Detection"
<https://arxiv.org/pdf/1612.03144.pdf>`_.
Args:
channels: number of channel to output
"""
def __init__(
self,
channels: int,
) -> None:
super().__init__()
self.channels = channels
self.upsample = layers.UpSampling2D(size=(2, 2), interpolation="nearest")
self.inner_blocks = [layers.Conv2D(channels, 1, strides=1, kernel_initializer="he_normal") for _ in range(4)]
self.layer_blocks = [self.build_upsampling(channels, dilation_factor=2**idx) for idx in range(4)]
@staticmethod
def build_upsampling(
channels: int,
dilation_factor: int = 1,
) -> layers.Layer:
"""Module which performs a 3x3 convolution followed by up-sampling
Args:
channels: number of output channels
dilation_factor (int): dilation factor to scale the convolution output before concatenation
Returns:
a keras.layers.Layer object, wrapping these operations in a sequential module
"""
_layers = conv_sequence(channels, "relu", True, kernel_size=3)
if dilation_factor > 1:
_layers.append(layers.UpSampling2D(size=(dilation_factor, dilation_factor), interpolation="nearest"))
module = keras.Sequential(_layers)
return module
def extra_repr(self) -> str:
return f"channels={self.channels}"
def call(
self,
x: List[tf.Tensor],
**kwargs: Any,
) -> tf.Tensor:
# Channel mapping
results = [block(fmap, **kwargs) for block, fmap in zip(self.inner_blocks, x)]
# Upsample & sum
        for idx in range(len(results) - 2, -1, -1):
            results[idx] += self.upsample(results[idx + 1])
# Conv & upsample
results = [block(fmap, **kwargs) for block, fmap in zip(self.layer_blocks, results)]
return layers.concatenate(results)
class DBNet(_DBNet, keras.Model, NestedObject):
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_.
Args:
        feature_extractor: the backbone serving as feature extractor
fpn_channels: number of channels each extracted feature maps is mapped to
assume_straight_pages: if True, fit straight bounding boxes only
exportable: onnx exportable returns only logits
cfg: the configuration dict of the model
class_names: list of class names
"""
_children_names: List[str] = ["feat_extractor", "fpn", "probability_head", "threshold_head", "postprocessor"]
def __init__(
self,
feature_extractor: IntermediateLayerGetter,
fpn_channels: int = 128, # to be set to 256 to represent the author's initial idea
bin_thresh: float = 0.3,
assume_straight_pages: bool = True,
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
class_names: List[str] = [CLASS_NAME],
) -> None:
super().__init__()
self.class_names = class_names
num_classes: int = len(self.class_names)
self.cfg = cfg
self.feat_extractor = feature_extractor
self.exportable = exportable
self.assume_straight_pages = assume_straight_pages
self.fpn = FeaturePyramidNetwork(channels=fpn_channels)
# Initialize kernels
_inputs = [layers.Input(shape=in_shape[1:]) for in_shape in self.feat_extractor.output_shape]
output_shape = tuple(self.fpn(_inputs).shape)
self.probability_head = keras.Sequential(
[
*conv_sequence(64, "relu", True, kernel_size=3, input_shape=output_shape[1:]),
layers.Conv2DTranspose(64, 2, strides=2, use_bias=False, kernel_initializer="he_normal"),
layers.BatchNormalization(),
layers.Activation("relu"),
layers.Conv2DTranspose(num_classes, 2, strides=2, kernel_initializer="he_normal"),
]
)
self.threshold_head = keras.Sequential(
[
*conv_sequence(64, "relu", True, kernel_size=3, input_shape=output_shape[1:]),
layers.Conv2DTranspose(64, 2, strides=2, use_bias=False, kernel_initializer="he_normal"),
layers.BatchNormalization(),
layers.Activation("relu"),
layers.Conv2DTranspose(num_classes, 2, strides=2, kernel_initializer="he_normal"),
]
)
self.postprocessor = DBPostProcessor(assume_straight_pages=assume_straight_pages, bin_thresh=bin_thresh)
def compute_loss(
self,
out_map: tf.Tensor,
thresh_map: tf.Tensor,
target: List[Dict[str, np.ndarray]],
) -> tf.Tensor:
"""Compute a batch of gts, masks, thresh_gts, thresh_masks from a list of boxes
and a list of masks for each image. From there it computes the loss with the model output
Args:
out_map: output feature map of the model of shape (N, H, W, C)
thresh_map: threshold map of shape (N, H, W, C)
            target: list of dictionaries where each dict has a `boxes` and a `flags` entry
Returns:
A loss tensor
"""
prob_map = tf.math.sigmoid(out_map)
thresh_map = tf.math.sigmoid(thresh_map)
seg_target, seg_mask, thresh_target, thresh_mask = self.build_target(target, out_map.shape, True)
seg_target = tf.convert_to_tensor(seg_target, dtype=out_map.dtype)
seg_mask = tf.convert_to_tensor(seg_mask, dtype=tf.bool)
thresh_target = tf.convert_to_tensor(thresh_target, dtype=out_map.dtype)
thresh_mask = tf.convert_to_tensor(thresh_mask, dtype=tf.bool)
# Compute balanced BCE loss for proba_map
bce_scale = 5.0
bce_loss = tf.keras.losses.binary_crossentropy(
seg_target[..., None],
out_map[..., None],
from_logits=True,
)[seg_mask]
neg_target = 1 - seg_target[seg_mask]
positive_count = tf.math.reduce_sum(seg_target[seg_mask])
negative_count = tf.math.reduce_min([tf.math.reduce_sum(neg_target), 3.0 * positive_count])
negative_loss = bce_loss * neg_target
negative_loss, _ = tf.nn.top_k(negative_loss, tf.cast(negative_count, tf.int32))
sum_losses = tf.math.reduce_sum(bce_loss * seg_target[seg_mask]) + tf.math.reduce_sum(negative_loss)
balanced_bce_loss = sum_losses / (positive_count + negative_count + 1e-6)
# Compute dice loss for approxbin_map
bin_map = 1 / (1 + tf.exp(-50.0 * (prob_map[seg_mask] - thresh_map[seg_mask])))
bce_min = tf.math.reduce_min(bce_loss)
weights = (bce_loss - bce_min) / (tf.math.reduce_max(bce_loss) - bce_min) + 1.0
inter = tf.math.reduce_sum(bin_map * seg_target[seg_mask] * weights)
union = tf.math.reduce_sum(bin_map) + tf.math.reduce_sum(seg_target[seg_mask]) + 1e-8
dice_loss = 1 - 2.0 * inter / union
# Compute l1 loss for thresh_map
l1_scale = 10.0
if tf.reduce_any(thresh_mask):
l1_loss = tf.math.reduce_mean(tf.math.abs(thresh_map[thresh_mask] - thresh_target[thresh_mask]))
else:
l1_loss = tf.constant(0.0)
return l1_scale * l1_loss + bce_scale * balanced_bce_loss + dice_loss
def call(
self,
x: tf.Tensor,
target: Optional[List[Dict[str, np.ndarray]]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
feat_maps = self.feat_extractor(x, **kwargs)
feat_concat = self.fpn(feat_maps, **kwargs)
logits = self.probability_head(feat_concat, **kwargs)
out: Dict[str, tf.Tensor] = {}
if self.exportable:
out["logits"] = logits
return out
if return_model_output or target is None or return_preds:
prob_map = tf.math.sigmoid(logits)
if return_model_output:
out["out_map"] = prob_map
if target is None or return_preds:
# Post-process boxes (keep only text predictions)
out["preds"] = [dict(zip(self.class_names, preds)) for preds in self.postprocessor(prob_map.numpy())]
if target is not None:
thresh_map = self.threshold_head(feat_concat, **kwargs)
loss = self.compute_loss(logits, thresh_map, target)
out["loss"] = loss
return out
def _db_resnet(
arch: str,
pretrained: bool,
backbone_fn,
fpn_layers: List[str],
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> DBNet:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = input_shape or _cfg["input_shape"]
if not kwargs.get("class_names", None):
kwargs["class_names"] = _cfg.get("class_names", [CLASS_NAME])
else:
kwargs["class_names"] = sorted(kwargs["class_names"])
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(
weights="imagenet" if pretrained_backbone else None,
include_top=False,
pooling=None,
input_shape=_cfg["input_shape"],
),
fpn_layers,
)
# Build the model
model = DBNet(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, _cfg["url"])
return model
def _db_mobilenet(
arch: str,
pretrained: bool,
backbone_fn,
fpn_layers: List[str],
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> DBNet:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = input_shape or _cfg["input_shape"]
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(
input_shape=_cfg["input_shape"],
include_top=False,
pretrained=pretrained_backbone,
),
fpn_layers,
)
# Build the model
model = DBNet(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, _cfg["url"])
return model
def db_resnet50(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a ResNet-50 backbone.
>>> import tensorflow as tf
>>> from doctr.models import db_resnet50
>>> model = db_resnet50(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _db_resnet(
"db_resnet50",
pretrained,
ResNet50,
["conv2_block3_out", "conv3_block4_out", "conv4_block6_out", "conv5_block3_out"],
**kwargs,
)
def db_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a mobilenet v3 large backbone.
>>> import tensorflow as tf
>>> from doctr.models import db_mobilenet_v3_large
>>> model = db_mobilenet_v3_large(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _db_mobilenet(
"db_mobilenet_v3_large",
pretrained,
mobilenet_v3_large,
["inverted_2", "inverted_5", "inverted_11", "final_block"],
**kwargs,
)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import resnet34, resnet50
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.ops.deform_conv import DeformConv2d
from doctr.file_utils import CLASS_NAME
from ...classification import mobilenet_v3_large
from ...utils import load_pretrained_params
from .base import DBPostProcessor, _DBNet
__all__ = ["DBNet", "db_resnet50", "db_resnet34", "db_mobilenet_v3_large", "db_resnet50_rotation"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"db_resnet50": {
"input_shape": (3, 1024, 1024),
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/db_resnet50-ac60cadc.pt&src=0",
},
"db_resnet34": {
"input_shape": (3, 1024, 1024),
"mean": (0.5, 0.5, 0.5),
"std": (1.0, 1.0, 1.0),
"url": None,
},
"db_mobilenet_v3_large": {
"input_shape": (3, 1024, 1024),
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"url": "https://doctr-static.mindee.com/models?id=v0.3.1/db_mobilenet_v3_large-fd62154b.pt&src=0",
},
"db_resnet50_rotation": {
"input_shape": (3, 1024, 1024),
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/db_resnet50-1138863a.pt&src=0",
},
}
class FeaturePyramidNetwork(nn.Module):
def __init__(
self,
in_channels: List[int],
out_channels: int,
deform_conv: bool = False,
) -> None:
super().__init__()
out_chans = out_channels // len(in_channels)
conv_layer = DeformConv2d if deform_conv else nn.Conv2d
self.in_branches = nn.ModuleList(
[
nn.Sequential(
conv_layer(chans, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
for idx, chans in enumerate(in_channels)
]
)
self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
self.out_branches = nn.ModuleList(
[
nn.Sequential(
conv_layer(out_channels, out_chans, 3, padding=1, bias=False),
nn.BatchNorm2d(out_chans),
nn.ReLU(inplace=True),
nn.Upsample(scale_factor=2**idx, mode="bilinear", align_corners=True),
)
for idx, chans in enumerate(in_channels)
]
)
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
if len(x) != len(self.out_branches):
raise AssertionError
# Conv1x1 to get the same number of channels
_x: List[torch.Tensor] = [branch(t) for branch, t in zip(self.in_branches, x)]
out: List[torch.Tensor] = [_x[-1]]
for t in _x[:-1][::-1]:
out.append(self.upsample(out[-1]) + t)
# Conv and final upsampling
out = [branch(t) for branch, t in zip(self.out_branches, out[::-1])]
return torch.cat(out, dim=1)
class DBNet(_DBNet, nn.Module):
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_.
Args:
        feat_extractor: the backbone serving as feature extractor
head_chans: the number of channels in the head
deform_conv: whether to use deformable convolution
assume_straight_pages: if True, fit straight bounding boxes only
exportable: onnx exportable returns only logits
cfg: the configuration dict of the model
class_names: list of class names
"""
def __init__(
self,
feat_extractor: IntermediateLayerGetter,
head_chans: int = 256,
deform_conv: bool = False,
bin_thresh: float = 0.3,
assume_straight_pages: bool = True,
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
class_names: List[str] = [CLASS_NAME],
) -> None:
super().__init__()
self.class_names = class_names
num_classes: int = len(self.class_names)
self.cfg = cfg
conv_layer = DeformConv2d if deform_conv else nn.Conv2d
self.exportable = exportable
self.assume_straight_pages = assume_straight_pages
self.feat_extractor = feat_extractor
# Identify the number of channels for the head initialization
_is_training = self.feat_extractor.training
self.feat_extractor = self.feat_extractor.eval()
with torch.no_grad():
out = self.feat_extractor(torch.zeros((1, 3, 224, 224)))
fpn_channels = [v.shape[1] for _, v in out.items()]
if _is_training:
self.feat_extractor = self.feat_extractor.train()
self.fpn = FeaturePyramidNetwork(fpn_channels, head_chans, deform_conv)
# Conv1 map to channels
self.prob_head = nn.Sequential(
conv_layer(head_chans, head_chans // 4, 3, padding=1, bias=False),
nn.BatchNorm2d(head_chans // 4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(head_chans // 4, head_chans // 4, 2, stride=2, bias=False),
nn.BatchNorm2d(head_chans // 4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(head_chans // 4, num_classes, 2, stride=2),
)
self.thresh_head = nn.Sequential(
conv_layer(head_chans, head_chans // 4, 3, padding=1, bias=False),
nn.BatchNorm2d(head_chans // 4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(head_chans // 4, head_chans // 4, 2, stride=2, bias=False),
nn.BatchNorm2d(head_chans // 4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(head_chans // 4, num_classes, 2, stride=2),
)
self.postprocessor = DBPostProcessor(assume_straight_pages=assume_straight_pages, bin_thresh=bin_thresh)
for n, m in self.named_modules():
# Don't override the initialization of the backbone
if n.startswith("feat_extractor."):
continue
if isinstance(m, (nn.Conv2d, DeformConv2d)):
nn.init.kaiming_normal_(m.weight.data, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
def forward(
self,
x: torch.Tensor,
target: Optional[List[np.ndarray]] = None,
return_model_output: bool = False,
return_preds: bool = False,
) -> Dict[str, torch.Tensor]:
# Extract feature maps at different stages
feats = self.feat_extractor(x)
feats = [feats[str(idx)] for idx in range(len(feats))]
# Pass through the FPN
feat_concat = self.fpn(feats)
logits = self.prob_head(feat_concat)
out: Dict[str, Any] = {}
if self.exportable:
out["logits"] = logits
return out
if return_model_output or target is None or return_preds:
prob_map = torch.sigmoid(logits)
if return_model_output:
out["out_map"] = prob_map
if target is None or return_preds:
# Post-process boxes (keep only text predictions)
out["preds"] = [
dict(zip(self.class_names, preds))
for preds in self.postprocessor(prob_map.detach().cpu().permute((0, 2, 3, 1)).numpy())
]
if target is not None:
thresh_map = self.thresh_head(feat_concat)
loss = self.compute_loss(logits, thresh_map, target)
out["loss"] = loss
return out
def compute_loss(self, out_map: torch.Tensor, thresh_map: torch.Tensor, target: List[np.ndarray]) -> torch.Tensor:
"""Compute a batch of gts, masks, thresh_gts, thresh_masks from a list of boxes
and a list of masks for each image. From there it computes the loss with the model output
Args:
out_map: output feature map of the model of shape (N, C, H, W)
thresh_map: threshold map of shape (N, C, H, W)
            target: list of dictionaries where each dict has a `boxes` and a `flags` entry
Returns:
A loss tensor
"""
prob_map = torch.sigmoid(out_map)
thresh_map = torch.sigmoid(thresh_map)
targets = self.build_target(target, prob_map.shape, False) # type: ignore[arg-type]
seg_target, seg_mask = torch.from_numpy(targets[0]), torch.from_numpy(targets[1])
seg_target, seg_mask = seg_target.to(out_map.device), seg_mask.to(out_map.device)
thresh_target, thresh_mask = torch.from_numpy(targets[2]), torch.from_numpy(targets[3])
thresh_target, thresh_mask = thresh_target.to(out_map.device), thresh_mask.to(out_map.device)
# Compute balanced BCE loss for proba_map
bce_scale = 5.0
balanced_bce_loss = torch.zeros(1, device=out_map.device)
dice_loss = torch.zeros(1, device=out_map.device)
l1_loss = torch.zeros(1, device=out_map.device)
if torch.any(seg_mask):
bce_loss = F.binary_cross_entropy_with_logits(
out_map,
seg_target,
reduction="none",
)[seg_mask]
neg_target = 1 - seg_target[seg_mask]
positive_count = seg_target[seg_mask].sum()
negative_count = torch.minimum(neg_target.sum(), 3.0 * positive_count)
negative_loss = bce_loss * neg_target
negative_loss = negative_loss.sort().values[-int(negative_count.item()) :]
sum_losses = torch.sum(bce_loss * seg_target[seg_mask]) + torch.sum(negative_loss)
balanced_bce_loss = sum_losses / (positive_count + negative_count + 1e-6)
# Compute dice loss for approxbin_map
bin_map = 1 / (1 + torch.exp(-50.0 * (prob_map[seg_mask] - thresh_map[seg_mask])))
bce_min = bce_loss.min()
weights = (bce_loss - bce_min) / (bce_loss.max() - bce_min) + 1.0
inter = torch.sum(bin_map * seg_target[seg_mask] * weights)
union = torch.sum(bin_map) + torch.sum(seg_target[seg_mask]) + 1e-8
dice_loss = 1 - 2.0 * inter / union
# Compute l1 loss for thresh_map
l1_scale = 10.0
if torch.any(thresh_mask):
l1_loss = torch.mean(torch.abs(thresh_map[thresh_mask] - thresh_target[thresh_mask]))
return l1_scale * l1_loss + bce_scale * balanced_bce_loss + dice_loss
def _dbnet(
arch: str,
pretrained: bool,
backbone_fn: Callable[[bool], nn.Module],
fpn_layers: List[str],
backbone_submodule: Optional[str] = None,
pretrained_backbone: bool = True,
**kwargs: Any,
) -> DBNet:
# Starting with Imagenet pretrained params introduces some NaNs in layer3 & layer4 of resnet50
pretrained_backbone = pretrained_backbone and not arch.split("_")[1].startswith("resnet")
pretrained_backbone = pretrained_backbone and not pretrained
# Feature extractor
backbone = backbone_fn(pretrained_backbone)
if isinstance(backbone_submodule, str):
backbone = getattr(backbone, backbone_submodule)
feat_extractor = IntermediateLayerGetter(
backbone,
{layer_name: str(idx) for idx, layer_name in enumerate(fpn_layers)},
)
if not kwargs.get("class_names", None):
kwargs["class_names"] = default_cfgs[arch].get("class_names", [CLASS_NAME])
else:
kwargs["class_names"] = sorted(kwargs["class_names"])
# Build the model
model = DBNet(feat_extractor, cfg=default_cfgs[arch], **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def db_resnet34(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a ResNet-34 backbone.
>>> import torch
>>> from doctr.models import db_resnet34
>>> model = db_resnet34(pretrained=True)
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _dbnet(
"db_resnet34",
pretrained,
resnet34,
["layer1", "layer2", "layer3", "layer4"],
None,
**kwargs,
)
def db_resnet50(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a ResNet-50 backbone.
>>> import torch
>>> from doctr.models import db_resnet50
>>> model = db_resnet50(pretrained=True)
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _dbnet(
"db_resnet50",
pretrained,
resnet50,
["layer1", "layer2", "layer3", "layer4"],
None,
**kwargs,
)
def db_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a MobileNet V3 Large backbone.
>>> import torch
>>> from doctr.models import db_mobilenet_v3_large
>>> model = db_mobilenet_v3_large(pretrained=True)
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _dbnet(
"db_mobilenet_v3_large",
pretrained,
mobilenet_v3_large,
["3", "6", "12", "16"],
"features",
**kwargs,
)
def db_resnet50_rotation(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a ResNet-50 backbone.
This model is trained with rotated documents
>>> import torch
>>> from doctr.models import db_resnet50_rotation
>>> model = db_resnet50_rotation(pretrained=True)
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _dbnet(
"db_resnet50_rotation",
pretrained,
resnet50,
["layer1", "layer2", "layer3", "layer4"],
None,
**kwargs,
)
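if __name__ == "__main__":
    # Output-structure sketch with randomly initialized weights (no weight download,
    # predictions are meaningless): the sigmoid map comes back at the input resolution
    # and `preds` holds one dict per image mapping each class name to a (*, 5) array
    # of relative (xmin, ymin, xmax, ymax, score) boxes.
    model = db_resnet50(pretrained=False).eval()
    with torch.no_grad():
        out = model(torch.rand(1, 3, 1024, 1024), return_model_output=True, return_preds=True)
    print(out["out_map"].shape)               # torch.Size([1, 1, 1024, 1024])
    print(out["preds"][0][CLASS_NAME].shape)  # (num_boxes, 5)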
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization
from typing import Dict, List, Tuple, Union
import cv2
import numpy as np
import pyclipper
from shapely.geometry import Polygon
from ..core import DetectionPostProcessor
__all__ = ["DBPostProcessor"]
class DBPostProcessor(DetectionPostProcessor):
"""Implements a post processor for DBNet adapted from the implementation of `xuannianz
<https://github.com/xuannianz/DifferentiableBinarization>`_.
Args:
        unclip_ratio: ratio used to unshrink polygons
min_size_box: minimal length (pix) to keep a box
max_candidates: maximum boxes to consider in a single page
box_thresh: minimal objectness score to consider a box
        bin_thresh: threshold used to binarize the p_map at inference time
"""
def __init__(
self,
box_thresh: float = 0.1,
bin_thresh: float = 0.3,
assume_straight_pages: bool = True,
) -> None:
super().__init__(box_thresh, bin_thresh, assume_straight_pages)
self.unclip_ratio = 1.5 if assume_straight_pages else 2.2
def polygon_to_box(
self,
points: np.ndarray,
) -> np.ndarray:
"""Expand a polygon (points) by a factor unclip_ratio, and returns a polygon
Args:
points: The first parameter.
Returns:
a box in absolute coordinates (xmin, ymin, xmax, ymax) or (4, 2) array (quadrangle)
"""
if not self.assume_straight_pages:
# Compute the rectangle polygon enclosing the raw polygon
rect = cv2.minAreaRect(points)
points = cv2.boxPoints(rect)
# Add 1 pixel to correct cv2 approx
area = (rect[1][0] + 1) * (1 + rect[1][1])
length = 2 * (rect[1][0] + rect[1][1]) + 2
else:
poly = Polygon(points)
area = poly.area
length = poly.length
distance = area * self.unclip_ratio / length # compute distance to expand polygon
offset = pyclipper.PyclipperOffset()
offset.AddPath(points, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
_points = offset.Execute(distance)
# Take biggest stack of points
idx = 0
if len(_points) > 1:
max_size = 0
for _idx, p in enumerate(_points):
if len(p) > max_size:
idx = _idx
max_size = len(p)
# We ensure that _points can be correctly casted to a ndarray
_points = [_points[idx]]
expanded_points: np.ndarray = np.asarray(_points) # expand polygon
if len(expanded_points) < 1:
return None # type: ignore[return-value]
return (
cv2.boundingRect(expanded_points)
if self.assume_straight_pages
else np.roll(cv2.boxPoints(cv2.minAreaRect(expanded_points)), -1, axis=0)
)
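    # Worked example for the expansion above, assuming the default straight-page
    # unclip_ratio of 1.5: an axis-aligned 100 x 40 px quadrilateral has area 4000 and
    # perimeter 280, so distance = 4000 * 1.5 / 280 ~= 21.4 px and the offset polygon
    # grows by roughly that margin on every side before being reduced to a bounding box.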
def bitmap_to_boxes(
self,
pred: np.ndarray,
bitmap: np.ndarray,
) -> np.ndarray:
"""Compute boxes from a bitmap/pred_map
Args:
pred: Pred map from differentiable binarization output
bitmap: Bitmap map computed from pred (binarized)
        Returns:
            np tensor boxes for the bitmap, each box is a 5-element list
            containing xmin, ymin, xmax, ymax, score (or a (4, 2) polygon when assume_straight_pages is False)
"""
height, width = bitmap.shape[:2]
min_size_box = 1 + int(height / 512)
boxes: List[Union[np.ndarray, List[float]]] = []
# get contours from connected components on the bitmap
contours, _ = cv2.findContours(bitmap.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
# Check whether smallest enclosing bounding box is not too small
if np.any(contour[:, 0].max(axis=0) - contour[:, 0].min(axis=0) < min_size_box):
continue
# Compute objectness
if self.assume_straight_pages:
x, y, w, h = cv2.boundingRect(contour)
points: np.ndarray = np.array([[x, y], [x, y + h], [x + w, y + h], [x + w, y]])
score = self.box_score(pred, points, assume_straight_pages=True)
else:
score = self.box_score(pred, contour, assume_straight_pages=False)
if score < self.box_thresh: # remove polygons with a weak objectness
continue
if self.assume_straight_pages:
_box = self.polygon_to_box(points)
else:
_box = self.polygon_to_box(np.squeeze(contour))
# Remove too small boxes
if self.assume_straight_pages:
if _box is None or _box[2] < min_size_box or _box[3] < min_size_box:
continue
elif np.linalg.norm(_box[2, :] - _box[0, :], axis=-1) < min_size_box:
continue
if self.assume_straight_pages:
x, y, w, h = _box
# compute relative polygon to get rid of img shape
xmin, ymin, xmax, ymax = x / width, y / height, (x + w) / width, (y + h) / height
boxes.append([xmin, ymin, xmax, ymax, score])
else:
# compute relative box to get rid of img shape, in that case _box is a 4pt polygon
                if not (isinstance(_box, np.ndarray) and _box.shape == (4, 2)):
                    raise AssertionError("When assume_straight_pages is False, a box is a (4, 2) array (polygon)")
_box[:, 0] /= width
_box[:, 1] /= height
boxes.append(_box)
if not self.assume_straight_pages:
return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 4, 2), dtype=pred.dtype)
else:
return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 5), dtype=pred.dtype)
class _DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_.
Args:
feature extractor: the backbone serving as feature extractor
fpn_channels: number of channels each extracted feature maps is mapped to
"""
shrink_ratio = 0.4
thresh_min = 0.3
thresh_max = 0.7
min_size_box = 3
assume_straight_pages: bool = True
@staticmethod
def compute_distance(
xs: np.ndarray,
ys: np.ndarray,
a: np.ndarray,
b: np.ndarray,
eps: float = 1e-7,
    ) -> np.ndarray:
        """Compute the distance for each point of the map (xs, ys) to the (a, b) segment
        Args:
            xs : map of x coordinates (height, width)
            ys : map of y coordinates (height, width)
            a: first point defining the [ab] segment
            b: second point defining the [ab] segment
            eps: epsilon to avoid division by zero
        Returns:
            The computed distance map of shape (height, width)
"""
square_dist_1 = np.square(xs - a[0]) + np.square(ys - a[1])
square_dist_2 = np.square(xs - b[0]) + np.square(ys - b[1])
square_dist = np.square(a[0] - b[0]) + np.square(a[1] - b[1])
cosin = (square_dist - square_dist_1 - square_dist_2) / (2 * np.sqrt(square_dist_1 * square_dist_2) + eps)
square_sin = 1 - np.square(cosin)
square_sin = np.nan_to_num(square_sin)
result = np.sqrt(square_dist_1 * square_dist_2 * square_sin / square_dist)
result[cosin < 0] = np.sqrt(np.fmin(square_dist_1, square_dist_2))[cosin < 0]
return result
def draw_thresh_map(
self,
polygon: np.ndarray,
canvas: np.ndarray,
mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Draw a polygon treshold map on a canvas, as described in the DB paper
Args:
polygon : array of coord., to draw the boundary of the polygon
canvas : threshold map to fill with polygons
mask : mask for training on threshold polygons
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise AttributeError("polygon should be a 2 dimensional array of coords")
# Augment polygon by shrink_ratio
polygon_shape = Polygon(polygon)
distance = polygon_shape.area * (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length
subject = [tuple(coor) for coor in polygon] # Get coord as list of tuples
padding = pyclipper.PyclipperOffset()
padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
padded_polygon: np.ndarray = np.array(padding.Execute(distance)[0])
# Fill the mask with 1 on the new padded polygon
cv2.fillPoly(mask, [padded_polygon.astype(np.int32)], 1.0)
# Get min/max to recover polygon after distance computation
xmin = padded_polygon[:, 0].min()
xmax = padded_polygon[:, 0].max()
ymin = padded_polygon[:, 1].min()
ymax = padded_polygon[:, 1].max()
width = xmax - xmin + 1
height = ymax - ymin + 1
# Get absolute polygon for distance computation
polygon[:, 0] = polygon[:, 0] - xmin
polygon[:, 1] = polygon[:, 1] - ymin
# Get absolute padded polygon
xs: np.ndarray = np.broadcast_to(np.linspace(0, width - 1, num=width).reshape(1, width), (height, width))
ys: np.ndarray = np.broadcast_to(np.linspace(0, height - 1, num=height).reshape(height, 1), (height, width))
# Compute distance map to fill the padded polygon
distance_map = np.zeros((polygon.shape[0], height, width), dtype=polygon.dtype)
for i in range(polygon.shape[0]):
j = (i + 1) % polygon.shape[0]
absolute_distance = self.compute_distance(xs, ys, polygon[i], polygon[j])
distance_map[i] = np.clip(absolute_distance / distance, 0, 1)
distance_map = np.min(distance_map, axis=0)
# Clip the padded polygon inside the canvas
xmin_valid = min(max(0, xmin), canvas.shape[1] - 1)
xmax_valid = min(max(0, xmax), canvas.shape[1] - 1)
ymin_valid = min(max(0, ymin), canvas.shape[0] - 1)
ymax_valid = min(max(0, ymax), canvas.shape[0] - 1)
# Fill the canvas with the distances computed inside the valid padded polygon
canvas[ymin_valid : ymax_valid + 1, xmin_valid : xmax_valid + 1] = np.fmax(
1 - distance_map[ymin_valid - ymin : ymax_valid - ymin + 1, xmin_valid - xmin : xmax_valid - xmin + 1],
canvas[ymin_valid : ymax_valid + 1, xmin_valid : xmax_valid + 1],
)
return polygon, canvas, mask
def build_target(
self,
target: List[Dict[str, np.ndarray]],
output_shape: Tuple[int, int, int, int],
channels_last: bool = True,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
if any(t.dtype != np.float32 for tgt in target for t in tgt.values()):
raise AssertionError("the expected dtype of target 'boxes' entry is 'np.float32'.")
if any(np.any((t[:, :4] > 1) | (t[:, :4] < 0)) for tgt in target for t in tgt.values()):
raise ValueError("the 'boxes' entry of the target is expected to take values between 0 & 1.")
input_dtype = next(iter(target[0].values())).dtype if len(target) > 0 else np.float32
if channels_last:
h, w = output_shape[1:-1]
target_shape = (output_shape[0], output_shape[-1], h, w) # (Batch_size, num_classes, h, w)
else:
h, w = output_shape[-2:]
target_shape = output_shape # (Batch_size, num_classes, h, w)
seg_target: np.ndarray = np.zeros(target_shape, dtype=np.uint8)
seg_mask: np.ndarray = np.ones(target_shape, dtype=bool)
thresh_target: np.ndarray = np.zeros(target_shape, dtype=np.float32)
thresh_mask: np.ndarray = np.ones(target_shape, dtype=np.uint8)
for idx, tgt in enumerate(target):
for class_idx, _tgt in enumerate(tgt.values()):
# Draw each polygon on gt
if _tgt.shape[0] == 0:
# Empty image, full masked
# seg_mask[idx, :, :, class_idx] = False
seg_mask[idx, class_idx] = False
# Absolute bounding boxes
abs_boxes = _tgt.copy()
if abs_boxes.ndim == 3:
abs_boxes[:, :, 0] *= w
abs_boxes[:, :, 1] *= h
polys = abs_boxes
boxes_size = np.linalg.norm(abs_boxes[:, 2, :] - abs_boxes[:, 0, :], axis=-1)
abs_boxes = np.concatenate((abs_boxes.min(1), abs_boxes.max(1)), -1).round().astype(np.int32)
else:
abs_boxes[:, [0, 2]] *= w
abs_boxes[:, [1, 3]] *= h
abs_boxes = abs_boxes.round().astype(np.int32)
polys = np.stack(
[
abs_boxes[:, [0, 1]],
abs_boxes[:, [0, 3]],
abs_boxes[:, [2, 3]],
abs_boxes[:, [2, 1]],
],
axis=1,
)
boxes_size = np.minimum(abs_boxes[:, 2] - abs_boxes[:, 0], abs_boxes[:, 3] - abs_boxes[:, 1])
for box, box_size, poly in zip(abs_boxes, boxes_size, polys):
# Mask boxes that are too small
if box_size < self.min_size_box:
# seg_mask[idx, box[1] : box[3] + 1, box[0] : box[2] + 1, class_idx] = False
seg_mask[idx, class_idx, box[1] : box[3] + 1, box[0] : box[2] + 1] = False
continue
# Negative shrink for gt, as described in paper
polygon = Polygon(poly)
distance = polygon.area * (1 - np.power(self.shrink_ratio, 2)) / polygon.length
subject = [tuple(coor) for coor in poly]
padding = pyclipper.PyclipperOffset()
padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrinked = padding.Execute(-distance)
# Draw polygon on gt if it is valid
if len(shrinked) == 0:
# seg_mask[idx, box[1] : box[3] + 1, box[0] : box[2] + 1, class_idx] = False
seg_mask[idx, class_idx, box[1] : box[3] + 1, box[0] : box[2] + 1] = False
continue
shrinked = np.array(shrinked[0]).reshape(-1, 2)
if shrinked.shape[0] <= 2 or not Polygon(shrinked).is_valid:
# seg_mask[idx, box[1] : box[3] + 1, box[0] : box[2] + 1, class_idx] = False
seg_mask[idx, class_idx, box[1] : box[3] + 1, box[0] : box[2] + 1] = False
continue
cv2.fillPoly(seg_target[idx, class_idx], [shrinked.astype(np.int32)], 1)
# Draw on both thresh map and thresh mask
poly, thresh_target[idx, class_idx], thresh_mask[idx, class_idx] = self.draw_thresh_map(
poly, thresh_target[idx, class_idx], thresh_mask[idx, class_idx]
)
if channels_last:
seg_target = seg_target.transpose((0, 2, 3, 1))
seg_mask = seg_mask.transpose((0, 2, 3, 1))
thresh_target = thresh_target.transpose((0, 2, 3, 1))
thresh_mask = thresh_mask.transpose((0, 2, 3, 1))
thresh_target = thresh_target.astype(input_dtype) * (self.thresh_max - self.thresh_min) + self.thresh_min
seg_target = seg_target.astype(input_dtype)
seg_mask = seg_mask.astype(bool)
thresh_target = thresh_target.astype(input_dtype)
thresh_mask = thresh_mask.astype(bool)
return seg_target, seg_mask, thresh_target, thresh_mask
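# Minimal usage sketch of the two offsets used above (a sketch, assuming pyclipper and
# shapely are installed): the training target shrinks a polygon by
# D = A * (1 - shrink_ratio**2) / L (cf. build_target / draw_thresh_map), while
# post-processing expands it back by D' = A * unclip_ratio / L (cf. polygon_to_box).
if __name__ == "__main__":
    import numpy as np
    import pyclipper
    from shapely.geometry import Polygon
    box = np.array([[0, 0], [100, 0], [100, 20], [0, 20]], dtype=np.float32)
    shape = Polygon(box)
    shrink_dist = shape.area * (1 - 0.4**2) / shape.length  # shrink_ratio = 0.4
    expand_dist = shape.area * 1.5 / shape.length  # unclip_ratio = 1.5 (straight pages)
    def offset_polygon(points: np.ndarray, delta: float) -> np.ndarray:
        # Positive delta expands the polygon, negative delta shrinks it
        offsetter = pyclipper.PyclipperOffset()
        offsetter.AddPath([tuple(pt) for pt in points], pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        return np.asarray(offsetter.Execute(delta)[0])
    shrunken, expanded = offset_polygon(box, -shrink_dist), offset_polygon(box, expand_dist)
    print("shrunken extent:", shrunken.min(0), shrunken.max(0))
    print("expanded extent:", expanded.min(0), expanded.max(0))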
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model, Sequential, layers
from doctr.file_utils import CLASS_NAME
from doctr.models.classification import resnet18, resnet34, resnet50
from doctr.models.utils import IntermediateLayerGetter, conv_sequence, load_pretrained_params
from doctr.utils.repr import NestedObject
from .base import LinkNetPostProcessor, _LinkNet
__all__ = ["LinkNet", "linknet_resnet18", "linknet_resnet34", "linknet_resnet50", "linknet_resnet18_rotation"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"linknet_resnet18": {
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"input_shape": (1024, 1024, 3),
"url": None,
},
"linknet_resnet18_rotation": {
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"input_shape": (1024, 1024, 3),
"url": "https://doctr-static.mindee.com/models?id=v0.5.0/linknet_resnet18-a48e6ed3.zip&src=0",
},
"linknet_resnet34": {
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"input_shape": (1024, 1024, 3),
"url": None,
},
"linknet_resnet50": {
"mean": (0.798, 0.785, 0.772),
"std": (0.264, 0.2749, 0.287),
"input_shape": (1024, 1024, 3),
"url": None,
},
}
def decoder_block(in_chan: int, out_chan: int, stride: int, **kwargs: Any) -> Sequential:
"""Creates a LinkNet decoder block"""
return Sequential(
[
*conv_sequence(in_chan // 4, "relu", True, kernel_size=1, **kwargs),
layers.Conv2DTranspose(
filters=in_chan // 4,
kernel_size=3,
strides=stride,
padding="same",
use_bias=False,
kernel_initializer="he_normal",
),
layers.BatchNormalization(),
layers.Activation("relu"),
*conv_sequence(out_chan, "relu", True, kernel_size=1),
]
)
class LinkNetFPN(Model, NestedObject):
"""LinkNet Decoder module"""
def __init__(
self,
out_chans: int,
in_shapes: List[Tuple[int, ...]],
) -> None:
super().__init__()
self.out_chans = out_chans
strides = [2] * (len(in_shapes) - 1) + [1]
i_chans = [s[-1] for s in in_shapes[::-1]]
o_chans = i_chans[1:] + [out_chans]
self.decoders = [
decoder_block(in_chan, out_chan, s, input_shape=in_shape)
for in_chan, out_chan, s, in_shape in zip(i_chans, o_chans, strides, in_shapes[::-1])
]
def call(self, x: List[tf.Tensor]) -> tf.Tensor:
out = 0
for decoder, fmap in zip(self.decoders, x[::-1]):
out = decoder(out + fmap)
return out
def extra_repr(self) -> str:
return f"out_chans={self.out_chans}"
class LinkNet(_LinkNet, keras.Model):
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
Args:
feature extractor: the backbone serving as feature extractor
fpn_channels: number of channels each extracted feature maps is mapped to
assume_straight_pages: if True, fit straight bounding boxes only
        exportable: if True, the call returns only the logits (useful for ONNX export)
cfg: the configuration dict of the model
class_names: list of class names
"""
_children_names: List[str] = ["feat_extractor", "fpn", "classifier", "postprocessor"]
def __init__(
self,
feat_extractor: IntermediateLayerGetter,
fpn_channels: int = 64,
bin_thresh: float = 0.1,
assume_straight_pages: bool = True,
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
class_names: List[str] = [CLASS_NAME],
) -> None:
super().__init__(cfg=cfg)
self.class_names = class_names
num_classes: int = len(self.class_names)
self.exportable = exportable
self.assume_straight_pages = assume_straight_pages
self.feat_extractor = feat_extractor
self.fpn = LinkNetFPN(fpn_channels, [_shape[1:] for _shape in self.feat_extractor.output_shape])
self.fpn.build(self.feat_extractor.output_shape)
self.classifier = Sequential(
[
layers.Conv2DTranspose(
filters=32,
kernel_size=3,
strides=2,
padding="same",
use_bias=False,
kernel_initializer="he_normal",
input_shape=self.fpn.decoders[-1].output_shape[1:],
),
layers.BatchNormalization(),
layers.Activation("relu"),
*conv_sequence(32, "relu", True, kernel_size=3, strides=1),
layers.Conv2DTranspose(
filters=num_classes,
kernel_size=2,
strides=2,
padding="same",
use_bias=True,
kernel_initializer="he_normal",
),
]
)
self.postprocessor = LinkNetPostProcessor(assume_straight_pages=assume_straight_pages, bin_thresh=bin_thresh)
def compute_loss(
self,
out_map: tf.Tensor,
target: List[Dict[str, np.ndarray]],
gamma: float = 2.0,
alpha: float = 0.5,
eps: float = 1e-8,
) -> tf.Tensor:
"""Compute linknet loss, BCE with boosted box edges or focal loss. Focal loss implementation based on
<https://github.com/tensorflow/addons/>`_.
Args:
out_map: output feature map of the model of shape N x H x W x 1
target: list of dictionary where each dict has a `boxes` and a `flags` entry
gamma: modulating factor in the focal loss formula
alpha: balancing factor in the focal loss formula
eps: epsilon factor in dice loss
Returns:
A loss tensor
"""
seg_target, seg_mask = self.build_target(target, out_map.shape[1:], True)
seg_target = tf.convert_to_tensor(seg_target, dtype=out_map.dtype)
seg_mask = tf.convert_to_tensor(seg_mask, dtype=tf.bool)
seg_mask = tf.cast(seg_mask, tf.float32)
bce_loss = tf.keras.losses.binary_crossentropy(seg_target[..., None], out_map[..., None], from_logits=True)
proba_map = tf.sigmoid(out_map)
# Focal loss
if gamma < 0:
raise ValueError("Value of gamma should be greater than or equal to zero.")
# Convert logits to prob, compute gamma factor
p_t = (seg_target * proba_map) + ((1 - seg_target) * (1 - proba_map))
alpha_t = seg_target * alpha + (1 - seg_target) * (1 - alpha)
# Unreduced loss
focal_loss = alpha_t * (1 - p_t) ** gamma * bce_loss
# Class reduced
focal_loss = tf.reduce_sum(seg_mask * focal_loss, (0, 1, 2, 3)) / tf.reduce_sum(seg_mask, (0, 1, 2, 3))
# Dice loss
inter = tf.math.reduce_sum(seg_mask * proba_map * seg_target, (0, 1, 2, 3))
cardinality = tf.math.reduce_sum((proba_map + seg_target), (0, 1, 2, 3))
dice_loss = 1 - 2 * (inter + eps) / (cardinality + eps)
return focal_loss + dice_loss
def call(
self,
x: tf.Tensor,
target: Optional[List[Dict[str, np.ndarray]]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
feat_maps = self.feat_extractor(x, **kwargs)
logits = self.fpn(feat_maps, **kwargs)
logits = self.classifier(logits, **kwargs)
out: Dict[str, tf.Tensor] = {}
if self.exportable:
out["logits"] = logits
return out
if return_model_output or target is None or return_preds:
prob_map = tf.math.sigmoid(logits)
if return_model_output:
out["out_map"] = prob_map
if target is None or return_preds:
# Post-process boxes
out["preds"] = [dict(zip(self.class_names, preds)) for preds in self.postprocessor(prob_map.numpy())]
if target is not None:
loss = self.compute_loss(logits, target)
out["loss"] = loss
return out
def _linknet(
arch: str,
pretrained: bool,
backbone_fn,
fpn_layers: List[str],
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> LinkNet:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg["input_shape"] = input_shape or default_cfgs[arch]["input_shape"]
if not kwargs.get("class_names", None):
kwargs["class_names"] = _cfg.get("class_names", [CLASS_NAME])
else:
kwargs["class_names"] = sorted(kwargs["class_names"])
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(
pretrained=pretrained_backbone,
include_top=False,
input_shape=_cfg["input_shape"],
),
fpn_layers,
)
# Build the model
model = LinkNet(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, _cfg["url"])
return model
def linknet_resnet18(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import linknet_resnet18
>>> model = linknet_resnet18(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet(
"linknet_resnet18",
pretrained,
resnet18,
["resnet_block_1", "resnet_block_3", "resnet_block_5", "resnet_block_7"],
**kwargs,
)
def linknet_resnet18_rotation(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import linknet_resnet18_rotation
>>> model = linknet_resnet18_rotation(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet(
"linknet_resnet18_rotation",
pretrained,
resnet18,
["resnet_block_1", "resnet_block_3", "resnet_block_5", "resnet_block_7"],
**kwargs,
)
def linknet_resnet34(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import linknet_resnet34
>>> model = linknet_resnet34(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet(
"linknet_resnet34",
pretrained,
resnet34,
["resnet_block_2", "resnet_block_6", "resnet_block_12", "resnet_block_15"],
**kwargs,
)
def linknet_resnet50(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import tensorflow as tf
>>> from doctr.models import linknet_resnet50
>>> model = linknet_resnet50(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet(
"linknet_resnet50",
pretrained,
resnet50,
["conv2_block3_out", "conv3_block4_out", "conv4_block6_out", "conv5_block3_out"],
**kwargs,
)
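# Minimal training-style sketch (a sketch, assuming a working TensorFlow runtime;
# pretrained_backbone=False avoids any weight download): one forward pass on a
# dummy page with a single relative (xmin, ymin, xmax, ymax) box target.
if __name__ == "__main__":
    model = linknet_resnet18(pretrained=False, pretrained_backbone=False, input_shape=(512, 512, 3))
    page = tf.random.uniform((1, 512, 512, 3), maxval=1, dtype=tf.float32)
    target = [{CLASS_NAME: np.array([[0.1, 0.1, 0.4, 0.2]], dtype=np.float32)}]
    out = model(page, target=target, return_model_output=True)
    print(out["out_map"].shape, float(out["loss"]))  # (1, 512, 512, 1) and a scalar loss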
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models._utils import IntermediateLayerGetter
from doctr.file_utils import CLASS_NAME
from doctr.models.classification import resnet18, resnet34, resnet50
from ...utils import load_pretrained_params
from .base import LinkNetPostProcessor, _LinkNet
__all__ = ["LinkNet", "linknet_resnet18", "linknet_resnet34", "linknet_resnet50"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"linknet_resnet18": {
"input_shape": (3, 1024, 1024),
"mean": (0.5, 0.5, 0.5),
"std": (1.0, 1.0, 1.0),
"url": None,
},
"linknet_resnet34": {
"input_shape": (3, 1024, 1024),
"mean": (0.5, 0.5, 0.5),
"std": (1.0, 1.0, 1.0),
"url": None,
},
"linknet_resnet50": {
"input_shape": (3, 1024, 1024),
"mean": (0.5, 0.5, 0.5),
"std": (1.0, 1.0, 1.0),
"url": None,
},
}
class LinkNetFPN(nn.Module):
def __init__(self, layer_shapes: List[Tuple[int, int, int]]) -> None:
super().__init__()
strides = [
1 if (in_shape[-1] == out_shape[-1]) else 2
for in_shape, out_shape in zip(layer_shapes[:-1], layer_shapes[1:])
]
chans = [shape[0] for shape in layer_shapes]
_decoder_layers = [
self.decoder_block(ochan, ichan, stride) for ichan, ochan, stride in zip(chans[:-1], chans[1:], strides)
]
self.decoders = nn.ModuleList(_decoder_layers)
@staticmethod
def decoder_block(in_chan: int, out_chan: int, stride: int) -> nn.Sequential:
"""Creates a LinkNet decoder block"""
mid_chan = in_chan // 4
return nn.Sequential(
nn.Conv2d(in_chan, mid_chan, kernel_size=1, bias=False),
nn.BatchNorm2d(mid_chan),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(mid_chan, mid_chan, 3, padding=1, output_padding=stride - 1, stride=stride, bias=False),
nn.BatchNorm2d(mid_chan),
nn.ReLU(inplace=True),
nn.Conv2d(mid_chan, out_chan, kernel_size=1, bias=False),
nn.BatchNorm2d(out_chan),
nn.ReLU(inplace=True),
)
def forward(self, feats: List[torch.Tensor]) -> torch.Tensor:
out = feats[-1]
for decoder, fmap in zip(self.decoders[::-1], feats[:-1][::-1]):
out = decoder(out) + fmap
out = self.decoders[0](out)
return out
class LinkNet(nn.Module, _LinkNet):
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
Args:
feature extractor: the backbone serving as feature extractor
head_chans: number of channels in the head layers
assume_straight_pages: if True, fit straight bounding boxes only
        exportable: if True, the call returns only the logits (useful for ONNX export)
cfg: the configuration dict of the model
class_names: list of class names
"""
def __init__(
self,
feat_extractor: IntermediateLayerGetter,
bin_thresh: float = 0.1,
head_chans: int = 32,
assume_straight_pages: bool = True,
exportable: bool = False,
cfg: Optional[Dict[str, Any]] = None,
class_names: List[str] = [CLASS_NAME],
) -> None:
super().__init__()
self.class_names = class_names
num_classes: int = len(self.class_names)
self.cfg = cfg
self.exportable = exportable
self.assume_straight_pages = assume_straight_pages
self.feat_extractor = feat_extractor
# Identify the number of channels for the FPN initialization
self.feat_extractor.eval()
with torch.no_grad():
in_shape = (3, 512, 512)
out = self.feat_extractor(torch.zeros((1, *in_shape)))
# Get the shapes of the extracted feature maps
_shapes = [v.shape[1:] for _, v in out.items()]
# Prepend the expected shapes of the first encoder
_shapes = [(_shapes[0][0], in_shape[1] // 4, in_shape[2] // 4)] + _shapes
self.feat_extractor.train()
self.fpn = LinkNetFPN(_shapes)
self.classifier = nn.Sequential(
nn.ConvTranspose2d(
_shapes[0][0], head_chans, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False
),
nn.BatchNorm2d(head_chans),
nn.ReLU(inplace=True),
nn.Conv2d(head_chans, head_chans, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(head_chans),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(head_chans, num_classes, kernel_size=2, stride=2),
)
self.postprocessor = LinkNetPostProcessor(
assume_straight_pages=self.assume_straight_pages, bin_thresh=bin_thresh
)
for n, m in self.named_modules():
# Don't override the initialization of the backbone
if n.startswith("feat_extractor."):
continue
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
nn.init.kaiming_normal_(m.weight.data, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
def forward(
self,
x: torch.Tensor,
target: Optional[List[np.ndarray]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
feats = self.feat_extractor(x)
logits = self.fpn([feats[str(idx)] for idx in range(len(feats))])
logits = self.classifier(logits)
out: Dict[str, Any] = {}
if self.exportable:
out["logits"] = logits
return out
if return_model_output or target is None or return_preds:
prob_map = torch.sigmoid(logits)
if return_model_output:
out["out_map"] = prob_map
if target is None or return_preds:
# Post-process boxes
out["preds"] = [
dict(zip(self.class_names, preds))
for preds in self.postprocessor(prob_map.detach().cpu().permute((0, 2, 3, 1)).numpy())
]
if target is not None:
loss = self.compute_loss(logits, target)
out["loss"] = loss
return out
def compute_loss(
self,
out_map: torch.Tensor,
target: List[np.ndarray],
gamma: float = 2.0,
alpha: float = 0.5,
eps: float = 1e-8,
) -> torch.Tensor:
"""Compute linknet loss, BCE with boosted box edges or focal loss. Focal loss implementation based on
<https://github.com/tensorflow/addons/>`_.
Args:
out_map: output feature map of the model of shape (N, num_classes, H, W)
target: list of dictionary where each dict has a `boxes` and a `flags` entry
gamma: modulating factor in the focal loss formula
alpha: balancing factor in the focal loss formula
eps: epsilon factor in dice loss
Returns:
A loss tensor
"""
_target, _mask = self.build_target(target, out_map.shape[1:], False) # type: ignore[arg-type]
seg_target, seg_mask = torch.from_numpy(_target).to(dtype=out_map.dtype), torch.from_numpy(_mask)
seg_target, seg_mask = seg_target.to(out_map.device), seg_mask.to(out_map.device)
seg_mask = seg_mask.to(dtype=torch.float32)
bce_loss = F.binary_cross_entropy_with_logits(out_map, seg_target, reduction="none")
proba_map = torch.sigmoid(out_map)
# Focal loss
if gamma < 0:
raise ValueError("Value of gamma should be greater than or equal to zero.")
p_t = proba_map * seg_target + (1 - proba_map) * (1 - seg_target)
alpha_t = alpha * seg_target + (1 - alpha) * (1 - seg_target)
# Unreduced version
focal_loss = alpha_t * (1 - p_t) ** gamma * bce_loss
# Class reduced
focal_loss = (seg_mask * focal_loss).sum((0, 1, 2, 3)) / seg_mask.sum((0, 1, 2, 3))
# Dice loss
inter = (seg_mask * proba_map * seg_target).sum((0, 1, 2, 3))
cardinality = (seg_mask * (proba_map + seg_target)).sum((0, 1, 2, 3))
dice_loss = 1 - 2 * (inter + eps) / (cardinality + eps)
# Return the full loss (equal sum of focal loss and dice loss)
return focal_loss + dice_loss
def _linknet(
arch: str,
pretrained: bool,
backbone_fn: Callable[[bool], nn.Module],
fpn_layers: List[str],
pretrained_backbone: bool = True,
**kwargs: Any,
) -> LinkNet:
pretrained_backbone = pretrained_backbone and not pretrained
# Build the feature extractor
backbone = backbone_fn(pretrained_backbone)
feat_extractor = IntermediateLayerGetter(
backbone,
{layer_name: str(idx) for idx, layer_name in enumerate(fpn_layers)},
)
if not kwargs.get("class_names", None):
kwargs["class_names"] = default_cfgs[arch].get("class_names", [CLASS_NAME])
else:
kwargs["class_names"] = sorted(kwargs["class_names"])
# Build the model
model = LinkNet(feat_extractor, cfg=default_cfgs[arch], **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"])
return model
def linknet_resnet18(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import torch
>>> from doctr.models import linknet_resnet18
>>> model = linknet_resnet18(pretrained=True).eval()
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet("linknet_resnet18", pretrained, resnet18, ["layer1", "layer2", "layer3", "layer4"], **kwargs)
def linknet_resnet34(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import torch
>>> from doctr.models import linknet_resnet34
>>> model = linknet_resnet34(pretrained=True).eval()
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet("linknet_resnet34", pretrained, resnet34, ["layer1", "layer2", "layer3", "layer4"], **kwargs)
def linknet_resnet50(pretrained: bool = False, **kwargs: Any) -> LinkNet:
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
>>> import torch
>>> from doctr.models import linknet_resnet50
>>> model = linknet_resnet50(pretrained=True).eval()
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _linknet("linknet_resnet50", pretrained, resnet50, ["layer1", "layer2", "layer3", "layer4"], **kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization
from typing import Dict, List, Tuple, Union
import cv2
import numpy as np
import pyclipper
from shapely.geometry import Polygon
from doctr.models.core import BaseModel
from ..core import DetectionPostProcessor
__all__ = ["_LinkNet", "LinkNetPostProcessor"]
class LinkNetPostProcessor(DetectionPostProcessor):
"""Implements a post processor for LinkNet model.
Args:
bin_thresh: threshold used to binzarized p_map at inference time
box_thresh: minimal objectness score to consider a box
assume_straight_pages: whether the inputs were expected to have horizontal text elements
"""
def __init__(
self,
bin_thresh: float = 0.1,
box_thresh: float = 0.1,
assume_straight_pages: bool = True,
) -> None:
super().__init__(box_thresh, bin_thresh, assume_straight_pages)
self.unclip_ratio = 1.2
def polygon_to_box(
self,
points: np.ndarray,
) -> np.ndarray:
"""Expand a polygon (points) by a factor unclip_ratio, and returns a polygon
Args:
points: The first parameter.
Returns:
a box in absolute coordinates (xmin, ymin, xmax, ymax) or (4, 2) array (quadrangle)
"""
if not self.assume_straight_pages:
# Compute the rectangle polygon enclosing the raw polygon
rect = cv2.minAreaRect(points)
points = cv2.boxPoints(rect)
# Add 1 pixel to correct cv2 approx
area = (rect[1][0] + 1) * (1 + rect[1][1])
length = 2 * (rect[1][0] + rect[1][1]) + 2
else:
poly = Polygon(points)
area = poly.area
length = poly.length
distance = area * self.unclip_ratio / length # compute distance to expand polygon
offset = pyclipper.PyclipperOffset()
offset.AddPath(points, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
_points = offset.Execute(distance)
# Take biggest stack of points
idx = 0
if len(_points) > 1:
max_size = 0
for _idx, p in enumerate(_points):
if len(p) > max_size:
idx = _idx
max_size = len(p)
            # We ensure that _points can be correctly cast to an ndarray
_points = [_points[idx]]
expanded_points: np.ndarray = np.asarray(_points) # expand polygon
if len(expanded_points) < 1:
return None # type: ignore[return-value]
return (
cv2.boundingRect(expanded_points)
if self.assume_straight_pages
else np.roll(cv2.boxPoints(cv2.minAreaRect(expanded_points)), -1, axis=0)
)
def bitmap_to_boxes(
self,
pred: np.ndarray,
bitmap: np.ndarray,
) -> np.ndarray:
"""Compute boxes from a bitmap/pred_map: find connected components then filter boxes
Args:
pred: Pred map from differentiable linknet output
bitmap: Bitmap map computed from pred (binarized)
angle_tol: Comparison tolerance of the angle with the median angle across the page
ratio_tol: Under this limit aspect ratio, we cannot resolve the direction of the crop
Returns:
np tensor boxes for the bitmap, each box is a 6-element list
containing x, y, w, h, alpha, score for the box
"""
height, width = bitmap.shape[:2]
boxes: List[Union[np.ndarray, List[float]]] = []
# get contours from connected components on the bitmap
contours, _ = cv2.findContours(bitmap.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
# Check whether smallest enclosing bounding box is not too small
if np.any(contour[:, 0].max(axis=0) - contour[:, 0].min(axis=0) < 2):
continue
# Compute objectness
if self.assume_straight_pages:
x, y, w, h = cv2.boundingRect(contour)
points: np.ndarray = np.array([[x, y], [x, y + h], [x + w, y + h], [x + w, y]])
score = self.box_score(pred, points, assume_straight_pages=True)
else:
score = self.box_score(pred, contour, assume_straight_pages=False)
if score < self.box_thresh: # remove polygons with a weak objectness
continue
if self.assume_straight_pages:
_box = self.polygon_to_box(points)
else:
_box = self.polygon_to_box(np.squeeze(contour))
if self.assume_straight_pages:
# compute relative polygon to get rid of img shape
x, y, w, h = _box
xmin, ymin, xmax, ymax = x / width, y / height, (x + w) / width, (y + h) / height
boxes.append([xmin, ymin, xmax, ymax, score])
else:
# compute relative box to get rid of img shape
_box[:, 0] /= width
_box[:, 1] /= height
boxes.append(_box)
if not self.assume_straight_pages:
return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 4, 2), dtype=pred.dtype)
else:
return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 5), dtype=pred.dtype)
class _LinkNet(BaseModel):
"""LinkNet as described in `"LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation"
<https://arxiv.org/pdf/1707.03718.pdf>`_.
Args:
out_chan: number of channels for the output
"""
min_size_box: int = 3
assume_straight_pages: bool = True
shrink_ratio = 0.5
def build_target(
self,
target: List[Dict[str, np.ndarray]],
output_shape: Tuple[int, int, int],
channels_last: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
"""Build the target, and it's mask to be used from loss computation.
Args:
target: target coming from dataset
output_shape: shape of the output of the model without batch_size
channels_last: whether channels are last or not
Returns:
the new formatted target and the mask
"""
if any(t.dtype != np.float32 for tgt in target for t in tgt.values()):
raise AssertionError("the expected dtype of target 'boxes' entry is 'np.float32'.")
if any(np.any((t[:, :4] > 1) | (t[:, :4] < 0)) for tgt in target for t in tgt.values()):
raise ValueError("the 'boxes' entry of the target is expected to take values between 0 & 1.")
h: int
w: int
if channels_last:
h, w, num_classes = output_shape
else:
num_classes, h, w = output_shape
target_shape = (len(target), num_classes, h, w)
seg_target: np.ndarray = np.zeros(target_shape, dtype=np.uint8)
seg_mask: np.ndarray = np.ones(target_shape, dtype=bool)
for idx, tgt in enumerate(target):
for class_idx, _tgt in enumerate(tgt.values()):
# Draw each polygon on gt
if _tgt.shape[0] == 0:
# Empty image, full masked
seg_mask[idx, class_idx] = False
# Absolute bounding boxes
abs_boxes = _tgt.copy()
if abs_boxes.ndim == 3:
abs_boxes[:, :, 0] *= w
abs_boxes[:, :, 1] *= h
polys = abs_boxes
boxes_size = np.linalg.norm(abs_boxes[:, 2, :] - abs_boxes[:, 0, :], axis=-1)
abs_boxes = np.concatenate((abs_boxes.min(1), abs_boxes.max(1)), -1).round().astype(np.int32)
else:
abs_boxes[:, [0, 2]] *= w
abs_boxes[:, [1, 3]] *= h
abs_boxes = abs_boxes.round().astype(np.int32)
polys = np.stack(
[
abs_boxes[:, [0, 1]],
abs_boxes[:, [0, 3]],
abs_boxes[:, [2, 3]],
abs_boxes[:, [2, 1]],
],
axis=1,
)
boxes_size = np.minimum(abs_boxes[:, 2] - abs_boxes[:, 0], abs_boxes[:, 3] - abs_boxes[:, 1])
for poly, box, box_size in zip(polys, abs_boxes, boxes_size):
# Mask boxes that are too small
if box_size < self.min_size_box:
seg_mask[idx, class_idx, box[1] : box[3] + 1, box[0] : box[2] + 1] = False
continue
# Negative shrink for gt, as described in paper
polygon = Polygon(poly)
distance = polygon.area * (1 - np.power(self.shrink_ratio, 2)) / polygon.length
subject = [tuple(coor) for coor in poly]
padding = pyclipper.PyclipperOffset()
padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrunken = padding.Execute(-distance)
# Draw polygon on gt if it is valid
if len(shrunken) == 0:
seg_mask[idx, class_idx, box[1] : box[3] + 1, box[0] : box[2] + 1] = False
continue
shrunken = np.array(shrunken[0]).reshape(-1, 2)
if shrunken.shape[0] <= 2 or not Polygon(shrunken).is_valid:
seg_mask[idx, class_idx, box[1] : box[3] + 1, box[0] : box[2] + 1] = False
continue
cv2.fillPoly(seg_target[idx, class_idx], [shrunken.astype(np.int32)], 1)
# Don't forget to switch back to channel last if Tensorflow is used
if channels_last:
seg_target = seg_target.transpose((0, 2, 3, 1))
seg_mask = seg_mask.transpose((0, 2, 3, 1))
return seg_target, seg_mask
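# Minimal post-processing sketch (a sketch, relying on the DetectionPostProcessor
# base accepting a channels-last (N, H, W, C) probability map, exactly as the
# LinkNet models call it): a single synthetic blob becomes one relative
# (xmin, ymin, xmax, ymax, score) box.
if __name__ == "__main__":
    proba_map = np.zeros((1, 256, 256, 1), dtype=np.float32)
    proba_map[0, 100:140, 50:200, 0] = 0.9  # one text-like region
    postprocessor = LinkNetPostProcessor(assume_straight_pages=True)
    boxes = postprocessor(proba_map)[0][0]  # first page, first class
    print(boxes.shape)  # (num_boxes, 5)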
|
from doctr.file_utils import is_tf_available
if is_tf_available():
from .tensorflow import *
else:
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict, List, Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from doctr.models.preprocessor import PreProcessor
from doctr.utils.repr import NestedObject
__all__ = ["DetectionPredictor"]
class DetectionPredictor(NestedObject):
"""Implements an object able to localize text elements in a document
Args:
pre_processor: transform inputs for easier batched model inference
model: core detection architecture
"""
_children_names: List[str] = ["pre_processor", "model"]
def __init__(
self,
pre_processor: PreProcessor,
model: keras.Model,
) -> None:
self.pre_processor = pre_processor
self.model = model
def __call__(
self,
pages: List[Union[np.ndarray, tf.Tensor]],
**kwargs: Any,
) -> List[Dict[str, np.ndarray]]:
# Dimension check
if any(page.ndim != 3 for page in pages):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
processed_batches = self.pre_processor(pages)
predicted_batches = [
self.model(batch, return_preds=True, training=False, **kwargs)["preds"] for batch in processed_batches
]
return [pred for batch in predicted_batches for pred in batch]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, List, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
__all__ = ["DetectionPredictor"]
class DetectionPredictor(nn.Module):
"""Implements an object able to localize text elements in a document
Args:
pre_processor: transform inputs for easier batched model inference
model: core detection architecture
"""
def __init__(
self,
pre_processor: PreProcessor,
model: nn.Module,
) -> None:
super().__init__()
self.pre_processor = pre_processor
self.model = model.eval()
@torch.no_grad()
def forward(
self,
pages: List[Union[np.ndarray, torch.Tensor]],
**kwargs: Any,
) -> List[np.ndarray]:
# Dimension check
if any(page.ndim != 3 for page in pages):
raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
processed_batches = self.pre_processor(pages)
_device = next(self.model.parameters()).device
predicted_batches = [
self.model(batch.to(device=_device), return_preds=True, **kwargs)["preds"] for batch in processed_batches
]
return [pred for batch in predicted_batches for pred in batch]
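# Minimal end-to-end sketch (a sketch: the PreProcessor constructor arguments below
# -- output size, batch_size, mean, std -- are assumed from how the detection zoo
# wires it, and the normalization statistics come from the LinkNet default config).
if __name__ == "__main__":
    from doctr.models.detection import linknet_resnet18
    det_model = linknet_resnet18(pretrained=False, pretrained_backbone=False)
    pre_processor = PreProcessor((1024, 1024), batch_size=1, mean=(0.5, 0.5, 0.5), std=(1.0, 1.0, 1.0))
    predictor = DetectionPredictor(pre_processor, det_model)
    pages = [np.random.randint(0, 255, (896, 640, 3), dtype=np.uint8)]
    preds = predictor(pages)  # one dict of class name -> (num_boxes, 5) array per page
    print({k: v.shape for k, v in preds[0].items()})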
|
from .hub import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
# Inspired by: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hub.py
import json
import logging
import os
import subprocess
import textwrap
from pathlib import Path
from typing import Any
from huggingface_hub import HfApi, HfFolder, Repository, hf_hub_download, snapshot_download
from doctr import models
from doctr.file_utils import is_tf_available, is_torch_available
if is_torch_available():
import torch
__all__ = ["login_to_hub", "push_to_hf_hub", "from_hub", "_save_model_and_config_for_hf_hub"]
AVAILABLE_ARCHS = {
"classification": models.classification.zoo.ARCHS,
"detection": models.detection.zoo.ARCHS + models.detection.zoo.ROT_ARCHS,
"recognition": models.recognition.zoo.ARCHS,
"obj_detection": ["fasterrcnn_mobilenet_v3_large_fpn"] if is_torch_available() else None,
}
def login_to_hub() -> None:
"""Login to huggingface hub"""
access_token = HfFolder.get_token()
if access_token is not None and HfApi()._is_valid_token(access_token):
logging.info("Huggingface Hub token found and valid")
HfApi().set_access_token(access_token)
else:
subprocess.call(["huggingface-cli", "login"])
HfApi().set_access_token(HfFolder().get_token())
# check if git lfs is installed
try:
subprocess.call(["git", "lfs", "version"])
except FileNotFoundError:
raise OSError(
"Looks like you do not have git-lfs installed, please install. \
You can install from https://git-lfs.github.com/. \
Then run `git lfs install` (you only have to do this once)."
)
def _save_model_and_config_for_hf_hub(model: Any, save_dir: str, arch: str, task: str) -> None:
"""Save model and config to disk for pushing to huggingface hub
Args:
model: TF or PyTorch model to be saved
save_dir: directory to save model and config
arch: architecture name
task: task name
"""
save_directory = Path(save_dir)
if is_torch_available():
weights_path = save_directory / "pytorch_model.bin"
torch.save(model.state_dict(), weights_path)
elif is_tf_available():
weights_path = save_directory / "tf_model" / "weights"
model.save_weights(str(weights_path))
config_path = save_directory / "config.json"
# add model configuration
model_config = model.cfg
model_config["arch"] = arch
model_config["task"] = task
with config_path.open("w") as f:
json.dump(model_config, f, indent=2, ensure_ascii=False)
def push_to_hf_hub(model: Any, model_name: str, task: str, **kwargs) -> None:
"""Save model and its configuration on HF hub
>>> from doctr.models import login_to_hub, push_to_hf_hub
>>> from doctr.models.recognition import crnn_mobilenet_v3_small
>>> login_to_hub()
>>> model = crnn_mobilenet_v3_small(pretrained=True)
>>> push_to_hf_hub(model, 'my-model', 'recognition', arch='crnn_mobilenet_v3_small')
Args:
model: TF or PyTorch model to be saved
model_name: name of the model which is also the repository name
task: task name
**kwargs: keyword arguments for push_to_hf_hub
"""
run_config = kwargs.get("run_config", None)
arch = kwargs.get("arch", None)
if run_config is None and arch is None:
raise ValueError("run_config or arch must be specified")
if task not in ["classification", "detection", "recognition", "obj_detection"]:
raise ValueError("task must be one of classification, detection, recognition, obj_detection")
# default readme
readme = textwrap.dedent(
f"""
---
language: en
---
<p align="center">
<img src="https://doctr-static.mindee.com/models?id=v0.3.1/Logo_doctr.gif&src=0" width="60%">
</p>
**Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
## Task: {task}
https://github.com/mindee/doctr
### Example usage:
```python
>>> from doctr.io import DocumentFile
>>> from doctr.models import ocr_predictor, from_hub
>>> img = DocumentFile.from_images(['<image_path>'])
>>> # Load your model from the hub
>>> model = from_hub('mindee/my-model')
>>> # Pass it to the predictor
>>> # If your model is a recognition model:
>>> predictor = ocr_predictor(det_arch='db_mobilenet_v3_large',
>>> reco_arch=model,
>>> pretrained=True)
>>> # If your model is a detection model:
>>> predictor = ocr_predictor(det_arch=model,
>>> reco_arch='crnn_mobilenet_v3_small',
>>> pretrained=True)
>>> # Get your predictions
>>> res = predictor(img)
```
"""
)
# add run configuration to readme if available
if run_config is not None:
arch = run_config.arch
readme += textwrap.dedent(
f"""### Run Configuration
\n{json.dumps(vars(run_config), indent=2, ensure_ascii=False)}"""
)
if arch not in AVAILABLE_ARCHS[task]: # type: ignore
raise ValueError(
f"Architecture: {arch} for task: {task} not found.\
\nAvailable architectures: {AVAILABLE_ARCHS}"
)
commit_message = f"Add {model_name} model"
local_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface", "hub", model_name)
repo_url = HfApi().create_repo(model_name, token=HfFolder.get_token(), exist_ok=False)
repo = Repository(local_dir=local_cache_dir, clone_from=repo_url, token=True)
with repo.commit(commit_message):
_save_model_and_config_for_hf_hub(model, repo.local_dir, arch=arch, task=task)
readme_path = Path(repo.local_dir) / "README.md"
readme_path.write_text(readme)
repo.git_push()
def from_hub(repo_id: str, **kwargs: Any):
"""Instantiate & load a pretrained model from HF hub.
>>> from doctr.models import from_hub
>>> model = from_hub("mindee/fasterrcnn_mobilenet_v3_large_fpn")
Args:
repo_id: HuggingFace model hub repo
kwargs: kwargs of `hf_hub_download` or `snapshot_download`
Returns:
Model loaded with the checkpoint
"""
# Get the config
with open(hf_hub_download(repo_id, filename="config.json", **kwargs), "rb") as f:
cfg = json.load(f)
arch = cfg["arch"]
task = cfg["task"]
cfg.pop("arch")
cfg.pop("task")
if task == "classification":
model = models.classification.__dict__[arch](
pretrained=False, classes=cfg["classes"], num_classes=cfg["num_classes"]
)
elif task == "detection":
model = models.detection.__dict__[arch](pretrained=False)
elif task == "recognition":
model = models.recognition.__dict__[arch](pretrained=False, input_shape=cfg["input_shape"], vocab=cfg["vocab"])
elif task == "obj_detection" and is_torch_available():
model = models.obj_detection.__dict__[arch](
pretrained=False,
image_mean=cfg["mean"],
image_std=cfg["std"],
max_size=cfg["input_shape"][-1],
num_classes=len(cfg["classes"]),
)
# update model cfg
model.cfg = cfg
# Load checkpoint
if is_torch_available():
state_dict = torch.load(hf_hub_download(repo_id, filename="pytorch_model.bin", **kwargs), map_location="cpu")
model.load_state_dict(state_dict)
else: # tf
repo_path = snapshot_download(repo_id, **kwargs)
model.load_weights(os.path.join(repo_path, "tf_model", "weights"))
return model
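# Minimal local sketch (a sketch, no Hub access needed): serialize a small model
# and its configuration into a temporary folder, exactly as push_to_hf_hub does
# before committing to a repository.
if __name__ == "__main__":
    import tempfile
    model = models.recognition.crnn_mobilenet_v3_small(pretrained=False)
    with tempfile.TemporaryDirectory() as tmp:
        _save_model_and_config_for_hf_hub(model, tmp, arch="crnn_mobilenet_v3_small", task="recognition")
        print(sorted(os.listdir(tmp)))  # config.json plus the framework-specific weights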
|
from .faster_rcnn import *
|
from doctr.file_utils import is_tf_available, is_torch_available
if not is_tf_available() and is_torch_available():
from .pytorch import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Any, Dict
from torchvision.models.detection import FasterRCNN, faster_rcnn
from ...utils import load_pretrained_params
__all__ = ["fasterrcnn_mobilenet_v3_large_fpn"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"fasterrcnn_mobilenet_v3_large_fpn": {
"input_shape": (3, 1024, 1024),
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
"classes": ["background", "qr_code", "bar_code", "logo", "photo"],
"url": "https://doctr-static.mindee.com/models?id=v0.4.1/fasterrcnn_mobilenet_v3_large_fpn-d5b2490d.pt&src=0",
},
}
def _fasterrcnn(arch: str, pretrained: bool, **kwargs: Any) -> FasterRCNN:
_kwargs = {
"image_mean": default_cfgs[arch]["mean"],
"image_std": default_cfgs[arch]["std"],
"box_detections_per_img": 150,
"box_score_thresh": 0.5,
"box_positive_fraction": 0.35,
"box_nms_thresh": 0.2,
"rpn_nms_thresh": 0.2,
"num_classes": len(default_cfgs[arch]["classes"]),
}
# Build the model
_kwargs.update(kwargs)
model = faster_rcnn.__dict__[arch](pretrained=False, pretrained_backbone=False, **_kwargs)
model.cfg = default_cfgs[arch]
if pretrained:
# Load pretrained parameters
load_pretrained_params(model, default_cfgs[arch]["url"])
else:
# Filter keys
state_dict = {
k: v
for k, v in faster_rcnn.__dict__[arch](pretrained=True).state_dict().items()
if not k.startswith("roi_heads.")
}
# Load state dict
model.load_state_dict(state_dict, strict=False)
return model
def fasterrcnn_mobilenet_v3_large_fpn(pretrained: bool = False, **kwargs: Any) -> FasterRCNN:
"""Faster-RCNN architecture with a MobileNet V3 backbone as described in `"Faster R-CNN: Towards Real-Time
Object Detection with Region Proposal Networks" <https://arxiv.org/pdf/1506.01497.pdf>`_.
>>> import torch
>>> from doctr.models.obj_detection import fasterrcnn_mobilenet_v3_large_fpn
>>> model = fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)
>>> input_tensor = torch.rand((1, 3, 1024, 1024), dtype=torch.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our object detection dataset
Returns:
object detection architecture
"""
return _fasterrcnn("fasterrcnn_mobilenet_v3_large_fpn", pretrained, **kwargs)
|
from .modules import *
|
from doctr.file_utils import is_tf_available, is_torch_available
from .base import *
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[assignment]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import random
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from doctr.utils.repr import NestedObject
from ..functional.tensorflow import random_shadow
__all__ = [
"Compose",
"Resize",
"Normalize",
"LambdaTransformation",
"ToGray",
"RandomBrightness",
"RandomContrast",
"RandomSaturation",
"RandomHue",
"RandomGamma",
"RandomJpegQuality",
"GaussianBlur",
"ChannelShuffle",
"GaussianNoise",
"RandomHorizontalFlip",
"RandomShadow",
]
class Compose(NestedObject):
"""Implements a wrapper that will apply transformations sequentially
>>> import tensorflow as tf
>>> from doctr.transforms import Compose, Resize
>>> transfos = Compose([Resize((32, 32))])
>>> out = transfos(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
Args:
transforms: list of transformation modules
"""
_children_names: List[str] = ["transforms"]
def __init__(self, transforms: List[Callable[[Any], Any]]) -> None:
self.transforms = transforms
def __call__(self, x: Any) -> Any:
for t in self.transforms:
x = t(x)
return x
class Resize(NestedObject):
"""Resizes a tensor to a target size
>>> import tensorflow as tf
>>> from doctr.transforms import Resize
>>> transfo = Resize((32, 32))
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
Args:
output_size: expected output size
method: interpolation method
preserve_aspect_ratio: if `True`, preserve aspect ratio and pad the rest with zeros
symmetric_pad: if `True` while preserving aspect ratio, the padding will be done symmetrically
"""
def __init__(
self,
output_size: Union[int, Tuple[int, int]],
method: str = "bilinear",
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = False,
) -> None:
self.output_size = output_size
self.method = method
self.preserve_aspect_ratio = preserve_aspect_ratio
self.symmetric_pad = symmetric_pad
if isinstance(self.output_size, int):
self.wanted_size = (self.output_size, self.output_size)
elif isinstance(self.output_size, (tuple, list)):
self.wanted_size = self.output_size
else:
raise AssertionError("Output size should be either a list, a tuple or an int")
def extra_repr(self) -> str:
_repr = f"output_size={self.output_size}, method='{self.method}'"
if self.preserve_aspect_ratio:
_repr += f", preserve_aspect_ratio={self.preserve_aspect_ratio}, symmetric_pad={self.symmetric_pad}"
return _repr
def __call__(
self,
img: tf.Tensor,
target: Optional[np.ndarray] = None,
) -> Union[tf.Tensor, Tuple[tf.Tensor, np.ndarray]]:
input_dtype = img.dtype
img = tf.image.resize(img, self.wanted_size, self.method, self.preserve_aspect_ratio)
# It will produce an un-padded resized image, with a side shorter than wanted if we preserve aspect ratio
raw_shape = img.shape[:2]
if self.preserve_aspect_ratio:
if isinstance(self.output_size, (tuple, list)):
# In that case we need to pad because we want to enforce both width and height
if not self.symmetric_pad:
offset = (0, 0)
elif self.output_size[0] == img.shape[0]:
offset = (0, int((self.output_size[1] - img.shape[1]) / 2))
else:
offset = (int((self.output_size[0] - img.shape[0]) / 2), 0)
img = tf.image.pad_to_bounding_box(img, *offset, *self.output_size)
# In case boxes are provided, resize boxes if needed (for detection task if preserve aspect ratio)
if target is not None:
if self.preserve_aspect_ratio:
# Get absolute coords
if target.shape[1:] == (4,):
if isinstance(self.output_size, (tuple, list)) and self.symmetric_pad:
if np.max(target) <= 1:
offset = offset[0] / img.shape[0], offset[1] / img.shape[1]
target[:, [0, 2]] = offset[1] + target[:, [0, 2]] * raw_shape[1] / img.shape[1]
target[:, [1, 3]] = offset[0] + target[:, [1, 3]] * raw_shape[0] / img.shape[0]
else:
target[:, [0, 2]] *= raw_shape[1] / img.shape[1]
target[:, [1, 3]] *= raw_shape[0] / img.shape[0]
elif target.shape[1:] == (4, 2):
if isinstance(self.output_size, (tuple, list)) and self.symmetric_pad:
if np.max(target) <= 1:
offset = offset[0] / img.shape[0], offset[1] / img.shape[1]
target[..., 0] = offset[1] + target[..., 0] * raw_shape[1] / img.shape[1]
target[..., 1] = offset[0] + target[..., 1] * raw_shape[0] / img.shape[0]
else:
target[..., 0] *= raw_shape[1] / img.shape[1]
target[..., 1] *= raw_shape[0] / img.shape[0]
else:
raise AssertionError
return tf.cast(img, dtype=input_dtype), target
return tf.cast(img, dtype=input_dtype)
class Normalize(NestedObject):
"""Normalize a tensor to a Gaussian distribution for each channel
>>> import tensorflow as tf
>>> from doctr.transforms import Normalize
>>> transfo = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
mean: average value per channel
std: standard deviation per channel
"""
def __init__(self, mean: Tuple[float, float, float], std: Tuple[float, float, float]) -> None:
self.mean = tf.constant(mean)
self.std = tf.constant(std)
def extra_repr(self) -> str:
return f"mean={self.mean.numpy().tolist()}, std={self.std.numpy().tolist()}"
def __call__(self, img: tf.Tensor) -> tf.Tensor:
img -= tf.cast(self.mean, dtype=img.dtype)
img /= tf.cast(self.std, dtype=img.dtype)
return img
class LambdaTransformation(NestedObject):
"""Normalize a tensor to a Gaussian distribution for each channel
>>> import tensorflow as tf
>>> from doctr.transforms import LambdaTransformation
>>> transfo = LambdaTransformation(lambda x: x/ 255.)
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
fn: the function to be applied to the input tensor
"""
def __init__(self, fn: Callable[[tf.Tensor], tf.Tensor]) -> None:
self.fn = fn
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return self.fn(img)
class ToGray(NestedObject):
"""Convert a RGB tensor (batch of images or image) to a 3-channels grayscale tensor
>>> import tensorflow as tf
>>> from doctr.transforms import ToGray
>>> transfo = ToGray()
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
"""
def __init__(self, num_output_channels: int = 1):
self.num_output_channels = num_output_channels
def __call__(self, img: tf.Tensor) -> tf.Tensor:
img = tf.image.rgb_to_grayscale(img)
return img if self.num_output_channels == 1 else tf.repeat(img, self.num_output_channels, axis=-1)
class RandomBrightness(NestedObject):
"""Randomly adjust brightness of a tensor (batch of images or image) by adding a delta
to all pixels
>>> import tensorflow as tf
>>> from doctr.transforms import RandomBrightness
>>> transfo = RandomBrightness()
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
max_delta: offset to add to each pixel is randomly picked in [-max_delta, max_delta]
"""
def __init__(self, max_delta: float = 0.3) -> None:
self.max_delta = max_delta
def extra_repr(self) -> str:
return f"max_delta={self.max_delta}"
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return tf.image.random_brightness(img, max_delta=self.max_delta)
class RandomContrast(NestedObject):
"""Randomly adjust contrast of a tensor (batch of images or image) by adjusting
each pixel: (img - mean) * contrast_factor + mean.
>>> import tensorflow as tf
>>> from doctr.transforms import RandomContrast
>>> transfo = RandomContrast()
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
delta: multiplicative factor is picked in [1 - delta, 1 / (1 - delta)] (reduce contrast if factor < 1)
"""
def __init__(self, delta: float = 0.3) -> None:
self.delta = delta
def extra_repr(self) -> str:
return f"delta={self.delta}"
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return tf.image.random_contrast(img, lower=1 - self.delta, upper=1 / (1 - self.delta))
class RandomSaturation(NestedObject):
"""Randomly adjust saturation of a tensor (batch of images or image) by converting to HSV and
increasing saturation by a factor.
>>> import tensorflow as tf
>>> from doctr.transforms import RandomSaturation
>>> transfo = RandomSaturation()
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
delta: multiplicative factor is picked in [1-delta, 1+delta] (reduce saturation if factor<1)
"""
def __init__(self, delta: float = 0.5) -> None:
self.delta = delta
def extra_repr(self) -> str:
return f"delta={self.delta}"
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return tf.image.random_saturation(img, lower=1 - self.delta, upper=1 + self.delta)
class RandomHue(NestedObject):
"""Randomly adjust hue of a tensor (batch of images or image) by converting to HSV and adding a delta
>>> import tensorflow as tf
>>> from doctr.transforms import RandomHue
>>> transfo = RandomHue()
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
max_delta: the hue offset is randomly picked in [-max_delta, max_delta]
"""
def __init__(self, max_delta: float = 0.3) -> None:
self.max_delta = max_delta
def extra_repr(self) -> str:
return f"max_delta={self.max_delta}"
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return tf.image.random_hue(img, max_delta=self.max_delta)
class RandomGamma(NestedObject):
"""randomly performs gamma correction for a tensor (batch of images or image)
>>> import tensorflow as tf
>>> from doctr.transforms import RandomGamma
>>> transfo = RandomGamma()
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
Args:
min_gamma: non-negative real number, lower bound for gamma param
max_gamma: non-negative real number, upper bound for gamma
min_gain: lower bound for constant multiplier
max_gain: upper bound for constant multiplier
"""
def __init__(
self,
min_gamma: float = 0.5,
max_gamma: float = 1.5,
min_gain: float = 0.8,
max_gain: float = 1.2,
) -> None:
self.min_gamma = min_gamma
self.max_gamma = max_gamma
self.min_gain = min_gain
self.max_gain = max_gain
def extra_repr(self) -> str:
return f"""gamma_range=({self.min_gamma}, {self.max_gamma}),
gain_range=({self.min_gain}, {self.max_gain})"""
def __call__(self, img: tf.Tensor) -> tf.Tensor:
gamma = random.uniform(self.min_gamma, self.max_gamma)
gain = random.uniform(self.min_gain, self.max_gain)
return tf.image.adjust_gamma(img, gamma=gamma, gain=gain)
class RandomJpegQuality(NestedObject):
"""Randomly adjust jpeg quality of a 3 dimensional RGB image
>>> import tensorflow as tf
>>> from doctr.transforms import RandomJpegQuality
>>> transfo = RandomJpegQuality()
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
Args:
min_quality: int between [0, 100]
max_quality: int between [0, 100]
"""
def __init__(self, min_quality: int = 60, max_quality: int = 100) -> None:
self.min_quality = min_quality
self.max_quality = max_quality
def extra_repr(self) -> str:
return f"min_quality={self.min_quality}"
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return tf.image.random_jpeg_quality(img, min_jpeg_quality=self.min_quality, max_jpeg_quality=self.max_quality)
class GaussianBlur(NestedObject):
"""Randomly adjust jpeg quality of a 3 dimensional RGB image
>>> import tensorflow as tf
>>> from doctr.transforms import GaussianBlur
>>> transfo = GaussianBlur(3, (.1, 5))
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
Args:
kernel_shape: size of the blurring kernel
std: min and max value of the standard deviation
"""
def __init__(self, kernel_shape: Union[int, Iterable[int]], std: Tuple[float, float]) -> None:
self.kernel_shape = kernel_shape
self.std = std
def extra_repr(self) -> str:
return f"kernel_shape={self.kernel_shape}, std={self.std}"
@tf.function
def __call__(self, img: tf.Tensor) -> tf.Tensor:
sigma = random.uniform(self.std[0], self.std[1])
return tfa.image.gaussian_filter2d(
img,
filter_shape=self.kernel_shape,
sigma=sigma,
)
class ChannelShuffle(NestedObject):
"""Randomly shuffle channel order of a given image"""
def __init__(self):
pass
def __call__(self, img: tf.Tensor) -> tf.Tensor:
return tf.transpose(tf.random.shuffle(tf.transpose(img, perm=[2, 0, 1])), perm=[1, 2, 0])
class GaussianNoise(NestedObject):
"""Adds Gaussian Noise to the input tensor
>>> import tensorflow as tf
>>> from doctr.transforms import GaussianNoise
>>> transfo = GaussianNoise(0., 1.)
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
Args:
mean : mean of the gaussian distribution
std : std of the gaussian distribution
"""
def __init__(self, mean: float = 0.0, std: float = 1.0) -> None:
super().__init__()
self.std = std
self.mean = mean
def __call__(self, x: tf.Tensor) -> tf.Tensor:
# Sample uniform noise in [mean - std, mean + std]
noise = self.mean + 2 * self.std * tf.random.uniform(x.shape) - self.std
if x.dtype == tf.uint8:
return tf.cast(
tf.clip_by_value(tf.math.round(tf.cast(x, dtype=tf.float32) + 255 * noise), 0, 255), dtype=tf.uint8
)
else:
return tf.cast(tf.clip_by_value(x + noise, 0, 1), dtype=x.dtype)
def extra_repr(self) -> str:
return f"mean={self.mean}, std={self.std}"
class RandomHorizontalFlip(NestedObject):
"""Adds random horizontal flip to the input tensor/np.ndarray
>>> import tensorflow as tf
>>> from doctr.transforms import RandomHorizontalFlip
>>> transfo = RandomHorizontalFlip(p=0.5)
>>> image = tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1)
>>> target = {
>>> "boxes": np.array([[0.1, 0.1, 0.4, 0.5] ], dtype= np.float32),
>>> "labels": np.ones(1, dtype= np.int64)
>>> }
>>> out = transfo(image, target)
Args:
p : probability of Horizontal Flip
"""
def __init__(self, p: float) -> None:
super().__init__()
self.p = p
def __call__(self, img: Union[tf.Tensor, np.ndarray], target: Dict[str, Any]) -> Tuple[tf.Tensor, Dict[str, Any]]:
"""
Args:
img: Image to be flipped.
target: Dictionary with boxes (in relative coordinates of shape (N, 4)) and labels as keys
Returns:
Tuple of numpy nd-array or Tensor and target
"""
if np.random.rand(1) <= self.p:
_img = tf.image.flip_left_right(img)
_target = target.copy()
# Changing the relative bbox coordinates
_target["boxes"][:, ::2] = 1 - target["boxes"][:, [2, 0]]
return _img, _target
return img, target
class RandomShadow(NestedObject):
"""Adds random shade to the input image
>>> import tensorflow as tf
>>> from doctr.transforms import RandomShadow
>>> transfo = RandomShadow((0., 1.))
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
Args:
opacity_range : minimum and maximum opacity of the shade
"""
def __init__(self, opacity_range: Optional[Tuple[float, float]] = None) -> None:
super().__init__()
self.opacity_range = opacity_range if isinstance(opacity_range, tuple) else (0.2, 0.8)
def __call__(self, x: tf.Tensor) -> tf.Tensor:
# Blend a random shadow into the image (uint8 and float inputs handled separately)
if x.dtype == tf.uint8:
return tf.cast(
tf.clip_by_value(
tf.math.round(255 * random_shadow(tf.cast(x, dtype=tf.float32) / 255, self.opacity_range)),
0,
255,
),
dtype=tf.uint8,
)
else:
return tf.clip_by_value(random_shadow(x, self.opacity_range), 0, 1)
def extra_repr(self) -> str:
return f"opacity_range={self.opacity_range}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from PIL.Image import Image
from torch.nn.functional import pad
from torchvision.transforms import functional as F
from torchvision.transforms import transforms as T
from ..functional.pytorch import random_shadow
__all__ = ["Resize", "GaussianNoise", "ChannelShuffle", "RandomHorizontalFlip", "RandomShadow"]
class Resize(T.Resize):
def __init__(
self,
size: Union[int, Tuple[int, int]],
interpolation=F.InterpolationMode.BILINEAR,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = False,
) -> None:
super().__init__(size, interpolation)
self.preserve_aspect_ratio = preserve_aspect_ratio
self.symmetric_pad = symmetric_pad
if not isinstance(self.size, (int, tuple, list)):
raise AssertionError("size should be either a tuple, a list or an int")
def forward(
self,
img: torch.Tensor,
target: Optional[np.ndarray] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, np.ndarray]]:
if isinstance(self.size, int):
target_ratio = img.shape[-2] / img.shape[-1]
else:
target_ratio = self.size[0] / self.size[1]
actual_ratio = img.shape[-2] / img.shape[-1]
if not self.preserve_aspect_ratio or (target_ratio == actual_ratio and (isinstance(self.size, (tuple, list)))):
# If we don't preserve the aspect ratio or the wanted aspect ratio is the same as the original one
# we can use the regular resize
if target is not None:
return super().forward(img), target
return super().forward(img)
else:
# Resize
if isinstance(self.size, (tuple, list)):
if actual_ratio > target_ratio:
tmp_size = (self.size[0], max(int(self.size[0] / actual_ratio), 1))
else:
tmp_size = (max(int(self.size[1] * actual_ratio), 1), self.size[1])
elif isinstance(self.size, int): # self.size is the longest side, infer the other
if img.shape[-2] <= img.shape[-1]:
tmp_size = (max(int(self.size * actual_ratio), 1), self.size)
else:
tmp_size = (self.size, max(int(self.size / actual_ratio), 1))
# Scale image
img = F.resize(img, tmp_size, self.interpolation)
raw_shape = img.shape[-2:]
if isinstance(self.size, (tuple, list)):
# Pad (inverted in pytorch)
_pad = (0, self.size[1] - img.shape[-1], 0, self.size[0] - img.shape[-2])
if self.symmetric_pad:
half_pad = (math.ceil(_pad[1] / 2), math.ceil(_pad[3] / 2))
_pad = (half_pad[0], _pad[1] - half_pad[0], half_pad[1], _pad[3] - half_pad[1])
img = pad(img, _pad)
# In case boxes are provided, resize boxes if needed (for detection task if preserve aspect ratio)
if target is not None:
if self.preserve_aspect_ratio:
# Get absolute coords
if target.shape[1:] == (4,):
if isinstance(self.size, (tuple, list)) and self.symmetric_pad:
if np.max(target) <= 1:
offset = half_pad[0] / img.shape[-1], half_pad[1] / img.shape[-2]
target[:, [0, 2]] = offset[0] + target[:, [0, 2]] * raw_shape[-1] / img.shape[-1]
target[:, [1, 3]] = offset[1] + target[:, [1, 3]] * raw_shape[-2] / img.shape[-2]
else:
target[:, [0, 2]] *= raw_shape[-1] / img.shape[-1]
target[:, [1, 3]] *= raw_shape[-2] / img.shape[-2]
elif target.shape[1:] == (4, 2):
if isinstance(self.size, (tuple, list)) and self.symmetric_pad:
if np.max(target) <= 1:
offset = half_pad[0] / img.shape[-1], half_pad[1] / img.shape[-2]
target[..., 0] = offset[0] + target[..., 0] * raw_shape[-1] / img.shape[-1]
target[..., 1] = offset[1] + target[..., 1] * raw_shape[-2] / img.shape[-2]
else:
target[..., 0] *= raw_shape[-1] / img.shape[-1]
target[..., 1] *= raw_shape[-2] / img.shape[-2]
else:
raise AssertionError
return img, target
return img
def __repr__(self) -> str:
interpolate_str = self.interpolation.value
_repr = f"output_size={self.size}, interpolation='{interpolate_str}'"
if self.preserve_aspect_ratio:
_repr += f", preserve_aspect_ratio={self.preserve_aspect_ratio}, symmetric_pad={self.symmetric_pad}"
return f"{self.__class__.__name__}({_repr})"
class GaussianNoise(torch.nn.Module):
"""Adds Gaussian Noise to the input tensor
>>> import torch
>>> from doctr.transforms import GaussianNoise
>>> transfo = GaussianNoise(0., 1.)
>>> out = transfo(torch.rand((3, 224, 224)))
Args:
mean : mean of the gaussian distribution
std : std of the gaussian distribution
"""
def __init__(self, mean: float = 0.0, std: float = 1.0) -> None:
super().__init__()
self.std = std
self.mean = mean
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Sample uniform noise in [mean - std, mean + std]
noise = self.mean + 2 * self.std * torch.rand(x.shape, device=x.device) - self.std
if x.dtype == torch.uint8:
return (x + 255 * noise).round().clamp(0, 255).to(dtype=torch.uint8)
else:
return (x + noise.to(dtype=x.dtype)).clamp(0, 1)
def extra_repr(self) -> str:
return f"mean={self.mean}, std={self.std}"
class ChannelShuffle(torch.nn.Module):
"""Randomly shuffle channel order of a given image"""
def __init__(self):
super().__init__()
def forward(self, img: torch.Tensor) -> torch.Tensor:
# Get a random order
chan_order = torch.rand(img.shape[0]).argsort()
return img[chan_order]
class RandomHorizontalFlip(T.RandomHorizontalFlip):
def forward(
self, img: Union[torch.Tensor, Image], target: Dict[str, Any]
) -> Tuple[Union[torch.Tensor, Image], Dict[str, Any]]:
"""
Args:
img: Image to be flipped.
target: Dictionary with boxes (in relative coordinates of shape (N, 4)) and labels as keys
Returns:
Tuple of PIL Image or Tensor and target
"""
if torch.rand(1) < self.p:
_img = F.hflip(img)
_target = target.copy()
# Changing the relative bbox coordinates
_target["boxes"][:, ::2] = 1 - target["boxes"][:, [2, 0]]
return _img, _target
return img, target
class RandomShadow(torch.nn.Module):
"""Adds random shade to the input image
>>> import torch
>>> from doctr.transforms import RandomShadow
>>> transfo = RandomShadow((0., 1.))
>>> out = transfo(torch.rand((3, 64, 64)))
Args:
opacity_range : minimum and maximum opacity of the shade
"""
def __init__(self, opacity_range: Optional[Tuple[float, float]] = None) -> None:
super().__init__()
self.opacity_range = opacity_range if isinstance(opacity_range, tuple) else (0.2, 0.8)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
# Blend a random shadow into the image (uint8 and float inputs handled separately)
try:
if x.dtype == torch.uint8:
return (
(
255
* random_shadow(
x.to(dtype=torch.float32) / 255,
self.opacity_range,
)
)
.round()
.clip(0, 255)
.to(dtype=torch.uint8)
)
else:
return random_shadow(x, self.opacity_range).clip(0, 1)
except ValueError:
return x
def extra_repr(self) -> str:
return f"opacity_range={self.opacity_range}"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
import random
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
from doctr.utils.repr import NestedObject
from .. import functional as F
__all__ = ["SampleCompose", "ImageTransform", "ColorInversion", "OneOf", "RandomApply", "RandomRotate", "RandomCrop"]
class SampleCompose(NestedObject):
"""Implements a wrapper that will apply transformations sequentially on both image and target
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import numpy as np
>>> import tensorflow as tf
>>> from doctr.transforms import SampleCompose, ImageTransform, ColorInversion, RandomRotate
>>> transfo = SampleCompose([ImageTransform(ColorInversion(min_val=0.6)), RandomRotate(30)])
>>> out, out_boxes = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1), np.zeros((2, 4)))
.. tab:: PyTorch
.. code:: python
>>> import numpy as np
>>> import torch
>>> from doctr.transforms import SampleCompose, ImageTransform, ColorInversion, RandomRotate
>>> transfos = SampleCompose([ImageTransform(ColorInversion(min_val=0.6)), RandomRotate(30)])
>>> out, out_boxes = transfos(torch.rand(8, 64, 64, 3), np.zeros((2, 4)))
Args:
transforms: list of transformation modules
"""
_children_names: List[str] = ["sample_transforms"]
def __init__(self, transforms: List[Callable[[Any, Any], Tuple[Any, Any]]]) -> None:
self.sample_transforms = transforms
def __call__(self, x: Any, target: Any) -> Tuple[Any, Any]:
for t in self.sample_transforms:
x, target = t(x, target)
return x, target
class ImageTransform(NestedObject):
"""Implements a transform wrapper to turn an image-only transformation into an image+target transform
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import ImageTransform, ColorInversion
>>> transfo = ImageTransform(ColorInversion(min_val=0.6))
>>> out, _ = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1), None)
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import ImageTransform, ColorInversion
>>> transfo = ImageTransform(ColorInversion(min_val=0.6))
>>> out, _ = transfo(torch.rand(8, 64, 64, 3), None)
Args:
transform: the image transformation module to wrap
"""
_children_names: List[str] = ["img_transform"]
def __init__(self, transform: Callable[[Any], Any]) -> None:
self.img_transform = transform
def __call__(self, img: Any, target: Any) -> Tuple[Any, Any]:
img = self.img_transform(img)
return img, target
class ColorInversion(NestedObject):
"""Applies the following tranformation to a tensor (image or batch of images):
convert to grayscale, colorize (shift 0-values randomly), and then invert colors
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import ColorInversion
>>> transfo = ColorInversion(min_val=0.6)
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import ColorInversion
>>> transfo = ColorInversion(min_val=0.6)
>>> out = transfo(torch.rand(8, 64, 64, 3))
Args:
min_val: range [min_val, 1] to colorize RGB pixels
"""
def __init__(self, min_val: float = 0.5) -> None:
self.min_val = min_val
def extra_repr(self) -> str:
return f"min_val={self.min_val}"
def __call__(self, img: Any) -> Any:
return F.invert_colors(img, self.min_val)
class OneOf(NestedObject):
"""Randomly apply one of the input transformations
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import OneOf
>>> transfo = OneOf([JpegQuality(), Gamma()])
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import OneOf
>>> transfo = OneOf([JpegQuality(), Gamma()])
>>> out = transfo(torch.rand(1, 64, 64, 3))
Args:
transforms: list of transformations, one only will be picked
"""
_children_names: List[str] = ["transforms"]
def __init__(self, transforms: List[Callable[[Any], Any]]) -> None:
self.transforms = transforms
def __call__(self, img: Any) -> Any:
# Pick transformation
transfo = self.transforms[int(random.random() * len(self.transforms))]
# Apply
return transfo(img)
class RandomApply(NestedObject):
"""Apply with a probability p the input transformation
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import RandomApply
>>> transfo = RandomApply(Gamma(), p=.5)
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import RandomApply
>>> transfo = RandomApply(Gamma(), p=.5)
>>> out = transfo(torch.rand(1, 64, 64, 3))
Args:
transform: transformation to apply
p: probability to apply
"""
def __init__(self, transform: Callable[[Any], Any], p: float = 0.5) -> None:
self.transform = transform
self.p = p
def extra_repr(self) -> str:
return f"transform={self.transform}, p={self.p}"
def __call__(self, img: Any) -> Any:
if random.random() < self.p:
return self.transform(img)
return img
class RandomRotate(NestedObject):
"""Randomly rotate a tensor image and its boxes
.. image:: https://doctr-static.mindee.com/models?id=v0.4.0/rotation_illustration.png&src=0
:align: center
Args:
max_angle: maximum angle for rotation, in degrees. Angles will be uniformly picked in
[-max_angle, max_angle]
expand: whether the image should be padded before the rotation
"""
def __init__(self, max_angle: float = 5.0, expand: bool = False) -> None:
self.max_angle = max_angle
self.expand = expand
def extra_repr(self) -> str:
return f"max_angle={self.max_angle}, expand={self.expand}"
def __call__(self, img: Any, target: np.ndarray) -> Tuple[Any, np.ndarray]:
angle = random.uniform(-self.max_angle, self.max_angle)
r_img, r_polys = F.rotate_sample(img, target, angle, self.expand)
# Removes deleted boxes
is_kept = (r_polys.max(1) > r_polys.min(1)).sum(1) == 2
return r_img, r_polys[is_kept]
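# Illustrative usage sketch for RandomRotate (values are arbitrary; F.rotate_sample dispatches to the
# TensorFlow or PyTorch implementation depending on which backend is installed):
# >>> import numpy as np
# >>> import tensorflow as tf
# >>> transfo = RandomRotate(max_angle=10.0, expand=True)
# >>> img = tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1)
# >>> boxes = np.array([[0.1, 0.1, 0.5, 0.4]], dtype=np.float32)  # relative (xmin, ymin, xmax, ymax)
# >>> r_img, r_polys = transfo(img, boxes)  # r_polys: relative polygons of shape (N, 4, 2)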
class RandomCrop(NestedObject):
"""Randomly crop a tensor image and its boxes
Args:
scale: tuple of floats, relative (min_area, max_area) of the crop
ratio: tuple of float, relative (min_ratio, max_ratio) where ratio = h/w
"""
def __init__(self, scale: Tuple[float, float] = (0.08, 1.0), ratio: Tuple[float, float] = (0.75, 1.33)) -> None:
self.scale = scale
self.ratio = ratio
def extra_repr(self) -> str:
return f"scale={self.scale}, ratio={self.ratio}"
def __call__(self, img: Any, target: Dict[str, np.ndarray]) -> Tuple[Any, Dict[str, np.ndarray]]:
scale = random.uniform(self.scale[0], self.scale[1])
ratio = random.uniform(self.ratio[0], self.ratio[1])
# Those might overflow
crop_h = math.sqrt(scale * ratio)
crop_w = math.sqrt(scale / ratio)
xmin, ymin = random.uniform(0, 1 - crop_w), random.uniform(0, 1 - crop_h)
xmax, ymax = xmin + crop_w, ymin + crop_h
# Clip them
xmin, ymin = max(xmin, 0), max(ymin, 0)
xmax, ymax = min(xmax, 1), min(ymax, 1)
cropped_img, crop_boxes = F.crop_detection(img, target["boxes"], (xmin, ymin, xmax, ymax))
return cropped_img, dict(boxes=crop_boxes)
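# Illustrative usage sketch for RandomCrop (values are arbitrary; the target must carry a "boxes"
# entry in relative coordinates, as expected by F.crop_detection):
# >>> import numpy as np
# >>> import tensorflow as tf
# >>> transfo = RandomCrop(scale=(0.5, 1.0), ratio=(0.75, 1.33))
# >>> img = tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1)
# >>> target = {"boxes": np.array([[0.1, 0.1, 0.5, 0.4]], dtype=np.float32)}
# >>> c_img, c_target = transfo(img, target)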
|
from doctr.file_utils import is_tf_available, is_torch_available
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import *
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from copy import deepcopy
from typing import Tuple
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from doctr.utils.geometry import compute_expanded_shape, rotate_abs_geoms
from .base import create_shadow_mask, crop_boxes
__all__ = ["invert_colors", "rotate_sample", "crop_detection", "random_shadow"]
def invert_colors(img: tf.Tensor, min_val: float = 0.6) -> tf.Tensor:
out = tf.image.rgb_to_grayscale(img) # Convert to gray
# Random RGB shift
shift_shape = [img.shape[0], 1, 1, 3] if img.ndim == 4 else [1, 1, 3]
rgb_shift = tf.random.uniform(shape=shift_shape, minval=min_val, maxval=1)
# Inverse the color
if out.dtype == tf.uint8:
out = tf.cast(tf.cast(out, dtype=rgb_shift.dtype) * rgb_shift, dtype=tf.uint8)
else:
out *= tf.cast(rgb_shift, dtype=out.dtype)
# Inverse the color
out = 255 - out if out.dtype == tf.uint8 else 1 - out
return out
def rotated_img_tensor(img: tf.Tensor, angle: float, expand: bool = False) -> tf.Tensor:
"""Rotate image around the center, interpolation=NEAREST, pad with 0 (black)
Args:
img: image to rotate
angle: angle in degrees. +: counter-clockwise, -: clockwise
expand: whether the image should be padded before the rotation
Returns:
the rotated image (tensor)
"""
# Compute the expanded padding
h_crop, w_crop = 0, 0
if expand:
exp_h, exp_w = compute_expanded_shape(img.shape[:-1], angle)
h_diff, w_diff = int(math.ceil(exp_h - img.shape[0])), int(math.ceil(exp_w - img.shape[1]))
h_pad, w_pad = max(h_diff, 0), max(w_diff, 0)
exp_img = tf.pad(img, tf.constant([[h_pad // 2, h_pad - h_pad // 2], [w_pad // 2, w_pad - w_pad // 2], [0, 0]]))
h_crop, w_crop = int(round(max(exp_img.shape[0] - exp_h, 0))), int(round(max(exp_img.shape[1] - exp_w, 0)))
else:
exp_img = img
# Rotate the padded image
rotated_img = tfa.image.rotate(exp_img, angle * math.pi / 180) # Interpolation NEAREST by default
# Crop the rest
if h_crop > 0 or w_crop > 0:
h_slice = slice(h_crop // 2, -h_crop // 2) if h_crop > 0 else slice(rotated_img.shape[0])
w_slice = slice(w_crop // 2, -w_crop // 2) if w_crop > 0 else slice(rotated_img.shape[1])
rotated_img = rotated_img[h_slice, w_slice]
return rotated_img
def rotate_sample(
img: tf.Tensor,
geoms: np.ndarray,
angle: float,
expand: bool = False,
) -> Tuple[tf.Tensor, np.ndarray]:
"""Rotate image around the center, interpolation=NEAREST, pad with 0 (black)
Args:
img: image to rotate
geoms: array of geometries of shape (N, 4) or (N, 4, 2)
angle: angle in degrees. +: counter-clockwise, -: clockwise
expand: whether the image should be padded before the rotation
Returns:
A tuple of rotated img (tensor), rotated boxes (np array)
"""
# Rotate the image
rotated_img = rotated_img_tensor(img, angle, expand)
# Get absolute coords
_geoms = deepcopy(geoms)
if _geoms.shape[1:] == (4,):
if np.max(_geoms) <= 1:
_geoms[:, [0, 2]] *= img.shape[1]
_geoms[:, [1, 3]] *= img.shape[0]
elif _geoms.shape[1:] == (4, 2):
if np.max(_geoms) <= 1:
_geoms[..., 0] *= img.shape[1]
_geoms[..., 1] *= img.shape[0]
else:
raise AssertionError
# Rotate the boxes: xmin, ymin, xmax, ymax or polygons --> (4, 2) polygon
rotated_geoms: np.ndarray = rotate_abs_geoms(_geoms, angle, img.shape[:-1], expand).astype(np.float32)
# Always return relative boxes to avoid label confusions when resizing is performed afterwards
rotated_geoms[..., 0] = rotated_geoms[..., 0] / rotated_img.shape[1]
rotated_geoms[..., 1] = rotated_geoms[..., 1] / rotated_img.shape[0]
return rotated_img, np.clip(rotated_geoms, 0, 1)
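# Illustrative sketch (arbitrary values): whatever the input box format, rotate_sample always returns
# relative polygons of shape (N, 4, 2).
# >>> img = tf.random.uniform(shape=[64, 128, 3], minval=0, maxval=1)
# >>> geoms = np.array([[0.2, 0.2, 0.6, 0.5]], dtype=np.float32)
# >>> r_img, r_geoms = rotate_sample(img, geoms, angle=15.0, expand=True)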
def crop_detection(
img: tf.Tensor, boxes: np.ndarray, crop_box: Tuple[float, float, float, float]
) -> Tuple[tf.Tensor, np.ndarray]:
"""Crop and image and associated bboxes
Args:
img: image to crop
boxes: array of boxes to clip, absolute (int) or relative (float)
crop_box: box (xmin, ymin, xmax, ymax) to crop the image. Relative coords.
Returns:
A tuple of cropped image, cropped boxes, where the image is not resized.
"""
if any(val < 0 or val > 1 for val in crop_box):
raise AssertionError("coordinates of arg `crop_box` should be relative")
h, w = img.shape[:2]
xmin, ymin = int(round(crop_box[0] * (w - 1))), int(round(crop_box[1] * (h - 1)))
xmax, ymax = int(round(crop_box[2] * (w - 1))), int(round(crop_box[3] * (h - 1)))
cropped_img = tf.image.crop_to_bounding_box(img, ymin, xmin, ymax - ymin, xmax - xmin)
# Crop the box
boxes = crop_boxes(boxes, crop_box if boxes.max() <= 1 else (xmin, ymin, xmax, ymax))
return cropped_img, boxes
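# Illustrative sketch (arbitrary values): cropping the central part of an image with relative boxes.
# >>> img = tf.random.uniform(shape=[64, 128, 3], minval=0, maxval=1)
# >>> boxes = np.array([[0.3, 0.3, 0.6, 0.6]], dtype=np.float32)
# >>> c_img, c_boxes = crop_detection(img, boxes, crop_box=(0.25, 0.25, 0.75, 0.75))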
def random_shadow(img: tf.Tensor, opacity_range: Tuple[float, float], **kwargs) -> tf.Tensor:
"""Apply a random shadow to a given image
Args:
img: image to modify
opacity_range: the minimum and maximum desired opacity of the shadow
Returns:
shaded image
"""
shadow_mask = create_shadow_mask(img.shape[:2], **kwargs)
opacity = np.random.uniform(*opacity_range)
shadow_tensor = 1 - tf.convert_to_tensor(shadow_mask[..., None], dtype=tf.float32)
# Add some blur to make it believable
k = 7 + int(2 * 4 * np.random.rand(1))
shadow_tensor = tfa.image.gaussian_filter2d(
shadow_tensor,
filter_shape=k,
sigma=np.random.uniform(0.5, 5.0),
)
return opacity * shadow_tensor * img + (1 - opacity) * img
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from copy import deepcopy
from typing import Tuple
import numpy as np
import torch
from torchvision.transforms import functional as F
from doctr.utils.geometry import rotate_abs_geoms
from .base import create_shadow_mask, crop_boxes
__all__ = ["invert_colors", "rotate_sample", "crop_detection", "random_shadow"]
def invert_colors(img: torch.Tensor, min_val: float = 0.6) -> torch.Tensor:
out = F.rgb_to_grayscale(img, num_output_channels=3)
# Random RGB shift
shift_shape = [img.shape[0], 3, 1, 1] if img.ndim == 4 else [3, 1, 1]
rgb_shift = min_val + (1 - min_val) * torch.rand(shift_shape)
# Inverse the color
if out.dtype == torch.uint8:
out = (out.to(dtype=rgb_shift.dtype) * rgb_shift).to(dtype=torch.uint8)
else:
out = out * rgb_shift.to(dtype=out.dtype)
# Inverse the color
out = 255 - out if out.dtype == torch.uint8 else 1 - out
return out
def rotate_sample(
img: torch.Tensor,
geoms: np.ndarray,
angle: float,
expand: bool = False,
) -> Tuple[torch.Tensor, np.ndarray]:
"""Rotate image around the center, interpolation=NEAREST, pad with 0 (black)
Args:
img: image to rotate
geoms: array of geometries of shape (N, 4) or (N, 4, 2)
angle: angle in degrees. +: counter-clockwise, -: clockwise
expand: whether the image should be padded before the rotation
Returns:
A tuple of rotated img (tensor), rotated geometries of shape (N, 4, 2)
"""
rotated_img = F.rotate(img, angle=angle, fill=0, expand=expand) # Interpolation NEAREST by default
rotated_img = rotated_img[:3] # when expand=True, it expands to RGBA channels
# Get absolute coords
_geoms = deepcopy(geoms)
if _geoms.shape[1:] == (4,):
if np.max(_geoms) <= 1:
_geoms[:, [0, 2]] *= img.shape[-1]
_geoms[:, [1, 3]] *= img.shape[-2]
elif _geoms.shape[1:] == (4, 2):
if np.max(_geoms) <= 1:
_geoms[..., 0] *= img.shape[-1]
_geoms[..., 1] *= img.shape[-2]
else:
raise AssertionError("invalid format for arg `geoms`")
# Rotate the boxes: xmin, ymin, xmax, ymax or polygons --> (4, 2) polygon
rotated_geoms: np.ndarray = rotate_abs_geoms(
_geoms,
angle,
img.shape[1:], # type: ignore[arg-type]
expand,
).astype(np.float32)
# Always return relative boxes to avoid label confusions when resizing is performed afterwards
rotated_geoms[..., 0] = rotated_geoms[..., 0] / rotated_img.shape[2]
rotated_geoms[..., 1] = rotated_geoms[..., 1] / rotated_img.shape[1]
return rotated_img, np.clip(rotated_geoms, 0, 1)
def crop_detection(
img: torch.Tensor, boxes: np.ndarray, crop_box: Tuple[float, float, float, float]
) -> Tuple[torch.Tensor, np.ndarray]:
"""Crop and image and associated bboxes
Args:
img: image to crop
boxes: array of boxes to clip, absolute (int) or relative (float)
crop_box: box (xmin, ymin, xmax, ymax) to crop the image. Relative coords.
Returns:
A tuple of cropped image, cropped boxes, where the image is not resized.
"""
if any(val < 0 or val > 1 for val in crop_box):
raise AssertionError("coordinates of arg `crop_box` should be relative")
h, w = img.shape[-2:]
xmin, ymin = int(round(crop_box[0] * (w - 1))), int(round(crop_box[1] * (h - 1)))
xmax, ymax = int(round(crop_box[2] * (w - 1))), int(round(crop_box[3] * (h - 1)))
cropped_img = F.crop(img, ymin, xmin, ymax - ymin, xmax - xmin)
# Crop the box
boxes = crop_boxes(boxes, crop_box if boxes.max() <= 1 else (xmin, ymin, xmax, ymax))
return cropped_img, boxes
def random_shadow(img: torch.Tensor, opacity_range: Tuple[float, float], **kwargs) -> torch.Tensor:
"""Crop and image and associated bboxes
Args:
img: image to modify
opacity_range: the minimum and maximum desired opacity of the shadow
Returns:
shaded image
"""
shadow_mask = create_shadow_mask(img.shape[1:], **kwargs) # type: ignore[arg-type]
opacity = np.random.uniform(*opacity_range)
shadow_tensor = 1 - torch.from_numpy(shadow_mask[None, ...])
# Add some blur to make it believable
k = 7 + 2 * int(4 * np.random.rand(1))
sigma = np.random.uniform(0.5, 5.0)
shadow_tensor = F.gaussian_blur(shadow_tensor, k, sigma=[sigma, sigma])
return opacity * shadow_tensor * img + (1 - opacity) * img
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Tuple, Union
import cv2
import numpy as np
from doctr.utils.geometry import rotate_abs_geoms
__all__ = ["crop_boxes", "create_shadow_mask"]
def crop_boxes(
boxes: np.ndarray,
crop_box: Union[Tuple[int, int, int, int], Tuple[float, float, float, float]],
) -> np.ndarray:
"""Crop localization boxes
Args:
boxes: ndarray of shape (N, 4) in relative or abs coordinates
crop_box: box (xmin, ymin, xmax, ymax) to crop the image, in the same coord format that the boxes
Returns:
the cropped boxes
"""
is_box_rel = boxes.max() <= 1
is_crop_rel = max(crop_box) <= 1
if is_box_rel ^ is_crop_rel:
raise AssertionError("both the boxes and the crop need to have the same coordinate convention")
xmin, ymin, xmax, ymax = crop_box
# Clip boxes & correct offset
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(xmin, xmax) - xmin
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(ymin, ymax) - ymin
# Rescale relative coords
if is_box_rel:
boxes[:, [0, 2]] /= xmax - xmin
boxes[:, [1, 3]] /= ymax - ymin
# Remove 0-sized boxes
is_valid = np.logical_and(boxes[:, 1] < boxes[:, 3], boxes[:, 0] < boxes[:, 2])
return boxes[is_valid]
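# Illustrative worked example (arbitrary values), in relative coordinates: a box spanning [0.2, 0.6]
# horizontally, cropped to the left half of the page, is clipped to [0.2, 0.5] and rescaled to the
# crop frame, giving [0.4, 1.0].
# >>> boxes = np.array([[0.2, 0.1, 0.6, 0.3]], dtype=np.float32)
# >>> crop_boxes(boxes, (0.0, 0.0, 0.5, 0.5))  # -> approximately array([[0.4, 0.2, 1.0, 0.6]])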
def expand_line(line: np.ndarray, target_shape: Tuple[int, int]) -> Tuple[float, float]:
"""Expands a 2-point line, so that the first is on the edge. In other terms, we extend the line in
the same direction until we meet one of the edges.
Args:
line: array of shape (2, 2) of the point supposed to be on one edge, and the shadow tip.
target_shape: the desired mask shape
Returns:
2D coordinates of the first point once we extended the line (on one of the edges)
"""
if any(coord == 0 or coord == size for coord, size in zip(line[0], target_shape[::-1])):
return line[0]
# Get the line equation
_tmp = line[1] - line[0]
_direction = _tmp > 0
_flat = _tmp == 0
# vertical case
if _tmp[0] == 0:
solutions = [
# y = 0
(line[0, 0], 0),
# y = bot
(line[0, 0], target_shape[0]),
]
# horizontal
elif _tmp[1] == 0:
solutions = [
# x = 0
(0, line[0, 1]),
# x = right
(target_shape[1], line[0, 1]),
]
else:
alpha = _tmp[1] / _tmp[0]
beta = line[1, 1] - alpha * line[1, 0]
# Solve it for edges
solutions = [
# x = 0
(0, beta),
# y = 0
(-beta / alpha, 0),
# x = right
(target_shape[1], alpha * target_shape[1] + beta),
# y = bot
((target_shape[0] - beta) / alpha, target_shape[0]),
]
for point in solutions:
# Skip points that are out of the final image
if any(val < 0 or val > size for val, size in zip(point, target_shape[::-1])):
continue
if all(
val == ref if _same else (val < ref if _dir else val > ref)
for val, ref, _dir, _same in zip(point, line[1], _direction, _flat)
):
return point
raise ValueError
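# Illustrative worked example (arbitrary values): with a 100 x 100 mask, a base point at (50, 50) and
# a tip at (80, 80), the base point is pushed back along the line until it reaches the top-left corner.
# >>> expand_line(np.array([[50, 50], [80, 80]]), (100, 100))  # -> (0, 0.0)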
def create_shadow_mask(
target_shape: Tuple[int, int],
min_base_width=0.3,
max_tip_width=0.5,
max_tip_height=0.3,
) -> np.ndarray:
"""Creates a random shadow mask
Args:
target_shape: the target shape (H, W)
min_base_width: the relative minimum shadow base width
max_tip_width: the relative maximum shadow tip width
max_tip_height: the relative maximum shadow tip height
Returns:
a numpy ndarray of shape (H, W) with values in the range [0, 1]
"""
# Default base is top
_params = np.random.rand(6)
base_width = min_base_width + (1 - min_base_width) * _params[0]
base_center = base_width / 2 + (1 - base_width) * _params[1]
# Ensure tip width is smaller for shadow consistency
tip_width = min(_params[2] * base_width * target_shape[0] / target_shape[1], max_tip_width)
tip_center = tip_width / 2 + (1 - tip_width) * _params[3]
tip_height = _params[4] * max_tip_height
tip_mid = tip_height / 2 + (1 - tip_height) * _params[5]
_order = tip_center < base_center
contour: np.ndarray = np.array(
[
[base_center - base_width / 2, 0],
[base_center + base_width / 2, 0],
[tip_center + tip_width / 2, tip_mid + tip_height / 2 if _order else tip_mid - tip_height / 2],
[tip_center - tip_width / 2, tip_mid - tip_height / 2 if _order else tip_mid + tip_height / 2],
],
dtype=np.float32,
)
# Convert to absolute coords
abs_contour: np.ndarray = (
np.stack(
(contour[:, 0] * target_shape[1], contour[:, 1] * target_shape[0]),
axis=-1,
)
.round()
.astype(np.int32)
)
# Direction
_params = np.random.rand(1)
rotated_contour = (
rotate_abs_geoms(
abs_contour[None, ...],
360 * _params[0],
target_shape,
expand=False,
)[0]
.round()
.astype(np.int32)
)
# Check approx quadrant
quad_idx = int(_params[0] / 0.25)
# Top-bot
if quad_idx % 2 == 0:
intensity_mask = np.repeat(np.arange(target_shape[0])[:, None], target_shape[1], axis=1) / (target_shape[0] - 1)
if quad_idx == 0:
intensity_mask = 1 - intensity_mask
# Left - right
else:
intensity_mask = np.repeat(np.arange(target_shape[1])[None, :], target_shape[0], axis=0) / (target_shape[1] - 1)
if quad_idx == 1:
intensity_mask = 1 - intensity_mask
# Expand base
final_contour = rotated_contour.copy()
final_contour[0] = expand_line(final_contour[[0, 3]], target_shape)
final_contour[1] = expand_line(final_contour[[1, 2]], target_shape)
# If both base are not on the same side, add a point
if not np.any(final_contour[0] == final_contour[1]):
corner_x = 0 if max(final_contour[0, 0], final_contour[1, 0]) < target_shape[1] else target_shape[1]
corner_y = 0 if max(final_contour[0, 1], final_contour[1, 1]) < target_shape[0] else target_shape[0]
corner: np.ndarray = np.array([corner_x, corner_y])
final_contour = np.concatenate((final_contour[:1], corner[None, ...], final_contour[1:]), axis=0)
# Direction & rotate
mask: np.ndarray = np.zeros((*target_shape, 1), dtype=np.uint8)
mask = cv2.fillPoly(mask, [final_contour], (255,), lineType=cv2.LINE_AA)[..., 0]
return (mask / 255).astype(np.float32).clip(0, 1) * intensity_mask.astype(np.float32)
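# Illustrative sketch (arbitrary values): a shadow mask can be blended manually with an image, which
# is essentially what the framework-specific random_shadow helpers do (minus the blur).
# >>> mask = create_shadow_mask((64, 128))  # float32 array of shape (64, 128), values in [0, 1]
# >>> img = np.random.rand(64, 128, 3).astype(np.float32)
# >>> opacity = 0.5
# >>> shaded = opacity * (1 - mask[..., None]) * img + (1 - opacity) * img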
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import argparse
import logging
import cv2
import matplotlib.pyplot as plt
import torch
from doctr.io.image import read_img_as_tensor
from doctr.models import obj_detection
CLASSES = ["__background__", "QR Code", "Barcode", "Logo", "Photo"]
CM = [(255, 255, 255), (0, 0, 150), (0, 0, 0), (0, 150, 0), (150, 0, 0)]
def plot_predictions(image, boxes, labels):
for box, label in zip(boxes, labels):
# Bounding box around artefacts
cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), CM[label], 2)
text_size, _ = cv2.getTextSize(CLASSES[label], cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
text_w, text_h = text_size
# Filled rectangle above bounding box
cv2.rectangle(image, (box[0], box[1]), (box[0] + text_w, box[1] - text_h), CM[label], -1)
# Text bearing the name of the artefact detected
cv2.putText(image, CLASSES[label], (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
plt.axis("off")
plt.imshow(image)
plt.show()
@torch.no_grad()
def main(args):
print(args)
model = obj_detection.__dict__[args.arch](pretrained=True, num_classes=5).eval()
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
logging.warning("No accessible GPU, target device set to CPU.")
img = read_img_as_tensor(args.img_path).unsqueeze(0)
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
img = img.cuda()
pred = model(img)
labels = pred[0]["labels"].detach().cpu().numpy()
labels = labels.round().astype(int)
boxes = pred[0]["boxes"].detach().cpu().numpy()
boxes = boxes.round().astype(int)
img = img.cpu().permute(0, 2, 3, 1).numpy()[0].copy()
plot_predictions(img, boxes, labels)
def parse_args():
parser = argparse.ArgumentParser(
description="DocTR artefact detection", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("arch", type=str, help="Artefact detection model to use")
parser.add_argument("img_path", type=str, help="path to the image")
parser.add_argument("--device", default=None, type=int, help="device")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
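# Example invocation (illustrative: the script name, architecture and image path are placeholders):
# python analyze_artefacts.py fasterrcnn_mobilenet_v3_large_fpn path/to/doc_image.jpg --device 0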
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from doctr.io.elements import KIEDocument
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
from tqdm import tqdm
from doctr import datasets
from doctr.file_utils import is_tf_available
from doctr.models import kie_predictor
from doctr.utils.geometry import extract_crops, extract_rcrops
from doctr.utils.metrics import LocalizationConfusion, OCRMetric, TextMatch
# Enable GPU growth if using TF
if is_tf_available():
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
import torch
def _pct(val):
return "N/A" if val is None else f"{val:.2%}"
def main(args):
if not args.rotation:
args.eval_straight = True
predictor = kie_predictor(
args.detection,
args.recognition,
pretrained=True,
reco_bs=args.batch_size,
assume_straight_pages=not args.rotation,
)
if args.img_folder and args.label_file:
testset = datasets.OCRDataset(
img_folder=args.img_folder,
label_file=args.label_file,
)
sets = [testset]
else:
train_set = datasets.__dict__[args.dataset](train=True, download=True, use_polygons=not args.eval_straight)
val_set = datasets.__dict__[args.dataset](train=False, download=True, use_polygons=not args.eval_straight)
sets = [train_set, val_set]
reco_metric = TextMatch()
if args.mask_shape:
det_metric = LocalizationConfusion(
iou_thresh=args.iou, use_polygons=not args.eval_straight, mask_shape=(args.mask_shape, args.mask_shape)
)
e2e_metric = OCRMetric(
iou_thresh=args.iou, use_polygons=not args.eval_straight, mask_shape=(args.mask_shape, args.mask_shape)
)
else:
det_metric = LocalizationConfusion(iou_thresh=args.iou, use_polygons=not args.eval_straight)
e2e_metric = OCRMetric(iou_thresh=args.iou, use_polygons=not args.eval_straight)
sample_idx = 0
extraction_fn = extract_crops if args.eval_straight else extract_rcrops
for dataset in sets:
for page, target in tqdm(dataset):
# GT
gt_boxes = target["boxes"]
gt_labels = target["labels"]
if args.img_folder and args.label_file:
x, y, w, h = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2], gt_boxes[:, 3]
xmin, ymin = np.clip(x - w / 2, 0, 1), np.clip(y - h / 2, 0, 1)
xmax, ymax = np.clip(x + w / 2, 0, 1), np.clip(y + h / 2, 0, 1)
gt_boxes = np.stack([xmin, ymin, xmax, ymax], axis=-1)
# Forward
out: KIEDocument
if is_tf_available():
out = predictor(page[None, ...])
crops = extraction_fn(page, gt_boxes)
reco_out = predictor.reco_predictor(crops)
else:
with torch.no_grad():
out = predictor(page[None, ...])
# We directly crop on PyTorch tensors, which are in channels_first
crops = extraction_fn(page, gt_boxes, channels_last=False)
reco_out = predictor.reco_predictor(crops)
if len(reco_out):
reco_words, _ = zip(*reco_out)
else:
reco_words = []
# Unpack preds
pred_boxes = []
pred_labels = []
for page in out.pages:
height, width = page.dimensions
for predictions in page.predictions.values():
for prediction in predictions:
if not args.rotation:
(a, b), (c, d) = prediction.geometry
else:
(
[x1, y1],
[x2, y2],
[x3, y3],
[x4, y4],
) = prediction.geometry
if gt_boxes.dtype == int:
if not args.rotation:
pred_boxes.append([int(a * width), int(b * height), int(c * width), int(d * height)])
else:
if args.eval_straight:
pred_boxes.append(
[
int(width * min(x1, x2, x3, x4)),
int(height * min(y1, y2, y3, y4)),
int(width * max(x1, x2, x3, x4)),
int(height * max(y1, y2, y3, y4)),
]
)
else:
pred_boxes.append(
[
[int(x1 * width), int(y1 * height)],
[int(x2 * width), int(y2 * height)],
[int(x3 * width), int(y3 * height)],
[int(x4 * width), int(y4 * height)],
]
)
else:
if not args.rotation:
pred_boxes.append([a, b, c, d])
else:
if args.eval_straight:
pred_boxes.append(
[
min(x1, x2, x3, x4),
min(y1, y2, y3, y4),
max(x1, x2, x3, x4),
max(y1, y2, y3, y4),
]
)
else:
pred_boxes.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
pred_labels.append(prediction.value)
# Update the metric
det_metric.update(gt_boxes, np.asarray(pred_boxes))
reco_metric.update(gt_labels, reco_words)
e2e_metric.update(gt_boxes, np.asarray(pred_boxes), gt_labels, pred_labels)
# Loop break
sample_idx += 1
if isinstance(args.samples, int) and args.samples == sample_idx:
break
if isinstance(args.samples, int) and args.samples == sample_idx:
break
# Unpack aggregated metrics
print(
f"Model Evaluation (model= {args.detection} + {args.recognition}, "
f"dataset={'OCRDataset' if args.img_folder else args.dataset})"
)
recall, precision, mean_iou = det_metric.summary()
print(f"Text Detection - Recall: {_pct(recall)}, Precision: {_pct(precision)}, Mean IoU: {_pct(mean_iou)}")
acc = reco_metric.summary()
print(f"Text Recognition - Accuracy: {_pct(acc['raw'])} (unicase: {_pct(acc['unicase'])})")
recall, precision, mean_iou = e2e_metric.summary()
print(
f"KIE OCR - Recall: {_pct(recall['raw'])} (unicase: {_pct(recall['unicase'])}), "
f"Precision: {_pct(precision['raw'])} (unicase: {_pct(precision['unicase'])}), Mean IoU: {_pct(mean_iou)}"
)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR end-to-end evaluation", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("detection", type=str, help="Text detection model to use for analysis")
parser.add_argument("recognition", type=str, help="Text recognition model to use for analysis")
parser.add_argument("--iou", type=float, default=0.5, help="IoU threshold to match a pair of boxes")
parser.add_argument("--dataset", type=str, default="FUNSD", help="choose a dataset: FUNSD, CORD")
parser.add_argument("--img_folder", type=str, default=None, help="Only for local sets, path to images")
parser.add_argument("--label_file", type=str, default=None, help="Only for local sets, path to labels")
parser.add_argument("--rotation", dest="rotation", action="store_true", help="run rotated OCR + postprocessing")
parser.add_argument("-b", "--batch_size", type=int, default=32, help="batch size for recognition")
parser.add_argument("--mask_shape", type=int, default=None, help="mask shape for mask iou (only for rotation)")
parser.add_argument("--samples", type=int, default=None, help="evaluate only on the N first samples")
parser.add_argument(
"--eval-straight",
action="store_true",
help="evaluate on straight pages with straight bbox (to use the quick and light metric)",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
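# Example invocation (illustrative: the script name is a placeholder):
# python evaluate_kie.py db_resnet50 crnn_vgg16_bn --dataset FUNSD --samples 100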
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Based on https://github.com/pytorch/pytorch/blob/master/torch/utils/collect_env.py
This script outputs relevant system environment info
Run it with `python collect_env.py`.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import locale
import os
import re
import subprocess
import sys
from collections import namedtuple
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
try:
import doctr
DOCTR_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
DOCTR_AVAILABLE = False
try:
import tensorflow as tf
TF_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
TF_AVAILABLE = False
try:
import torch
TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
TORCH_AVAILABLE = False
try:
import torchvision
TV_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
TV_AVAILABLE = False
try:
import cv2
CV2_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
CV2_AVAILABLE = False
PY3 = sys.version_info >= (3, 0)
# System Environment Information
SystemEnv = namedtuple(
"SystemEnv",
[
"doctr_version",
"tf_version",
"torch_version",
"torchvision_version",
"cv2_version",
"os",
"python_version",
"is_cuda_available_tf",
"is_cuda_available_torch",
"cuda_runtime_version",
"nvidia_driver_version",
"nvidia_gpu_models",
"cudnn_version",
],
)
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
if PY3:
enc = locale.getpreferredencoding()
output = output.decode(enc)
err = err.decode(enc)
return rc, output.strip(), err.strip()
def run_and_read_all(run_lambda, command):
"""Runs command using run_lambda; reads and returns entire output if rc is 0"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out
def run_and_parse_first_match(run_lambda, command, regex):
"""Runs command using run_lambda, returns the first regex match if it exists"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
match = re.search(regex, out)
if match is None:
return None
return match.group(1)
def get_nvidia_driver_version(run_lambda):
if get_platform() == "darwin":
cmd = "kextstat | grep -i cuda"
return run_and_parse_first_match(run_lambda, cmd, r"com[.]nvidia[.]CUDA [(](.*?)[)]")
smi = get_nvidia_smi()
return run_and_parse_first_match(run_lambda, smi, r"Driver Version: (.*?) ")
def get_gpu_info(run_lambda):
if get_platform() == "darwin":
if TF_AVAILABLE and any(tf.config.list_physical_devices("GPU")):
return tf.config.list_physical_devices("GPU")[0].name
return None
smi = get_nvidia_smi()
uuid_regex = re.compile(r" \(UUID: .+?\)")
rc, out, _ = run_lambda(smi + " -L")
if rc != 0:
return None
# Anonymize GPUs by removing their UUID
return re.sub(uuid_regex, "", out)
def get_running_cuda_version(run_lambda):
return run_and_parse_first_match(run_lambda, "nvcc --version", r"release .+ V(.*)")
def get_cudnn_version(run_lambda):
"""This will return a list of libcudnn.so; it's hard to tell which one is being used"""
if get_platform() == "win32":
cudnn_cmd = 'where /R "%CUDA_PATH%\\bin" cudnn*.dll'
elif get_platform() == "darwin":
# CUDA libraries and drivers can be found in /usr/local/cuda/. See
# https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
# https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
# Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
cudnn_cmd = "ls /usr/local/cuda/lib/libcudnn*"
else:
cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
rc, out, _ = run_lambda(cudnn_cmd)
# find will return 1 if there are permission errors or if not found
if len(out) == 0 or (rc != 1 and rc != 0):
lib = os.environ.get("CUDNN_LIBRARY")
if lib is not None and os.path.isfile(lib):
return os.path.realpath(lib)
return None
files = set()
for fn in out.split("\n"):
fn = os.path.realpath(fn) # eliminate symbolic links
if os.path.isfile(fn):
files.add(fn)
if not files:
return None
# Alphabetize the result because the order is non-deterministic otherwise
files = list(sorted(files))
if len(files) == 1:
return files[0]
result = "\n".join(files)
return "Probably one of the following:\n{}".format(result)
def get_nvidia_smi():
# Note: nvidia-smi is currently available only on Windows and Linux
smi = "nvidia-smi"
if get_platform() == "win32":
smi = '"C:\\Program Files\\NVIDIA Corporation\\NVSMI\\%s"' % smi
return smi
def get_platform():
if sys.platform.startswith("linux"):
return "linux"
elif sys.platform.startswith("win32"):
return "win32"
elif sys.platform.startswith("cygwin"):
return "cygwin"
elif sys.platform.startswith("darwin"):
return "darwin"
else:
return sys.platform
def get_mac_version(run_lambda):
return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")
def get_windows_version(run_lambda):
return run_and_read_all(run_lambda, "wmic os get Caption | findstr /v Caption")
def get_lsb_version(run_lambda):
return run_and_parse_first_match(run_lambda, "lsb_release -a", r"Description:\t(.*)")
def check_release_file(run_lambda):
return run_and_parse_first_match(run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"')
def get_os(run_lambda):
platform = get_platform()
if platform == "win32" or platform == "cygwin":
return get_windows_version(run_lambda)
if platform == "darwin":
version = get_mac_version(run_lambda)
if version is None:
return None
return "Mac OSX {}".format(version)
if platform == "linux":
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
return desc
# Try reading /etc/*-release
desc = check_release_file(run_lambda)
if desc is not None:
return desc
return platform
# Unknown platform
return platform
def get_env_info():
run_lambda = run
doctr_str = doctr.__version__ if DOCTR_AVAILABLE else "N/A"
if TF_AVAILABLE:
tf_str = tf.__version__
tf_cuda_available_str = any(tf.config.list_physical_devices("GPU"))
else:
tf_str = tf_cuda_available_str = "N/A"
if TORCH_AVAILABLE:
torch_str = torch.__version__
torch_cuda_available_str = torch.cuda.is_available()
else:
torch_str = torch_cuda_available_str = "N/A"
tv_str = torchvision.__version__ if TV_AVAILABLE else "N/A"
cv2_str = cv2.__version__ if CV2_AVAILABLE else "N/A"
return SystemEnv(
doctr_version=doctr_str,
tf_version=tf_str,
torch_version=torch_str,
torchvision_version=tv_str,
cv2_version=cv2_str,
python_version=".".join(map(str, sys.version_info[:3])),
is_cuda_available_tf=tf_cuda_available_str,
is_cuda_available_torch=torch_cuda_available_str,
cuda_runtime_version=get_running_cuda_version(run_lambda),
nvidia_gpu_models=get_gpu_info(run_lambda),
nvidia_driver_version=get_nvidia_driver_version(run_lambda),
cudnn_version=get_cudnn_version(run_lambda),
os=get_os(run_lambda),
)
env_info_fmt = """
DocTR version: {doctr_version}
TensorFlow version: {tf_version}
PyTorch version: {torch_version} (torchvision {torchvision_version})
OpenCV version: {cv2_version}
OS: {os}
Python version: {python_version}
Is CUDA available (TensorFlow): {is_cuda_available_tf}
Is CUDA available (PyTorch): {is_cuda_available_torch}
CUDA runtime version: {cuda_runtime_version}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
""".strip()
def pretty_str(envinfo):
def replace_nones(dct, replacement="Could not collect"):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct
def replace_bools(dct, true="Yes", false="No"):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
elif dct[key] is False:
dct[key] = false
return dct
def maybe_start_on_next_line(string):
# If `string` is multiline, prepend a \n to it.
if string is not None and len(string.split("\n")) > 1:
return "\n{}\n".format(string)
return string
mutable_dict = envinfo._asdict()
# If nvidia_gpu_models is multiline, start on the next line
mutable_dict["nvidia_gpu_models"] = maybe_start_on_next_line(envinfo.nvidia_gpu_models)
# If the machine doesn't have CUDA, report some fields as 'No CUDA'
dynamic_cuda_fields = [
"cuda_runtime_version",
"nvidia_gpu_models",
"nvidia_driver_version",
]
all_cuda_fields = dynamic_cuda_fields + ["cudnn_version"]
all_dynamic_cuda_fields_missing = all(mutable_dict[field] is None for field in dynamic_cuda_fields)
if TF_AVAILABLE and not any(tf.config.list_physical_devices("GPU")) and all_dynamic_cuda_fields_missing:
for field in all_cuda_fields:
mutable_dict[field] = "No CUDA"
# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
# Replace all None objects with 'Could not collect'
mutable_dict = replace_nones(mutable_dict)
return env_info_fmt.format(**mutable_dict)
def get_pretty_env_info():
"""Collects environment information for debugging purposes
Returns:
str: environment information
"""
return pretty_str(get_env_info())
def main():
print("Collecting environment information...\n")
output = get_pretty_env_info()
print(output)
if __name__ == "__main__":
main()
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import argparse
import json
import os
from pathlib import Path
from tqdm import tqdm
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from doctr.file_utils import is_tf_available
from doctr.io import DocumentFile
from doctr.models import detection, ocr_predictor
# Enable GPU growth if using TF
if is_tf_available():
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
IMAGE_FILE_EXTENSIONS = [".jpeg", ".jpg", ".png", ".tif", ".tiff", ".bmp"]
OTHER_EXTENSIONS = [".pdf"]
def _process_file(model, file_path: Path, out_format: str) -> None:
if out_format not in ["txt", "json", "xml"]:
raise ValueError(f"Unsupported output format: {out_format}")
if os.path.splitext(file_path)[1] in IMAGE_FILE_EXTENSIONS:
doc = DocumentFile.from_images([file_path])
elif os.path.splitext(file_path)[1] in OTHER_EXTENSIONS:
doc = DocumentFile.from_pdf(file_path)
else:
print(f"Skip unsupported file type: {file_path}")
return
out = model(doc)
if out_format == "json":
output = json.dumps(out.export(), indent=2)
elif out_format == "txt":
output = out.render()
elif out_format == "xml":
output = out.export_as_xml()
path = Path("output").joinpath(file_path.stem + "." + out_format)
if out_format == "xml":
for i, (xml_bytes, xml_tree) in enumerate(output):
path = Path("output").joinpath(file_path.stem + f"_{i}." + out_format)
xml_tree.write(path, encoding="utf-8", xml_declaration=True)
else:
with open(path, "w") as f:
f.write(output)
def main(args):
detection_model = detection.__dict__[args.detection](
pretrained=True,
bin_thresh=args.bin_thresh,
)
model = ocr_predictor(detection_model, args.recognition, pretrained=True)
path = Path(args.path)
os.makedirs(name="output", exist_ok=True)
if path.is_dir():
to_process = [
f for f in path.iterdir() if str(f).lower().endswith(tuple(IMAGE_FILE_EXTENSIONS + OTHER_EXTENSIONS))
]
for file_path in tqdm(to_process):
_process_file(model, file_path, args.format)
else:
_process_file(model, path, args.format)
def parse_args():
parser = argparse.ArgumentParser(
description="DocTR text detection",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("path", type=str, help="Path to process: PDF, image, directory")
parser.add_argument("--detection", type=str, default="db_resnet50", help="Text detection model to use for analysis")
parser.add_argument("--bin-thresh", type=float, default=0.3, help="Binarization threshold for the detection model.")
parser.add_argument(
"--recognition", type=str, default="crnn_vgg16_bn", help="Text recognition model to use for analysis"
)
parser.add_argument("-f", "--format", choices=["txt", "json", "xml"], default="txt", help="Output format")
return parser.parse_args()
if __name__ == "__main__":
parsed_args = parse_args()
main(parsed_args)
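# Example invocation (the script name and file paths below are purely illustrative;
# the model names match the defaults defined in parse_args):
#   python detect_text.py path/to/documents --detection db_resnet50 --recognition crnn_vgg16_bn -f json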
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
from tqdm import tqdm
from doctr import datasets
from doctr.file_utils import is_tf_available
from doctr.models import ocr_predictor
from doctr.utils.geometry import extract_crops, extract_rcrops
from doctr.utils.metrics import LocalizationConfusion, OCRMetric, TextMatch
# Enable GPU growth if using TF
if is_tf_available():
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
import torch
def _pct(val):
return "N/A" if val is None else f"{val:.2%}"
def main(args):
if not args.rotation:
args.eval_straight = True
predictor = ocr_predictor(
args.detection,
args.recognition,
pretrained=True,
reco_bs=args.batch_size,
assume_straight_pages=not args.rotation,
)
if args.img_folder and args.label_file:
testset = datasets.OCRDataset(
img_folder=args.img_folder,
label_file=args.label_file,
)
sets = [testset]
else:
train_set = datasets.__dict__[args.dataset](train=True, download=True, use_polygons=not args.eval_straight)
val_set = datasets.__dict__[args.dataset](train=False, download=True, use_polygons=not args.eval_straight)
sets = [train_set, val_set]
reco_metric = TextMatch()
if args.mask_shape:
det_metric = LocalizationConfusion(
iou_thresh=args.iou, use_polygons=not args.eval_straight, mask_shape=(args.mask_shape, args.mask_shape)
)
e2e_metric = OCRMetric(
iou_thresh=args.iou, use_polygons=not args.eval_straight, mask_shape=(args.mask_shape, args.mask_shape)
)
else:
det_metric = LocalizationConfusion(iou_thresh=args.iou, use_polygons=not args.eval_straight)
e2e_metric = OCRMetric(iou_thresh=args.iou, use_polygons=not args.eval_straight)
sample_idx = 0
extraction_fn = extract_crops if args.eval_straight else extract_rcrops
for dataset in sets:
for page, target in tqdm(dataset):
# GT
gt_boxes = target["boxes"]
gt_labels = target["labels"]
if args.img_folder and args.label_file:
x, y, w, h = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2], gt_boxes[:, 3]
xmin, ymin = np.clip(x - w / 2, 0, 1), np.clip(y - h / 2, 0, 1)
xmax, ymax = np.clip(x + w / 2, 0, 1), np.clip(y + h / 2, 0, 1)
gt_boxes = np.stack([xmin, ymin, xmax, ymax], axis=-1)
# Forward
if is_tf_available():
out = predictor(page[None, ...])
crops = extraction_fn(page, gt_boxes)
reco_out = predictor.reco_predictor(crops)
else:
with torch.no_grad():
out = predictor(page[None, ...])
# We directly crop on PyTorch tensors, which are in channels_first
crops = extraction_fn(page, gt_boxes, channels_last=False)
reco_out = predictor.reco_predictor(crops)
if len(reco_out):
reco_words, _ = zip(*reco_out)
else:
reco_words = []
# Unpack preds
pred_boxes = []
pred_labels = []
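            # Word geometries are relative to the page; scale them to absolute pixel coordinates
            # when the ground-truth boxes are integer-typed, otherwise keep them relative so both
            # sets of boxes live in the same coordinate space.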
for page in out.pages:
height, width = page.dimensions
for block in page.blocks:
for line in block.lines:
for word in line.words:
if not args.rotation:
(a, b), (c, d) = word.geometry
else:
(
[x1, y1],
[x2, y2],
[x3, y3],
[x4, y4],
) = word.geometry
if gt_boxes.dtype == int:
if not args.rotation:
pred_boxes.append(
[int(a * width), int(b * height), int(c * width), int(d * height)]
)
else:
if args.eval_straight:
pred_boxes.append(
[
int(width * min(x1, x2, x3, x4)),
int(height * min(y1, y2, y3, y4)),
int(width * max(x1, x2, x3, x4)),
int(height * max(y1, y2, y3, y4)),
]
)
else:
pred_boxes.append(
[
[int(x1 * width), int(y1 * height)],
[int(x2 * width), int(y2 * height)],
[int(x3 * width), int(y3 * height)],
[int(x4 * width), int(y4 * height)],
]
)
else:
if not args.rotation:
pred_boxes.append([a, b, c, d])
else:
if args.eval_straight:
pred_boxes.append(
[
min(x1, x2, x3, x4),
min(y1, y2, y3, y4),
max(x1, x2, x3, x4),
max(y1, y2, y3, y4),
]
)
else:
pred_boxes.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
pred_labels.append(word.value)
# Update the metric
det_metric.update(gt_boxes, np.asarray(pred_boxes))
reco_metric.update(gt_labels, reco_words)
e2e_metric.update(gt_boxes, np.asarray(pred_boxes), gt_labels, pred_labels)
# Loop break
sample_idx += 1
if isinstance(args.samples, int) and args.samples == sample_idx:
break
if isinstance(args.samples, int) and args.samples == sample_idx:
break
# Unpack aggregated metrics
print(
f"Model Evaluation (model= {args.detection} + {args.recognition}, "
f"dataset={'OCRDataset' if args.img_folder else args.dataset})"
)
recall, precision, mean_iou = det_metric.summary()
print(f"Text Detection - Recall: {_pct(recall)}, Precision: {_pct(precision)}, Mean IoU: {_pct(mean_iou)}")
acc = reco_metric.summary()
print(f"Text Recognition - Accuracy: {_pct(acc['raw'])} (unicase: {_pct(acc['unicase'])})")
recall, precision, mean_iou = e2e_metric.summary()
print(
f"OCR - Recall: {_pct(recall['raw'])} (unicase: {_pct(recall['unicase'])}), "
f"Precision: {_pct(precision['raw'])} (unicase: {_pct(precision['unicase'])}), Mean IoU: {_pct(mean_iou)}"
)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR end-to-end evaluation", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("detection", type=str, help="Text detection model to use for analysis")
parser.add_argument("recognition", type=str, help="Text recognition model to use for analysis")
parser.add_argument("--iou", type=float, default=0.5, help="IoU threshold to match a pair of boxes")
parser.add_argument("--dataset", type=str, default="FUNSD", help="choose a dataset: FUNSD, CORD")
parser.add_argument("--img_folder", type=str, default=None, help="Only for local sets, path to images")
parser.add_argument("--label_file", type=str, default=None, help="Only for local sets, path to labels")
parser.add_argument("--rotation", dest="rotation", action="store_true", help="run rotated OCR + postprocessing")
parser.add_argument("-b", "--batch_size", type=int, default=32, help="batch size for recognition")
parser.add_argument("--mask_shape", type=int, default=None, help="mask shape for mask iou (only for rotation)")
parser.add_argument("--samples", type=int, default=None, help="evaluate only on the N first samples")
parser.add_argument(
"--eval-straight",
action="store_true",
help="evaluate on straight pages with straight bbox (to use the quick and light metric)",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from doctr.file_utils import is_tf_available
from doctr.io import DocumentFile
from doctr.models import ocr_predictor
# Enable GPU growth if using TF
if is_tf_available():
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
def main(args):
model = ocr_predictor(args.detection, args.recognition, pretrained=True)
if args.path.lower().endswith(".pdf"):
doc = DocumentFile.from_pdf(args.path)
else:
doc = DocumentFile.from_images(args.path)
out = model(doc)
for page, img in zip(out.pages, doc):
page.show(img, block=not args.noblock, interactive=not args.static)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR end-to-end analysis", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("path", type=str, help="Path to the input document (PDF or image)")
parser.add_argument("--detection", type=str, default="db_resnet50", help="Text detection model to use for analysis")
parser.add_argument(
"--recognition", type=str, default="crnn_vgg16_bn", help="Text recognition model to use for analysis"
)
parser.add_argument(
"--noblock", dest="noblock", help="Disables blocking visualization. Used only for CI.", action="store_true"
)
parser.add_argument("--static", dest="static", help="Switches to static visualization", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
parsed_args = parse_args()
main(parsed_args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Borrowed & adapted from https://github.com/pytorch/vision/blob/main/.github/process_commit.py
This script finds the merger responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the merger only, not the reviewers, as the reviewers can sometimes be external to the repository
with no labeling responsibility, so we don't want to bother them.
"""
from typing import Any, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# Should specify the type of change
PRIMARY_LABELS = {
"type: new feature",
"type: bug",
"type: enhancement",
"type: misc",
}
# Should specify what has been modified
SECONDARY_LABELS = {
"topic: documentation",
"module: datasets",
"module: io",
"module: models",
"module: transforms",
"module: utils",
"ext: api",
"ext: demo",
"ext: docs",
"ext: notebooks",
"ext: references",
"ext: scripts",
"ext: tests",
"topic: build",
"topic: ci",
"topic: docker",
}
GH_ORG = "mindee"
GH_REPO = "doctr"
def query_repo(cmd: str, *, accept) -> Any:
response = requests.get(f"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}", headers=dict(Accept=accept))
return response.json()
def get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
# See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
data = query_repo(f"pulls/{pr_number}", accept="application/vnd.github.v3+json")
merger = data.get("merged_by", {}).get("login")
labels = {label["name"] for label in data["labels"]}
return merger, labels
def main(args):
merger, labels = get_pr_merger_and_labels(args.pr)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if isinstance(merger, str) and not is_properly_labeled:
print(f"@{merger}")
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="PR label checker", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("pr", type=int, help="PR number")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
import doctr
PROJECT_NAME: str = "docTR API template"
PROJECT_DESCRIPTION: str = "Template API for Optical Character Recognition"
VERSION: str = doctr.__version__
DEBUG: bool = os.environ.get("DEBUG", "") != "False"
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr.models import kie_predictor, ocr_predictor
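# Instantiate the predictors once at import time so that the API route handlers can reuse them.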
predictor = ocr_predictor(pretrained=True)
det_predictor = predictor.det_predictor
reco_predictor = predictor.reco_predictor
kie_predictor = kie_predictor(pretrained=True)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Tuple
from pydantic import BaseModel, Field
# Recognition output
class RecognitionOut(BaseModel):
value: str = Field(..., example="Hello")
class DetectionOut(BaseModel):
box: Tuple[float, float, float, float]
class OCROut(RecognitionOut, DetectionOut):
pass
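# OCROut therefore serializes as {"value": ..., "box": [xmin, ymin, xmax, ymax]},
# with box coordinates expressed relative to the page as produced by the predictor.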
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import time
from fastapi import FastAPI, Request
from fastapi.openapi.utils import get_openapi
from app import config as cfg
from app.routes import detection, kie, ocr, recognition
app = FastAPI(title=cfg.PROJECT_NAME, description=cfg.PROJECT_DESCRIPTION, debug=cfg.DEBUG, version=cfg.VERSION)
# Routing
app.include_router(recognition.router, prefix="/recognition", tags=["recognition"])
app.include_router(detection.router, prefix="/detection", tags=["detection"])
app.include_router(ocr.router, prefix="/ocr", tags=["ocr"])
app.include_router(kie.router, prefix="/kie", tags=["kie"])
# Middleware
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
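# Every response therefore carries an "X-Process-Time" header with the handler latency in seconds.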
# Docs
def custom_openapi():
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title=cfg.PROJECT_NAME,
version=cfg.VERSION,
description=cfg.PROJECT_DESCRIPTION,
routes=app.routes,
)
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from fastapi import APIRouter, File, UploadFile, status
from app.schemas import RecognitionOut
from app.vision import reco_predictor
from doctr.io import decode_img_as_tensor
router = APIRouter()
@router.post("/", response_model=RecognitionOut, status_code=status.HTTP_200_OK, summary="Perform text recognition")
async def text_recognition(file: UploadFile = File(...)):
"""Runs docTR text recognition model to analyze the input image"""
img = decode_img_as_tensor(file.file.read())
out = reco_predictor([img])
return RecognitionOut(value=out[0][0])
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List
from fastapi import APIRouter, File, UploadFile, status
from app.schemas import DetectionOut
from app.vision import det_predictor
from doctr.file_utils import CLASS_NAME
from doctr.io import decode_img_as_tensor
router = APIRouter()
@router.post("/", response_model=List[DetectionOut], status_code=status.HTTP_200_OK, summary="Perform text detection")
async def text_detection(file: UploadFile = File(...)):
"""Runs docTR text detection model to analyze the input image"""
img = decode_img_as_tensor(file.file.read())
boxes = det_predictor([img])[0]
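    # Each detected box is (xmin, ymin, xmax, ymax, confidence); the confidence column is dropped before returning.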
return [DetectionOut(box=box.tolist()) for box in boxes[CLASS_NAME][:, :-1]]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import List
from fastapi import APIRouter, File, UploadFile, status
from app.schemas import OCROut
from app.vision import predictor
from doctr.io import decode_img_as_tensor
router = APIRouter()
@router.post("/", response_model=List[OCROut], status_code=status.HTTP_200_OK, summary="Perform OCR")
async def perform_ocr(file: UploadFile = File(...)):
"""Runs docTR OCR model to analyze the input image"""
img = decode_img_as_tensor(file.file.read())
out = predictor([img])
return [
OCROut(box=(*word.geometry[0], *word.geometry[1]), value=word.value)
for block in out.pages[0].blocks
for line in block.lines
for word in line.words
]
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Dict, List
from fastapi import APIRouter, File, UploadFile, status
from app.schemas import OCROut
from app.vision import kie_predictor
from doctr.io import decode_img_as_tensor
router = APIRouter()
@router.post("/", response_model=Dict[str, List[OCROut]], status_code=status.HTTP_200_OK, summary="Perform KIE")
async def perform_kie(file: UploadFile = File(...)):
"""Runs docTR KIE model to analyze the input image"""
img = decode_img_as_tensor(file.file.read())
out = kie_predictor([img])
return {
class_name: [
OCROut(box=(*prediction.geometry[0], *prediction.geometry[1]), value=prediction.value)
for prediction in out.pages[0].predictions[class_name]
]
for class_name in out.pages[0].predictions.keys()
}
|
import pytest_asyncio
import requests
from httpx import AsyncClient
from app.main import app
@pytest_asyncio.fixture(scope="session")
def mock_recognition_image(tmpdir_factory):
url = "https://user-images.githubusercontent.com/76527547/117133599-c073fa00-ada4-11eb-831b-412de4d28341.jpeg"
return requests.get(url).content
@pytest_asyncio.fixture(scope="session")
def mock_detection_image(tmpdir_factory):
url = "https://user-images.githubusercontent.com/76527547/117319856-fc35bf00-ae8b-11eb-9b51-ca5aba673466.jpg"
return requests.get(url).content
@pytest_asyncio.fixture(scope="function")
async def test_app_asyncio():
    # for httpx>=0.20, follow_redirects=True (cf. https://github.com/encode/httpx/releases/tag/0.20.0)
async with AsyncClient(app=app, base_url="http://test", follow_redirects=True) as ac:
yield ac # testing happens here
|
import pytest
@pytest.mark.asyncio
async def test_text_recognition(test_app_asyncio, mock_recognition_image):
response = await test_app_asyncio.post("/recognition", files={"file": mock_recognition_image})
assert response.status_code == 200
assert response.json() == {"value": "invite"}
|
import numpy as np
import pytest
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_text_detection(test_app_asyncio, mock_detection_image):
response = await test_app_asyncio.post("/detection", files={"file": mock_detection_image})
assert response.status_code == 200
json_response = response.json()
gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339
    # Check that IoU with GT is reasonable
assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]
pred_boxes = np.array([elt["box"] for elt in json_response])
iou_mat = box_iou(gt_boxes, pred_boxes)
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]
|
import numpy as np
import pytest
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_perform_ocr(test_app_asyncio, mock_detection_image):
response = await test_app_asyncio.post("/ocr", files={"file": mock_detection_image})
assert response.status_code == 200
json_response = response.json()
gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339
gt_labels = ["Hello", "world!"]
    # Check that IoU with GT is reasonable
assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]
pred_boxes = np.array([elt["box"] for elt in json_response])
pred_labels = np.array([elt["value"] for elt in json_response])
iou_mat = box_iou(gt_boxes, pred_boxes)
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
gt_idxs, pred_idxs = gt_idxs[is_kept], pred_idxs[is_kept]
assert gt_idxs.shape[0] == gt_boxes.shape[0]
assert all(gt_labels[gt_idx] == pred_labels[pred_idx] for gt_idx, pred_idx in zip(gt_idxs, pred_idxs))
|
import numpy as np
import pytest
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_perform_kie(test_app_asyncio, mock_detection_image):
response = await test_app_asyncio.post("/kie", files={"file": mock_detection_image})
assert response.status_code == 200
json_response = response.json()
gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339
gt_labels = ["Hello", "world!"]
    # Check that IoU with GT is reasonable
assert isinstance(json_response, dict) and len(list(json_response.values())[0]) == gt_boxes.shape[0]
pred_boxes = np.array([elt["box"] for json_out in json_response.values() for elt in json_out])
pred_labels = np.array([elt["value"] for json_out in json_response.values() for elt in json_out])
iou_mat = box_iou(gt_boxes, pred_boxes)
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
gt_idxs, pred_idxs = gt_idxs[is_kept], pred_idxs[is_kept]
assert gt_idxs.shape[0] == gt_boxes.shape[0]
assert all(gt_labels[gt_idx] == pred_labels[pred_idx] for gt_idx, pred_idx in zip(gt_idxs, pred_idxs))
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import setuptools
setuptools.setup(
name="tensorboard_plugin_torch_profiler",
version="0.1.0",
description="PyTorch Profiler TensorBoard Plugin",
packages=setuptools.find_packages(),
package_data={
"tensorboard_plugin_torch_profiler": ["static/**"],
},
install_requires=[
"tensorboard",
"pandas"
],
entry_points={
"tensorboard_plugins": [
"torch_profiler = tensorboard_plugin_torch_profiler.plugin:TorchProfilerPlugin",
],
},
)
|
import unittest
import math
from tensorboard_plugin_torch_profiler.profiler.overall_parser import (
merge_ranges, subtract_ranges_lists, intersection_ranges_lists, get_ranges_sum
)
def check_ranges_equal(ranges1, ranges2):
if len(ranges1) != len(ranges2):
return False
for i in range(len(ranges1)):
if ranges1[i][0] != ranges2[i][0] or ranges1[i][1] != ranges2[i][1]:
return False
return True
class TestOverallParser(unittest.TestCase):
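    # merge_ranges collapses overlapping (start, end) intervals into a sorted list of disjoint ranges.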
def test_merge_ranges(self):
src_ranges = [(1.1, 2.2), (1.5, 2.3), (3.3, 3.9), (3.5, 3.6), (3.7, 3.8), (4.1, 4.2)]
expected_ranges = [(1.1, 2.3), (3.3, 3.9), (4.1, 4.2)]
dst_ranges = merge_ranges(src_ranges, True)
is_equal = check_ranges_equal(dst_ranges, expected_ranges)
self.assertTrue(is_equal)
def test_subtract_ranges_lists(self):
ranges1 = [(1.1, 2.2), (3.3, 4.4), (5.5, 6.6)]
ranges2 = [(0, 0.1), (1.0, 1.4), (1.5, 1.6), (1.9, 3.4), (4.3, 4.6)]
expected_ranges = [(1.4, 1.5), (1.6, 1.9), (3.4, 4.3), (5.5, 6.6)]
dst_ranges = subtract_ranges_lists(ranges1, ranges2)
is_equal = check_ranges_equal(dst_ranges, expected_ranges)
self.assertTrue(is_equal)
def test_intersection_ranges_lists(self):
ranges1 = [(1.1, 2.2), (3.3, 4.4), (5.5, 6.6)]
ranges2 = [(0, 0.1), (1.0, 1.4), (1.5, 1.6), (1.9, 3.4), (4.3, 4.6)]
expected_ranges = [(1.1, 1.4), (1.5, 1.6), (1.9, 2.2), (3.3, 3.4), (4.3, 4.4)]
dst_ranges = intersection_ranges_lists(ranges1, ranges2)
is_equal = check_ranges_equal(dst_ranges, expected_ranges)
self.assertTrue(is_equal)
def test_get_ranges_sum(self):
ranges = [(1.1, 2.2), (3.3, 4.4), (5.5, 6.6)]
expected_sum = 3.3
dst_sum = get_ranges_sum(ranges)
self.assertTrue(math.isclose(dst_sum, expected_sum))
if __name__ == '__main__':
unittest.main()
|
import json
import unittest
import tensorboard_plugin_torch_profiler.profiler.trace as trace
from tensorboard_plugin_torch_profiler.profiler.data import RunProfileData
SCHEMA_VERSION = 1
WORKER_NAME = "worker0"
def parse_json_trace(json_content):
trace_json = json.loads(json_content)
profile = RunProfileData(WORKER_NAME)
parser = trace.get_event_parser(SCHEMA_VERSION)
profile.events = []
for data in trace_json:
event = parser.parse(data)
if event is not None:
profile.events.append(event)
return profile
'''
All the events in these JSON strings are simulated, not actually generated events.
We removed the data fields that are not used by the current version of the profiler,
to make correctness easier to check and to keep the strings shorter.
We even renamed data values such as kernel names or "ts" to simplify the strings.
'''
class TestProfiler(unittest.TestCase):
# A test case including all 7 event categories.
def test_all_categories(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__", "pid": 13721, "tid": "123",
"ts": 100, "dur": 180,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::to", "pid": 13721, "tid": "123",
"ts": 200, "dur": 60,
"args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::nll_loss_backward", "pid": 13721, "tid": "456",
"ts": 340, "dur": 70,
"args": {"Input dims": [[], [32, 1000], [32], [], [], [], []], "External id": 4}
},
{
"ph": "X", "cat": "Operator",
"name": "ProfilerStep#1", "pid": 13721, "tid": "123",
"ts": 50, "dur": 400,
"args": {"Input dims": [], "External id": 1}
},
{
"ph": "X", "cat": "Memcpy",
"name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7",
"ts": 405, "dur": 10,
"args": {"stream": 7, "correlation": 334, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaMemcpyAsync", "pid": 13721, "tid": "456",
"ts": 360, "dur": 20,
"args": {"correlation": 334, "external id": 4}
},
{
"ph": "X", "cat": "Memset",
"name": "Memset (Device)", "pid": 0, "tid": "stream 7",
"ts": 420, "dur": 5,
"args": {"stream": 7, "correlation": 40344, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaMemsetAsync", "pid": 13721, "tid": "456",
"ts": 390, "dur": 10,
"args": {"correlation": 40344, "external id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 430, "dur": 15,
"args": {"correlation": 40348, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 405, "dur": 5,
"args": {"correlation": 40348, "external id": 4}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
self.assertTrue(profile.is_gpu_used)
step = profile.steps_costs[0]
self.assertEqual(step.kernel_cost, 15)
self.assertEqual(step.memcpy_cost, 10)
self.assertEqual(step.memset_cost, 5)
self.assertEqual(step.runtime_cost, 30)
self.assertEqual(step.dataloader_cost, 180)
self.assertEqual(step.cpuop_cost, 35)
self.assertEqual(step.other_cost, 125)
self.assertEqual(len(profile.op_list_groupby_name), 2)
self.assertEqual(len(profile.op_list_groupby_name_input), 2)
def test_op_list(op_list):
op_count = 0
for op_agg in op_list:
if op_agg.name == "aten::to":
op_count += 1
self.assertEqual(op_agg.input_shape, "[[2, 8, 5], [], [], [], [], [], [], []]")
self.assertEqual(op_agg.calls, 1)
self.assertEqual(op_agg.host_duration, 60)
self.assertEqual(op_agg.device_duration, 0)
self.assertEqual(op_agg.self_host_duration, 60)
self.assertEqual(op_agg.self_device_duration, 0)
if op_agg.name == "aten::nll_loss_backward":
op_count += 1
self.assertEqual(op_agg.input_shape, "[[], [32, 1000], [32], [], [], [], []]")
self.assertEqual(op_agg.calls, 1)
self.assertEqual(op_agg.host_duration, 70)
self.assertEqual(op_agg.device_duration, 30)
self.assertEqual(op_agg.self_host_duration, 70)
self.assertEqual(op_agg.self_device_duration, 30)
self.assertEqual(op_count, 2)
test_op_list(profile.op_list_groupby_name)
test_op_list(profile.op_list_groupby_name_input)
self.assertEqual(len(profile.kernel_list_groupby_name_op), 1)
self.assertEqual(profile.kernel_stat.shape[0], 1)
self.assertEqual(profile.kernel_list_groupby_name_op[0].name,
"void cunn_ClassNLLCriterion_updateGradInput_kernel<float>")
self.assertEqual(profile.kernel_list_groupby_name_op[0].op_name, "aten::nll_loss_backward")
self.assertEqual(profile.kernel_list_groupby_name_op[0].calls, 1)
self.assertEqual(profile.kernel_list_groupby_name_op[0].total_duration, 15)
self.assertEqual(profile.kernel_list_groupby_name_op[0].min_duration, 15)
self.assertEqual(profile.kernel_list_groupby_name_op[0].max_duration, 15)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1)
self.assertEqual(profile.kernel_stat.iloc[0]["sum"], 15)
self.assertEqual(profile.kernel_stat.iloc[0]["mean"], 15)
self.assertEqual(profile.kernel_stat.iloc[0]["min"], 15)
self.assertEqual(profile.kernel_stat.iloc[0]["max"], 15)
    # Test using external_id to build the relationship between Operator and Runtime.
    # external_id is used to correlate a RuntimeNode with its parent OperatorNode or ProfilerStepNode,
    # because when a RuntimeNode has duration 0 and starts at the same time as an OperatorNode,
    # interval containment alone cannot tell whether it is a child or a sibling of that OperatorNode.
def test_external_id(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::mat_mul", "pid": 13721, "tid": "456",
"ts": 100, "dur": 100,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 120, "dur": 70,
"args": {"Input dims": [], "External id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 130, "dur": 5,
"args": {"correlation": 334, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 120, "dur": 0,
"args": {"correlation": 334, "external id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 130, "dur": 6,
"args": {"correlation": 335, "external id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 120, "dur": 0,
"args": {"correlation": 335, "external id": 2}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 130, "dur": 7,
"args": {"correlation": 336, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 190, "dur": 0,
"args": {"correlation": 336, "external id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 130, "dur": 8,
"args": {"correlation": 337, "external id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 190, "dur": 0,
"args": {"correlation": 337, "external id": 2}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
op_count = 0
for op_agg in profile.op_list_groupby_name:
if op_agg.name == "aten::mat_mul":
op_count += 1
self.assertEqual(op_agg.device_duration, 5 + 6 + 7 + 8)
self.assertEqual(op_agg.self_device_duration, 6 + 8)
if op_agg.name == "aten::mm":
op_count += 1
self.assertEqual(op_agg.device_duration, 5 + 7)
self.assertEqual(op_agg.self_device_duration, 5 + 7)
self.assertEqual(op_count, 2)
    # Test operators' parent-child relationships when they share a start time or an end time.
    def test_operator_relation(self):
        # Two events with the same start time.
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::mat_mul", "pid": 13721, "tid": "456",
"ts": 100, "dur": 100,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 100, "dur": 70,
"args": {"Input dims": [], "External id": 4}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
op_count = 0
for op_agg in profile.op_list_groupby_name:
if op_agg.name == "aten::mat_mul":
op_count += 1
self.assertEqual(op_agg.self_host_duration, 100 - 70)
if op_agg.name == "aten::mm":
op_count += 1
self.assertEqual(op_agg.self_host_duration, 70)
self.assertEqual(op_count, 2)
        # Two events with the same end time.
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::mat_mul", "pid": 13721, "tid": "456",
"ts": 100, "dur": 100,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 130, "dur": 70,
"args": {"Input dims": [], "External id": 4}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
op_count = 0
for op_agg in profile.op_list_groupby_name:
if op_agg.name == "aten::mat_mul":
op_count += 1
self.assertEqual(op_agg.self_host_duration, 100 - 70)
if op_agg.name == "aten::mm":
op_count += 1
self.assertEqual(op_agg.self_host_duration, 70)
self.assertEqual(op_count, 2)
    # Test multiple nested operators with the same name.
    # In this case, all the operators except the topmost one should be removed,
    # and all runtimes/kernels belonging to the child operators should be attached to the one that is kept.
    # This behavior is consistent with _remove_dup_nodes in torch/autograd/profiler.py.
def test_remove_dup_nodes(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 100, "dur": 100,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 110, "dur": 80,
"args": {"Input dims": [], "External id": 3}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 120, "dur": 60,
"args": {"Input dims": [], "External id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 130, "dur": 20,
"args": {"correlation": 335, "external id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void gemmSN_TN_kernel_64addr", "pid": 0, "tid": "stream 7",
"ts": 220, "dur": 8,
"args": {"correlation": 335, "external id": 4}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
self.assertEqual(len(profile.op_list_groupby_name), 1)
self.assertEqual(profile.op_list_groupby_name[0].self_device_duration, 8)
# Test Runtime with "external id" 0.
# This kind of Runtime should not be attached to any operator,
# and should be included in accumulating device time.
def test_top_level_runtime(self):
# This operator is different thread with the runtime.
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "123",
"ts": 100, "dur": 100,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 130, "dur": 20,
"args": {"correlation": 335, "external id": 0}
},
{
"ph": "X", "cat": "Kernel",
"name": "void gemmSN_TN_kernel_64addr", "pid": 0, "tid": "stream 7",
"ts": 220, "dur": 8,
"args": {"correlation": 335, "external id": 0}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
self.assertEqual(profile.op_list_groupby_name[0].device_duration, 0)
self.assertEqual(profile.op_list_groupby_name[0].self_device_duration, 0)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1)
    # Test a Runtime called directly from ProfilerStep, not from inside any operator.
def test_runtime_called_by_profilerstep(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "ProfilerStep#1", "pid": 13721, "tid": "456",
"ts": 100, "dur": 300,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 130, "dur": 20,
"args": {"correlation": 335, "external id": 2}
},
{
"ph": "X", "cat": "Kernel",
"name": "void gemmSN_TN_kernel_64addr", "pid": 0, "tid": "stream 7",
"ts": 220, "dur": 8,
"args": {"correlation": 335, "external id": 2}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
step = profile.steps_costs[0]
self.assertEqual(step.kernel_cost, 8)
self.assertEqual(step.runtime_cost, 20)
self.assertEqual(step.cpuop_cost, 0)
self.assertEqual(step.other_cost, 300 - 8 - 20)
self.assertEqual(len(profile.op_list_groupby_name), 0) # ProfilerStep is not regarded as an operator.
self.assertEqual(len(profile.op_list_groupby_name_input), 0)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1)
self.assertEqual(len(profile.kernel_list_groupby_name_op), 1)
    # Test one Runtime launching more than one Kernel.
    # In some cases, such as running BERT in DataParallel mode (1 process, 2 GPUs),
    # one runtime call such as cudaLaunchCooperativeKernelMultiDevice can trigger more than one kernel,
    # with each kernel running on a separate GPU card.
    def test_runtime_launch_multiple_kernels(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "Broadcast", "pid": 13721, "tid": "456",
"ts": 100, "dur": 300,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchCooperativeKernelMultiDevice", "pid": 13721, "tid": "456",
"ts": 130, "dur": 20,
"args": {"correlation": 335, "external id": 2}
},
{
"ph": "X", "cat": "Kernel",
"name": "ncclBroadcastRingLLKernel_copy_i8(ncclColl)", "pid": 0, "tid": "stream 13",
"ts": 160, "dur": 120318,
"args": {"device": 0, "context": 1, "stream": 13,
"correlation": 335, "external id": 2}
},
{
"ph": "X", "cat": "Kernel",
"name": "ncclBroadcastRingLLKernel_copy_i8(ncclColl)", "pid": 0, "tid": "stream 22",
"ts": 170, "dur": 132800,
"args": {"device": 1, "context": 2, "stream": 22,
"correlation": 335, "external id": 2}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
self.assertEqual(profile.op_list_groupby_name[0].device_duration, 120318 + 132800)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 2)
self.assertEqual(len(profile.kernel_list_groupby_name_op), 1)
# Test when there is no ProfilerStep#.
def test_no_profilerstep(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::to", "pid": 13721, "tid": "123",
"ts": 100, "dur": 60,
"args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 3}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::nll_loss_backward", "pid": 13721, "tid": "456",
"ts": 300, "dur": 70,
"args": {"Input dims": [[], [32, 1000], [32], [], [], [], []], "External id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 320, "dur": 100,
"args": {"correlation": 40348, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 310, "dur": 20,
"args": {"correlation": 40348, "external id": 4}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
self.assertTrue(profile.is_gpu_used)
self.assertEqual(len(profile.steps_costs), 1)
step = profile.steps_costs[0]
self.assertEqual(step.kernel_cost, 100)
self.assertEqual(step.memcpy_cost, 0)
self.assertEqual(step.memset_cost, 0)
self.assertEqual(step.runtime_cost, 320 - 310)
self.assertEqual(step.dataloader_cost, 0)
self.assertEqual(step.cpuop_cost, 60 + (310 - 300))
        # If there is no ProfilerStep, all events are regarded as a single step.
self.assertEqual(step.other_cost, 300 - (100 + 60))
self.assertEqual(step.step_total_cost, (320 + 100) - 100)
self.assertEqual(len(profile.op_list_groupby_name), 2)
self.assertEqual(len(profile.op_list_groupby_name_input), 2)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1)
self.assertEqual(len(profile.kernel_list_groupby_name_op), 1)
def test_op_list(op_list):
op_count = 0
for op_agg in op_list:
if op_agg.name == "aten::to":
op_count += 1
self.assertEqual(op_agg.input_shape, "[[2, 8, 5], [], [], [], [], [], [], []]")
self.assertEqual(op_agg.calls, 1)
self.assertEqual(op_agg.host_duration, 60)
self.assertEqual(op_agg.device_duration, 0)
self.assertEqual(op_agg.self_host_duration, 60)
self.assertEqual(op_agg.self_device_duration, 0)
if op_agg.name == "aten::nll_loss_backward":
op_count += 1
self.assertEqual(op_agg.input_shape, "[[], [32, 1000], [32], [], [], [], []]")
self.assertEqual(op_agg.calls, 1)
self.assertEqual(op_agg.host_duration, 70)
self.assertEqual(op_agg.device_duration, 100)
self.assertEqual(op_agg.self_host_duration, 70)
self.assertEqual(op_agg.self_device_duration, 100)
self.assertEqual(op_count, 2)
test_op_list(profile.op_list_groupby_name)
test_op_list(profile.op_list_groupby_name_input)
self.assertEqual(profile.kernel_list_groupby_name_op[0].name,
"void cunn_ClassNLLCriterion_updateGradInput_kernel<float>")
self.assertEqual(profile.kernel_list_groupby_name_op[0].op_name, "aten::nll_loss_backward")
self.assertEqual(profile.kernel_list_groupby_name_op[0].calls, 1)
self.assertEqual(profile.kernel_list_groupby_name_op[0].total_duration, 100)
self.assertEqual(profile.kernel_list_groupby_name_op[0].min_duration, 100)
self.assertEqual(profile.kernel_list_groupby_name_op[0].max_duration, 100)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1)
self.assertEqual(profile.kernel_stat.iloc[0]["sum"], 100)
self.assertEqual(profile.kernel_stat.iloc[0]["mean"], 100)
self.assertEqual(profile.kernel_stat.iloc[0]["min"], 100)
self.assertEqual(profile.kernel_stat.iloc[0]["max"], 100)
def test_multiple_profilersteps(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "ProfilerStep#1", "pid": 13721, "tid": "123",
"ts": 100, "dur": 200,
"args": {"Input dims": [], "External id": 1}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::to", "pid": 13721, "tid": "123",
"ts": 200, "dur": 60,
"args": {"Input dims": [[2, 8, 5], [], [], [], [], [], [], []], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "ProfilerStep#2", "pid": 13721, "tid": "123",
"ts": 350, "dur": 150,
"args": {"Input dims": [], "External id": 3}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "123",
"ts": 360, "dur": 50,
"args": {"Input dims": [], "External id": 4}
},
{
"ph": "X", "cat": "Memcpy",
"name": "Memcpy HtoD (Pageable -> Device)", "pid": 0, "tid": "stream 7",
"ts": 280, "dur": 40,
"args": {"stream": 7, "correlation": 334, "external id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaMemcpyAsync", "pid": 13721, "tid": "123",
"ts": 250, "dur": 5,
"args": {"correlation": 334, "external id": 2}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 410, "dur": 200,
"args": {"correlation": 40348, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "123",
"ts": 400, "dur": 20,
"args": {"correlation": 40348, "external id": 4}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
self.assertTrue(profile.is_gpu_used)
self.assertEqual(len(profile.steps_costs), 2)
step = profile.steps_costs[0]
self.assertEqual(step.kernel_cost, 0)
self.assertEqual(step.memcpy_cost, (100 + 200) - 280)
self.assertEqual(step.memset_cost, 0)
self.assertEqual(step.runtime_cost, 5)
self.assertEqual(step.dataloader_cost, 0)
self.assertEqual(step.cpuop_cost, 60 - 5)
self.assertEqual(step.other_cost, 200 - 60 - 20)
self.assertEqual(step.step_total_cost, 200) # Only the time inside ProfilerStep will count.
step = profile.steps_costs[1]
self.assertEqual(step.kernel_cost, (350 + 150) - 410)
self.assertEqual(step.memcpy_cost, 0)
self.assertEqual(step.memset_cost, 0)
self.assertEqual(step.runtime_cost, 410 - 400)
self.assertEqual(step.dataloader_cost, 0)
self.assertEqual(step.cpuop_cost, 400 - 360)
self.assertEqual(step.other_cost, 360 - 350)
self.assertEqual(step.step_total_cost, 150) # Only the time inside ProfilerStep will count.
self.assertEqual(profile.avg_costs.step_total_cost, (200 + 150) / 2)
self.assertEqual(len(profile.op_list_groupby_name), 2)
self.assertEqual(len(profile.op_list_groupby_name_input), 2)
def test_op_list(op_list):
op_count = 0
for op_agg in op_list:
if op_agg.name == "aten::to":
op_count += 1
self.assertEqual(op_agg.input_shape, "[[2, 8, 5], [], [], [], [], [], [], []]")
self.assertEqual(op_agg.calls, 1)
self.assertEqual(op_agg.host_duration, 60)
self.assertEqual(op_agg.device_duration, 40)
self.assertEqual(op_agg.self_host_duration, 60)
self.assertEqual(op_agg.self_device_duration, 40)
if op_agg.name == "aten::mm":
op_count += 1
self.assertEqual(op_agg.input_shape, "[]")
self.assertEqual(op_agg.calls, 1)
self.assertEqual(op_agg.host_duration, 50)
self.assertEqual(op_agg.device_duration, 200)
self.assertEqual(op_agg.self_host_duration, 50)
self.assertEqual(op_agg.self_device_duration, 200)
self.assertEqual(op_count, 2)
test_op_list(profile.op_list_groupby_name)
test_op_list(profile.op_list_groupby_name_input)
self.assertEqual(len(profile.kernel_list_groupby_name_op), 1)
self.assertEqual(profile.kernel_stat.shape[0], 1)
self.assertEqual(profile.kernel_list_groupby_name_op[0].name,
"void cunn_ClassNLLCriterion_updateGradInput_kernel<float>")
self.assertEqual(profile.kernel_list_groupby_name_op[0].op_name, "aten::mm")
self.assertEqual(profile.kernel_list_groupby_name_op[0].calls, 1)
self.assertEqual(profile.kernel_list_groupby_name_op[0].total_duration, 200)
self.assertEqual(profile.kernel_list_groupby_name_op[0].min_duration, 200)
self.assertEqual(profile.kernel_list_groupby_name_op[0].max_duration, 200)
self.assertEqual(profile.kernel_stat.iloc[0]["count"], 1)
self.assertEqual(profile.kernel_stat.iloc[0]["sum"], 200)
self.assertEqual(profile.kernel_stat.iloc[0]["mean"], 200)
self.assertEqual(profile.kernel_stat.iloc[0]["min"], 200)
self.assertEqual(profile.kernel_stat.iloc[0]["max"], 200)
    # Test self time and total time on an operator with a nested operator.
    def test_self_time(self):
json_content = """
[{
"ph": "X", "cat": "Operator",
"name": "aten::mat_mul", "pid": 13721, "tid": "456",
"ts": 100, "dur": 100,
"args": {"Input dims": [], "External id": 2}
},
{
"ph": "X", "cat": "Operator",
"name": "aten::mm", "pid": 13721, "tid": "456",
"ts": 120, "dur": 40,
"args": {"Input dims": [], "External id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 155, "dur": 20,
"args": {"correlation": 334, "external id": 4}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 150, "dur": 10,
"args": {"correlation": 334, "external id": 4}
},
{
"ph": "X", "cat": "Kernel",
"name": "void cunn_ClassNLLCriterion_updateGradInput_kernel<float>", "pid": 0, "tid": "stream 7",
"ts": 210, "dur": 16,
"args": {"correlation": 335, "external id": 2}
},
{
"ph": "X", "cat": "Runtime",
"name": "cudaLaunchKernel", "pid": 13721, "tid": "456",
"ts": 170, "dur": 25,
"args": {"correlation": 335, "external id": 2}
}]
"""
profile = parse_json_trace(json_content)
profile.process()
op_count = 0
for op_agg in profile.op_list_groupby_name:
if op_agg.name == "aten::mat_mul":
op_count += 1
self.assertEqual(op_agg.host_duration, 100)
self.assertEqual(op_agg.device_duration, 20 + 16)
self.assertEqual(op_agg.self_host_duration, 100 - 40)
self.assertEqual(op_agg.self_device_duration, 16)
if op_agg.name == "aten::mm":
op_count += 1
self.assertEqual(op_agg.host_duration, 40)
self.assertEqual(op_agg.device_duration, 20)
self.assertEqual(op_agg.self_host_duration, 40)
self.assertEqual(op_agg.self_device_duration, 20)
self.assertEqual(op_count, 2)
if __name__ == '__main__':
unittest.main()
|
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as T
import torchvision.models as models
from torch.autograd.profiler import profile
model = models.resnet50(pretrained=True)
model.cuda()
cudnn.benchmark = True
transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
shuffle=True, num_workers=0)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
device = torch.device("cuda:0")
model.train()
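# Profile a few training steps with the legacy autograd profiler (Kineto-backed),
# then export a Chrome trace that the TensorBoard plugin can load from ./result.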
with profile(use_cuda=True, use_kineto=True, record_shapes=True) as p:
for step, data in enumerate(trainloader, 0):
print("step:{}".format(step))
inputs, labels = data[0].to(device=device), data[1].to(device=device)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step >= 5:
break
try:
os.mkdir("result")
except Exception:
pass
p.export_chrome_trace("./result/worker0.pt.trace.json")
|
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as T
import torchvision.datasets as datasets
import torchvision.models as models
import torch.profiler
model = models.resnet50(pretrained=True)
model.cuda()
cudnn.benchmark = True
transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
shuffle=True, num_workers=0)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
device = torch.device("cuda:0")
model.train()
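# torch.profiler with a schedule: skip 2 steps (wait), warm up for 3 steps, then record 6 active steps,
# handing the finished trace to output_fn via on_trace_ready.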
def output_fn(p):
try:
os.mkdir("result")
    except Exception:
pass
p.export_chrome_trace("./result/worker0.pt.trace.json")
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA],
schedule=torch.profiler.schedule(
wait=2,
warmup=3,
active=6),
on_trace_ready=output_fn,
record_shapes=True
) as p:
for step, data in enumerate(trainloader, 0):
print("step:{}".format(step))
inputs, labels = data[0].to(device=device), data[1].to(device=device)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step + 1 >= 11:
break
p.step()
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
class Run(object):
""" A profiler run. For visualization purpose only.
May contain profiling results from multiple workers. E.g. distributed scenario.
"""
def __init__(self, name, run_dir):
self.name = name
self.run_dir = run_dir
self.profiles = OrderedDict()
@property
def workers(self):
return list(self.profiles.keys())
@property
def views(self):
profile = self.get_profile()
if profile is None:
return None
return profile.views
def add_profile(self, profile):
self.profiles[profile.worker] = profile
def get_profile(self, worker=None):
if len(self.profiles) == 0:
return None
if not worker:
return next(iter(self.profiles.values()))
return self.profiles.get(worker, None)
class RunProfile(object):
""" Cooked profiling result for a worker. For visualization purpose only.
"""
def __init__(self, worker):
self.worker = worker
self.views = []
self.is_gpu_used = False
self.overview = None
self.operation_pie_by_name = None
self.operation_table_by_name = None
self.operation_pie_by_name_input = None
self.operation_table_by_name_input = None
self.kernel_op_table = None
self.kernel_pie = None
self.kernel_table = None
self.trace_file_path = None
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
# Entry point for the PyTorch Profiler TensorBoard plugin package.
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
PLUGIN_NAME = "torch_profiler"
TRACE_FILE_SUFFIX = ".pt.trace.json"
TRACE_GZIP_FILE_SUFFIX = ".pt.trace.json.gz"
MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS = 10
View = namedtuple("View", "id, name, display_name")
OVERALL_VIEW = View(1, "overall", "Overview")
OP_VIEW = View(2, "operator", "Operator")
KERNEL_VIEW = View(3, "kernel", "Kernel")
TRACE_VIEW = View(4, "trace", "Trace")
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from . import consts
def get_logger():
logger = logging.getLogger(consts.PLUGIN_NAME)
logger.setLevel(logging.INFO)
return logger
def is_chrome_trace_file(path):
return path.endswith(consts.TRACE_GZIP_FILE_SUFFIX) or path.endswith(consts.TRACE_FILE_SUFFIX)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import multiprocessing
import os
import threading
import time
from collections import OrderedDict
import werkzeug
from tensorboard.plugins import base_plugin
from werkzeug import wrappers
from . import consts
from . import utils
from .profiler import RunLoader
from .run import Run
logger = utils.get_logger()
class TorchProfilerPlugin(base_plugin.TBPlugin):
"""TensorBoard plugin for Torch Profiler."""
plugin_name = consts.PLUGIN_NAME
def __init__(self, context):
"""Instantiates TorchProfilerPlugin.
Args:
context: A base_plugin.TBContext instance.
"""
super(TorchProfilerPlugin, self).__init__(context)
self.logdir = os.path.abspath(context.logdir)
self._is_active = None
self._is_active_initialized_event = threading.Event()
self._runs = OrderedDict()
self._runs_lock = threading.Lock()
self._queue = multiprocessing.Queue()
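        # Two background threads: one scans logdir for new runs and spawns loader processes,
        # the other drains the result queue and publishes loaded runs to the frontend routes.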
monitor_runs = threading.Thread(target=self.monitor_runs, name="monitor_runs", daemon=True)
monitor_runs.start()
receive_runs = threading.Thread(target=self.receive_runs, name="receive_runs", daemon=True)
receive_runs.start()
def is_active(self):
"""Returns whether there is relevant data for the plugin to process.
"""
self._is_active_initialized_event.wait()
return self._is_active
def get_plugin_apps(self):
return {
"/index.js": self.static_file_route,
"/main.js": self.static_file_route,
"/index.html": self.static_file_route,
"/overall.html": self.static_file_route,
"/trace_viewer_full.html": self.static_file_route,
"/trace_embedding.html": self.static_file_route,
"/operator.html": self.static_file_route,
"/kernel.html": self.static_file_route,
"/runs": self.runs_route,
"/views": self.views_route,
"/workers": self.workers_route,
"/overview": self.overview_route,
"/operation": self.operation_pie_route,
"/operation/table": self.operation_table_route,
"/kernel": self.kernel_pie_route,
"/kernel/table": self.kernel_table_route,
"/trace": self.trace_route
}
def frontend_metadata(self):
return base_plugin.FrontendMetadata(es_module_path="/index.js")
def monitor_runs(self):
logger.info("Monitor runs begin")
# Set _is_active quickly based on file pattern match, don't wait for data loading
self._is_active = any(self._get_run_dirs())
self._is_active_initialized_event.set()
touched = set()
while True:
try:
logger.debug("Scan run dir")
run_dirs = self._get_run_dirs()
                # Assume run directories are never deleted; trigger an async load when a new run is found
for name, run_dir in run_dirs:
if name not in touched:
logger.info("Find run %s under %s", name, run_dir)
touched.add(name)
# Use multiprocessing to avoid UI stall and reduce data parsing time
process = multiprocessing.Process(target=_load_run, args=(self._queue, name, run_dir))
process.daemon = True
process.start()
except Exception as ex:
logger.warning("Failed to scan runs. Exception=%s", ex, exc_info=True)
            time.sleep(consts.MONITOR_RUN_REFRESH_INTERVAL_IN_SECONDS)
def receive_runs(self):
while True:
run = self._queue.get()
if run is None:
continue
logger.info("Add run %s", run.name)
with self._runs_lock:
is_new = run.name not in self._runs
self._runs[run.name] = run
if is_new:
self._runs = OrderedDict(sorted(self._runs.items()))
# Update is_active
if not self._is_active:
self._is_active = True
def _get_run_dirs(self):
"""Scan logdir, find PyTorch Profiler run directories.
A directory is considered to be a run if it contains 1 or more *.pt.trace.json[.gz].
E.g. there are 2 runs: run1, run2
/run1
/[worker1].pt.trace.json.gz
/[worker2].pt.trace.json.gz
/run2
/[worker1].pt.trace.json
"""
for root, _, files in os.walk(self.logdir):
for file in files:
if utils.is_chrome_trace_file(file):
run_dir = os.path.abspath(root)
if run_dir == self.logdir:
name = os.path.basename(run_dir)
else:
name = os.path.relpath(run_dir, self.logdir)
yield name, run_dir
break
def get_run(self, name) -> Run:
with self._runs_lock:
return self._runs.get(name, None)
@wrappers.Request.application
def runs_route(self, request):
with self._runs_lock:
names = list(self._runs.keys())
return self.respond_as_json(names)
@wrappers.Request.application
def views_route(self, request):
name = request.args.get("run")
run = self.get_run(name)
views = sorted(run.views, key=lambda x: x.id)
views_list = []
for view in views:
views_list.append(view.display_name)
return self.respond_as_json(views_list)
@wrappers.Request.application
def workers_route(self, request):
name = request.args.get("run")
run = self.get_run(name)
return self.respond_as_json(run.workers)
@wrappers.Request.application
def overview_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
data = profile.overview
data["environments"] = [{"title": "Number of Worker(s)", "value": str(len(run.workers))},
{"title": "Device Type", "value": "GPU" if profile.is_gpu_used else "CPU"}]
if profile.is_gpu_used:
data["environments"].append({"title": "Number of Device(s)", "value": "1"})
return self.respond_as_json(data)
@wrappers.Request.application
def operation_pie_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "OperationAndInputShape":
return self.respond_as_json(profile.operation_pie_by_name_input)
else:
return self.respond_as_json(profile.operation_pie_by_name)
@wrappers.Request.application
def operation_table_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "OperationAndInputShape":
return self.respond_as_json(profile.operation_table_by_name_input)
else:
return self.respond_as_json(profile.operation_table_by_name)
@wrappers.Request.application
def kernel_pie_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
return self.respond_as_json(profile.kernel_pie)
@wrappers.Request.application
def kernel_table_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "Kernel":
return self.respond_as_json(profile.kernel_table)
else:
return self.respond_as_json(profile.kernel_op_table)
@wrappers.Request.application
def trace_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
        with open(profile.trace_file_path, 'rb') as f:
raw_data = f.read()
if profile.trace_file_path.endswith('.gz'):
headers = []
headers.append(('Content-Encoding', 'gzip'))
return werkzeug.Response(raw_data, content_type="application/json", headers=headers)
else:
return werkzeug.Response(raw_data, content_type="application/json")
@wrappers.Request.application
def static_file_route(self, request):
filename = os.path.basename(request.path)
extension = os.path.splitext(filename)[1]
if extension == '.html':
mimetype = 'text/html'
elif extension == '.css':
mimetype = 'text/css'
elif extension == '.js':
mimetype = 'application/javascript'
else:
mimetype = 'application/octet-stream'
filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
try:
with open(filepath, 'rb') as infile:
contents = infile.read()
except IOError:
            return werkzeug.Response('404 Not Found', status=404, content_type='text/plain')
return werkzeug.Response(
contents, content_type=mimetype
)
@staticmethod
def respond_as_json(obj):
content = json.dumps(obj)
return werkzeug.Response(content, content_type="application/json")
def _load_run(queue, name, run_dir):
import absl.logging
absl.logging.use_absl_handler()
try:
logger.info("Load run %s", name)
# Currently, assume run data is immutable, so just load once
loader = RunLoader(name, run_dir)
run = loader.load()
logger.info("Run %s loaded", name)
queue.put(run)
except Exception as ex:
logger.warning("Failed to load run %s. Exception=%s", ex, name, exc_info=True)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .. import utils
__all__ = ["EventTypes", "get_event_parser"]
logger = utils.get_logger()
class EventTypes(object):
NET = "NetEvent"
OPERATOR = "OperatorEvent"
PROFILER_STEP = "ProfilerStepEvent"
RUNTIME = "RuntimeEvent"
KERNEL = "KernelEvent"
MEMCPY = "MemcpyEvent"
MEMSET = "MemsetEvent"
PYTHON = "PythonEvent"
class TraceEvent(object):
def __init__(self, type, data):
self.type = type
self.category = data.get("cat", "")
self.name = data.get("name", None)
self.ts = data.get("ts", None)
self.duration = data.get("dur", None)
self.pid = data.get("pid", None)
self.tid = data.get("tid", None)
self.args = data.get("args", None)
def to_dict(self):
return vars(self)
class NetEvent(TraceEvent):
def __init__(self, data):
super(NetEvent, self).__init__(EventTypes.NET, data)
class OperatorEvent(TraceEvent):
def __init__(self, data):
super(OperatorEvent, self).__init__(EventTypes.OPERATOR, data)
class ProfilerStepEvent(TraceEvent):
def __init__(self, data):
super(ProfilerStepEvent, self).__init__(EventTypes.PROFILER_STEP, data)
# torch.profiler.profile.step will invoke record_function with name like "ProfilerStep#5"
self.step = int(self.name.split("#")[1])
class RuntimeEvent(TraceEvent):
def __init__(self, data):
super(RuntimeEvent, self).__init__(EventTypes.RUNTIME, data)
class KernelEvent(TraceEvent):
def __init__(self, data):
super(KernelEvent, self).__init__(EventTypes.KERNEL, data)
class MemcpyEvent(TraceEvent):
def __init__(self, data):
super(MemcpyEvent, self).__init__(EventTypes.MEMCPY, data)
class MemsetEvent(TraceEvent):
def __init__(self, data):
super(MemsetEvent, self).__init__(EventTypes.MEMSET, data)
class PythonEvent(TraceEvent):
def __init__(self, data):
super(PythonEvent, self).__init__(EventTypes.PYTHON, data)
class EventParser(object):
def __init__(self):
self._handlers = {
"X": {
"Net": NetEvent,
"Operator": self._parse_operator_event,
"Runtime": RuntimeEvent,
"Kernel": KernelEvent,
"Memcpy": MemcpyEvent,
"Memset": MemsetEvent,
"Python": PythonEvent,
}
}
def _get_handler(self, type=None, category=None):
handlers = self._handlers.get(type, None)
if handlers is None:
return None
return handlers.get(category, None)
def parse(self, event):
try:
type = event.get("ph", None)
category = event.get("cat", None)
handler = self._get_handler(type, category)
if handler is None:
return None
return handler(event)
except Exception as ex:
logger.warning("Failed to parse profile event. Exception=%s. Event=%s", ex, event, exc_info=True)
            raise  # re-raise with the original traceback preserved
def _parse_operator_event(self, event):
name = event.get("name")
if name.startswith("ProfilerStep#"):
return ProfilerStepEvent(event)
return OperatorEvent(event)
def get_event_parser(version=None):
return EventParser()
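# Illustrative usage sketch (not part of the original module); run it as a
# module so the relative imports resolve. The raw Chrome-trace event below is
# synthetic and only covers the fields read by TraceEvent.
if __name__ == "__main__":
    _sample = {"ph": "X", "cat": "Operator", "name": "aten::add",
               "ts": 100, "dur": 25, "pid": 0, "tid": 1, "args": {}}
    _event = get_event_parser().parse(_sample)
    assert isinstance(_event, OperatorEvent)
    print(_event.to_dict())  # type, category, name, ts, duration, pid, tid, args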
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from .loader import RunLoader
__all__ = ["RunLoader"]
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from .data import RunData, RunProfileData
from .run_generator import RunGenerator
from .. import consts, utils
from ..run import Run
logger = utils.get_logger()
class RunLoader(object):
def __init__(self, name, run_dir):
self.run = RunData(name, run_dir)
def load(self):
self._parse()
if len(self.run.profiles) == 0:
logger.warning("No profile data found.")
return None
self._process()
self._analyze()
run = self._generate_run()
return run
def _parse(self):
workers = []
for path in os.listdir(self.run.run_dir):
            # os.listdir returns bare names, so join with run_dir before the directory check
            if os.path.isdir(os.path.join(self.run.run_dir, path)):
                continue
for pattern in [consts.TRACE_GZIP_FILE_SUFFIX, consts.TRACE_FILE_SUFFIX]:
if path.endswith(pattern):
worker = path[:-len(pattern)]
workers.append(worker)
break
for worker in sorted(workers):
try:
data = RunProfileData.parse(self.run.run_dir, worker)
self.run.profiles[worker] = data
except Exception as ex:
logger.warning("Failed to parse profile data for Run %s on %s. Exception=%s",
self.run.name, worker, ex, exc_info=True)
def _process(self):
for data in self.run.profiles.values():
logger.debug("Processing profile data")
data.process()
logger.debug("Processing profile data finish")
def _analyze(self):
for data in self.run.profiles.values():
logger.debug("Analyzing profile data")
data.analyze()
logger.debug("Analyzing profile data finish")
def _generate_run(self):
run = Run(self.run.name, self.run.run_dir)
for worker, data in self.run.profiles.items():
generator = RunGenerator(worker, data)
profile = generator.generate_run_profile()
run.add_profile(profile)
return run
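# Illustrative usage sketch (not part of the original module); run it as a
# module so the relative imports resolve. "./logdir/run1" is a placeholder
# directory that must contain one or more *.pt.trace.json[.gz] files.
if __name__ == "__main__":
    _run = RunLoader("run1", "./logdir/run1").load()
    if _run is not None:
        print(_run.workers)  # worker names discovered in the run directory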
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .. import consts
from ..run import RunProfile
class RunGenerator(object):
def __init__(self, worker, profile_data):
self.worker = worker
self.profile_data = profile_data
def generate_run_profile(self):
profile_run = RunProfile(self.worker)
profile_run.is_gpu_used = self.profile_data.is_gpu_used
profile_run.views.append(consts.OVERALL_VIEW)
profile_run.overview = self._generate_overview()
profile_run.views.append(consts.OP_VIEW)
profile_run.operation_pie_by_name = self._generate_op_pie()
profile_run.operation_table_by_name = self._generate_op_table()
profile_run.operation_pie_by_name_input = self._generate_op_pie(True)
profile_run.operation_table_by_name_input = self._generate_op_table(True)
if self.profile_data.is_gpu_used:
profile_run.views.append(consts.KERNEL_VIEW)
profile_run.kernel_op_table = self._generate_kernel_op_table()
profile_run.kernel_pie = self._generate_kernel_pie()
profile_run.kernel_table = self._generate_kernel_table()
profile_run.views.append(consts.TRACE_VIEW)
profile_run.trace_file_path = self.profile_data.trace_file_path
return profile_run
def _generate_overview(self):
def build_part_time_str(part_cost, part_name):
format_str = '<div class="visualization-tooltip" style="white-space: nowrap;">' \
'Step {}<br>' \
'Total: {}us<br>' \
'<b>{}: {}us</b><br>' \
'Percentage: {}%' \
'</div>'
percentage = round(100 * part_cost / costs.step_total_cost, 2)
return format_str.format(step_name, costs.step_total_cost, part_name, part_cost, percentage)
def build_avg_cost_dict(part_name, part_cost):
cost_dict = {"name": part_name,
"description": "",
"value": round(part_cost),
"extra": round(100 * part_cost / self.profile_data.avg_costs.step_total_cost, 2)}
return cost_dict
show_gpu = self.profile_data.is_gpu_used
        column_tooltip = {"type": "string", "role": "tooltip", "p": {"html": "true"}}
        data = {}
        data["steps"] = {}
        data["steps"]["columns"] = [{"type": "string", "name": "Step"}]
        if show_gpu:
            data["steps"]["columns"].extend([{"type": "number", "name": "Kernel"},
                                             column_tooltip,
                                             {"type": "number", "name": "Memcpy"},
                                             column_tooltip,
                                             {"type": "number", "name": "Memset"},
                                             column_tooltip,
                                             {"type": "number", "name": "Runtime"},
                                             column_tooltip])
        data["steps"]["columns"].extend([{"type": "number", "name": "DataLoader"},
                                         column_tooltip,
                                         {"type": "number", "name": "CPU Exec"},
                                         column_tooltip,
                                         {"type": "number", "name": "Other"},
                                         column_tooltip])
data["steps"]["rows"] = []
for i in range(len(self.profile_data.steps_costs)):
costs = self.profile_data.steps_costs[i]
step_name = self.profile_data.steps_names[i]
row = [step_name]
if show_gpu:
row.extend([costs.kernel_cost,
build_part_time_str(costs.kernel_cost, "Kernel"),
costs.memcpy_cost,
build_part_time_str(costs.memcpy_cost, "Memcpy"),
costs.memset_cost,
build_part_time_str(costs.memset_cost, "Memset"),
costs.runtime_cost,
build_part_time_str(costs.runtime_cost, "Runtime")])
row.extend([costs.dataloader_cost,
build_part_time_str(costs.dataloader_cost, "DataLoader"),
costs.cpuop_cost,
build_part_time_str(costs.cpuop_cost, "CPU Exec"),
costs.other_cost,
build_part_time_str(costs.other_cost, "Other")])
data["steps"]["rows"].append(row)
avg_costs = []
if show_gpu:
avg_costs.extend([
build_avg_cost_dict("Kernel", self.profile_data.avg_costs.kernel_cost),
build_avg_cost_dict("Memcpy", self.profile_data.avg_costs.memcpy_cost),
build_avg_cost_dict("Memset", self.profile_data.avg_costs.memset_cost),
build_avg_cost_dict("Runtime", self.profile_data.avg_costs.runtime_cost)
])
avg_costs.extend([
build_avg_cost_dict("DataLoader", self.profile_data.avg_costs.dataloader_cost),
build_avg_cost_dict("CPU Exec", self.profile_data.avg_costs.cpuop_cost),
build_avg_cost_dict("Other", self.profile_data.avg_costs.other_cost)
])
data["performance"] = [{"name": "Average Step Time", "description": "",
"value": round(self.profile_data.avg_costs.step_total_cost),
"extra": 100, "children": avg_costs}]
if len(self.profile_data.recommendations) == 0:
html = "<li>N/A</li>"
else:
html = ""
for recommendation in self.profile_data.recommendations:
html += "<li>{}</li>".format(recommendation)
data["recommendations"] = "<ul>{}</ul>".format(html)
return data
def _generate_op_pie(self, group_by_input_shape=False):
op_device_total_time = []
op_device_self_time = []
op_host_total_time = []
op_host_self_time = []
if group_by_input_shape:
op_list = self.profile_data.op_list_groupby_name_input
else:
op_list = self.profile_data.op_list_groupby_name
for op_agg in op_list:
# Whether device_duration & self_device_duration are accurate or not depends on the input tracing data.
if op_agg.device_duration > 0:
op_device_total_time.append([op_agg.name, op_agg.device_duration])
if op_agg.self_device_duration > 0:
op_device_self_time.append([op_agg.name, op_agg.self_device_duration])
if op_agg.host_duration > 0:
op_host_total_time.append([op_agg.name, op_agg.host_duration])
if op_agg.self_host_duration > 0:
op_host_self_time.append([op_agg.name, op_agg.self_host_duration])
op_device_total_time.sort(key=lambda x: x[1], reverse=True)
op_device_self_time.sort(key=lambda x: x[1], reverse=True)
op_host_total_time.sort(key=lambda x: x[1], reverse=True)
op_host_self_time.sort(key=lambda x: x[1], reverse=True)
data = {}
device_total_time = {}
device_self_time = {}
host_total_time = {}
host_self_time = {}
if len(op_device_total_time) > 0:
device_total_time["title"] = "Device Total Time"
device_total_time["columns"] = [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}]
device_total_time["rows"] = op_device_total_time
else:
device_total_time = None
if len(op_device_self_time) > 0:
device_self_time["title"] = "Device Self Time"
device_self_time["columns"] = [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}]
device_self_time["rows"] = op_device_self_time
else:
device_self_time = None
if len(op_host_total_time) > 0:
host_total_time["title"] = "Host Total Time"
host_total_time["columns"] = [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}]
host_total_time["rows"] = op_host_total_time
else:
host_total_time = None
if len(op_host_self_time) > 0:
host_self_time["title"] = "Host Self Time"
host_self_time["columns"] = [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}]
host_self_time["rows"] = op_host_self_time
else:
host_self_time = None
data["device_total_time"] = device_total_time
data["device_self_time"] = device_self_time
data["host_total_time"] = host_total_time
data["host_self_time"] = host_self_time
return data
def _generate_op_table(self, group_by_input_shape=False):
show_gpu = self.profile_data.is_gpu_used
columns = [{"type": "string", "name": "Name"}]
if group_by_input_shape:
columns.append({"type": "string", "name": "Input Shape"})
columns.append({"type": "number", "name": "Calls"})
if show_gpu:
columns.extend([{"type": "number", "name": "Device Self Duration (us)"},
{"type": "number", "name": "Device Total Duration (us)"}])
columns.extend([{"type": "number", "name": "Host Self Duration (us)"},
{"type": "number", "name": "Host Total Duration (us)"}])
if group_by_input_shape:
op_list = self.profile_data.op_list_groupby_name_input
else:
op_list = self.profile_data.op_list_groupby_name
op_list = sorted(op_list,
key=lambda x: x.self_device_duration if show_gpu else x.self_host_duration,
reverse=True)
rows = []
for op in op_list:
# Whether device_duration & self_device_duration are accurate or not depends on the input tracing data.
row = [op.name]
if group_by_input_shape:
row.append(op.input_shape)
row.append(op.calls)
if show_gpu:
row.extend([round(op.self_device_duration), round(op.device_duration)])
row.extend([round(op.self_host_duration), round(op.host_duration)])
rows.append(row)
data = {"data": {"columns": columns, "rows": rows}}
return data
def _generate_kernel_op_table(self):
table = {}
table["columns"] = [{"type": "string", "name": "Name"}, {"type": "string", "name": "Operator"}]
col_names = ["Calls", "Total Duration (us)", "Mean Duration (us)", "Max Duration (us)", "Min Duration (us)"]
for column in col_names:
table["columns"].append({"type": "number", "name": column})
table["rows"] = []
kernel_list = sorted(self.profile_data.kernel_list_groupby_name_op, key=lambda x: x.total_duration,
reverse=True)
for agg_by_name_op in kernel_list:
            # Row order must match col_names above: Calls, Total, Mean, Max, Min
            kernel_op_row = [agg_by_name_op.name, agg_by_name_op.op_name, agg_by_name_op.calls,
                             agg_by_name_op.total_duration, agg_by_name_op.avg_duration,
                             agg_by_name_op.max_duration, agg_by_name_op.min_duration]
table["rows"].append(kernel_op_row)
data = {"data": table}
return data
def _generate_kernel_pie(self):
pie = {"columns": [{"type": "string", "name": "name"}, {"type": "number", "name": "value"}], "rows": []}
for _id, (name, row) in enumerate(self.profile_data.kernel_stat.iterrows()):
pie["rows"].append([name, row["sum"]])
data = {"total": pie}
return data
def _generate_kernel_table(self):
table = {}
table["columns"] = [{"type": "string", "name": "Name"}]
columns = ["count", "sum", "mean", "max", "min"]
col_names = ["Calls", "Total Duration (us)", "Mean Duration (us)", "Max Duration (us)", "Min Duration (us)"]
for column in col_names:
table["columns"].append({"type": "number", "name": column})
table["rows"] = []
for _id, (name, row) in enumerate(self.profile_data.kernel_stat.iterrows()):
kernel_row = [name]
for column in columns:
kernel_row.append(round(row[column]))
table["rows"].append(kernel_row)
data = {"data": table}
return data
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import pandas as pd
class KernelParser:
def __init__(self):
self.kernel_stat = None
def parse_events(self, events):
events_dict = []
for event in events:
events_dict.append(event.to_dict())
events = events_dict
events = pd.DataFrame(events)
events = events.astype({"type": "category", "category": "category", "name": "string"}, copy=False)
kernels = events[events["category"] == "Kernel"]
self.kernel_stat = kernels.groupby("name")["duration"].agg(["count", "sum", "mean", "max", "min"]) \
.sort_values("sum", ascending=False)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import sys
from .. import utils
from .trace import EventTypes
logger = utils.get_logger()
def merge_ranges(src_ranges, is_sorted=False):
merged_ranges = []
if len(src_ranges) > 0:
if not is_sorted:
src_ranges.sort(key=lambda x: x[0])
src_id = 0
merged_ranges.append(
(src_ranges[src_id][0], src_ranges[src_id][1]))
for src_id in range(1, len(src_ranges)):
dst_id = len(merged_ranges) - 1
if src_ranges[src_id][1] > merged_ranges[dst_id][1]:
if src_ranges[src_id][0] <= merged_ranges[dst_id][1]:
merged_ranges[dst_id] = (merged_ranges[dst_id][0], src_ranges[src_id][1])
else:
merged_ranges.append(
(src_ranges[src_id][0], src_ranges[src_id][1]))
return merged_ranges
def subtract_ranges_lists(range_list1, range_list2):
range_list_dst = []
if len(range_list1) == 0:
return range_list_dst
if len(range_list2) == 0:
range_list_dst = list(range_list1)
return range_list_dst
r1 = range_list1[0]
r2 = range_list2[0]
i1 = i2 = 0
while i1 < len(range_list1):
if i2 == len(range_list2):
range_list_dst.append(r1)
r1, i1 = pop_list(range_list1, i1)
elif r2[1] <= r1[0]:
r2, i2 = pop_list(range_list2, i2)
elif r2[0] <= r1[0] and r2[1] < r1[1]:
r1 = (r2[1], r1[1])
r2, i2 = pop_list(range_list2, i2)
elif r2[0] <= r1[0]:
assert (r2[1] >= r1[1])
r2 = (r1[1], r2[1])
r1, i1 = pop_list(range_list1, i1)
elif r2[0] < r1[1]:
assert (r2[0] > r1[0])
range_list_dst.append((r1[0], r2[0]))
r1 = (r2[0], r1[1])
else:
assert (r2[0] >= r1[1])
range_list_dst.append(r1)
r1, i1 = pop_list(range_list1, i1)
return range_list_dst
def intersection_ranges_lists(range_list1, range_list2):
range_list_dst = []
if len(range_list1) == 0 or len(range_list2) == 0:
return range_list_dst
r1 = range_list1[0]
r2 = range_list2[0]
i1 = i2 = 0
while i1 < len(range_list1):
if i2 == len(range_list2):
break
elif r2[1] <= r1[0]:
r2, i2 = pop_list(range_list2, i2)
elif r2[0] <= r1[0] and r2[1] < r1[1]:
assert (r2[1] > r1[0])
range_list_dst.append((r1[0], r2[1]))
r1 = (r2[1], r1[1])
r2, i2 = pop_list(range_list2, i2)
elif r2[0] <= r1[0]:
assert (r2[1] >= r1[1])
range_list_dst.append(r1)
r2 = (r1[1], r2[1])
r1, i1 = pop_list(range_list1, i1)
elif r2[1] < r1[1]:
assert (r2[0] > r1[0])
range_list_dst.append(r2)
r1 = (r2[1], r1[1])
r2, i2 = pop_list(range_list2, i2)
elif r2[0] < r1[1]:
assert (r2[1] >= r1[1])
range_list_dst.append((r2[0], r1[1]))
r2 = (r1[1], r2[1])
r1, i1 = pop_list(range_list1, i1)
else:
assert (r2[0] >= r1[1])
r1, i1 = pop_list(range_list1, i1)
return range_list_dst
def get_ranges_sum(ranges):
    # Avoid shadowing the built-in sum/range names
    total = 0
    for rng in ranges:
        total += (rng[1] - rng[0])
    return total
def pop_list(range_list, index):
next_index = index + 1
if next_index >= len(range_list):
return None, len(range_list)
next_item = range_list[next_index]
return next_item, next_index
class OverallParser(object):
class Costs:
def __init__(self):
self.step_total_cost = 0
self.kernel_cost = 0
self.memcpy_cost = 0
self.memset_cost = 0
self.runtime_cost = 0
self.dataloader_cost = 0
self.cpuop_cost = 0
self.other_cost = 0
def calculate_costs(self, statistics, step):
self.step_total_cost = step[1] - step[0]
self.kernel_cost = get_ranges_sum(statistics.kernel_cost_ranges)
self.memcpy_cost = get_ranges_sum(statistics.memcpy_cost_ranges)
self.memset_cost = get_ranges_sum(statistics.memset_cost_ranges)
self.runtime_cost = get_ranges_sum(statistics.runtime_cost_ranges)
self.dataloader_cost = get_ranges_sum(statistics.dataloader_cost_ranges)
self.cpuop_cost = get_ranges_sum(statistics.cpuop_cost_ranges)
self.other_cost = get_ranges_sum(statistics.other_cost_ranges)
class Statistics:
def __init__(self):
self.kernel_cost_ranges = []
self.memcpy_cost_ranges = []
self.memset_cost_ranges = []
self.runtime_cost_ranges = []
self.dataloader_cost_ranges = []
self.cpuop_cost_ranges = []
self.other_cost_ranges = []
def intersection_with_step(self, step):
result = OverallParser.Statistics()
step = [step]
result.kernel_cost_ranges = intersection_ranges_lists(step, self.kernel_cost_ranges)
result.memcpy_cost_ranges = intersection_ranges_lists(step, self.memcpy_cost_ranges)
result.memset_cost_ranges = intersection_ranges_lists(step, self.memset_cost_ranges)
result.runtime_cost_ranges = intersection_ranges_lists(step, self.runtime_cost_ranges)
result.dataloader_cost_ranges = intersection_ranges_lists(step, self.dataloader_cost_ranges)
result.cpuop_cost_ranges = intersection_ranges_lists(step, self.cpuop_cost_ranges)
result.other_cost_ranges = intersection_ranges_lists(step, self.other_cost_ranges)
return result
def __init__(self):
self.kernel_ranges = []
self.memcpy_ranges = []
self.memset_ranges = []
self.runtime_ranges = []
self.dataloader_ranges = []
self.cpuop_ranges = []
self.steps = []
self.steps_names = []
self.is_gpu_used = False
self.min_ts = sys.maxsize
self.max_ts = -sys.maxsize - 1
self.steps_costs = []
self.avg_costs = OverallParser.Costs()
def parse_events(self, events):
logger.debug("Overall, parse events")
for event in events:
self.parse_event(event)
self.kernel_ranges = merge_ranges(self.kernel_ranges)
self.memcpy_ranges = merge_ranges(self.memcpy_ranges)
self.memset_ranges = merge_ranges(self.memset_ranges)
self.runtime_ranges = merge_ranges(self.runtime_ranges)
self.dataloader_ranges = merge_ranges(self.dataloader_ranges)
self.cpuop_ranges = merge_ranges(self.cpuop_ranges)
if len(self.steps) == 0:
self.steps.append((self.min_ts, self.max_ts))
self.steps_names.append("0")
merged_steps = list(self.steps)
merged_steps = merge_ranges(merged_steps)
logger.debug("Overall, statistics")
global_stats = OverallParser.Statistics()
global_stats.kernel_cost_ranges = self.kernel_ranges
slots = subtract_ranges_lists(merged_steps, self.kernel_ranges)
global_stats.memcpy_cost_ranges = intersection_ranges_lists(slots, self.memcpy_ranges)
slots = subtract_ranges_lists(slots, global_stats.memcpy_cost_ranges)
global_stats.memset_cost_ranges = intersection_ranges_lists(slots, self.memset_ranges)
slots = subtract_ranges_lists(slots, global_stats.memset_cost_ranges)
global_stats.runtime_cost_ranges = intersection_ranges_lists(slots, self.runtime_ranges)
slots = subtract_ranges_lists(slots, global_stats.runtime_cost_ranges)
global_stats.dataloader_cost_ranges = intersection_ranges_lists(slots, self.dataloader_ranges)
slots = subtract_ranges_lists(slots, global_stats.dataloader_cost_ranges)
global_stats.cpuop_cost_ranges = intersection_ranges_lists(slots, self.cpuop_ranges)
slots = subtract_ranges_lists(slots, global_stats.cpuop_cost_ranges)
global_stats.other_cost_ranges = slots
logger.debug("Overall, aggregation")
valid_steps = len(self.steps)
for i in range(valid_steps):
steps_stat = global_stats.intersection_with_step(self.steps[i])
self.steps_costs.append(OverallParser.Costs())
self.steps_costs[i].calculate_costs(steps_stat, self.steps[i])
self.avg_costs.step_total_cost += self.steps_costs[i].step_total_cost
self.avg_costs.kernel_cost += self.steps_costs[i].kernel_cost
self.avg_costs.memcpy_cost += self.steps_costs[i].memcpy_cost
self.avg_costs.memset_cost += self.steps_costs[i].memset_cost
self.avg_costs.runtime_cost += self.steps_costs[i].runtime_cost
self.avg_costs.dataloader_cost += self.steps_costs[i].dataloader_cost
self.avg_costs.cpuop_cost += self.steps_costs[i].cpuop_cost
self.avg_costs.other_cost += self.steps_costs[i].other_cost
self.avg_costs.step_total_cost /= valid_steps
self.avg_costs.kernel_cost /= valid_steps
self.avg_costs.memcpy_cost /= valid_steps
self.avg_costs.memset_cost /= valid_steps
self.avg_costs.runtime_cost /= valid_steps
self.avg_costs.dataloader_cost /= valid_steps
self.avg_costs.cpuop_cost /= valid_steps
self.avg_costs.other_cost /= valid_steps
def parse_event(self, event):
ts = event.ts
dur = event.duration
evt_type = event.type
if evt_type == EventTypes.KERNEL:
self.kernel_ranges.append((ts, ts + dur))
elif evt_type == EventTypes.MEMCPY:
self.memcpy_ranges.append((ts, ts + dur))
elif evt_type == EventTypes.MEMSET:
self.memset_ranges.append((ts, ts + dur))
elif evt_type == EventTypes.RUNTIME:
self.runtime_ranges.append((ts, ts + dur))
elif evt_type == EventTypes.OPERATOR and event.name.startswith("enumerate(DataLoader)#") \
and event.name.endswith(".__next__"):
self.dataloader_ranges.append((ts, ts + dur))
elif event.type == EventTypes.PROFILER_STEP:
self.steps.append((ts, ts + dur))
self.steps_names.append(str(event.step))
elif evt_type in [EventTypes.PYTHON, EventTypes.OPERATOR]:
self.cpuop_ranges.append((ts, ts + dur))
if evt_type == EventTypes.RUNTIME:
self.is_gpu_used = True
if ts < self.min_ts:
self.min_ts = ts
if ts + dur > self.max_ts:
self.max_ts = ts + dur
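# Illustrative usage sketch (not part of the original module); run it as a
# module so the relative imports resolve. The range helpers above operate on
# lists of (start_ts, end_ts) tuples; the numbers below are synthetic.
if __name__ == "__main__":
    _steps = merge_ranges([(0, 10), (8, 20)])        # -> [(0, 20)]
    _busy = [(2, 5), (12, 15)]
    print(intersection_ranges_lists(_steps, _busy))  # -> [(2, 5), (12, 15)]
    print(subtract_ranges_lists(_steps, _busy))      # -> [(0, 2), (5, 12), (15, 20)]
    print(get_ranges_sum(_busy))                     # -> 6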
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import json
import os
from collections import OrderedDict
from . import trace
from .kernel_parser import KernelParser
from .module_parser import ModuleParser
from .overall_parser import OverallParser
from .. import consts, utils
logger = utils.get_logger()
class RunData(object):
def __init__(self, name, run_dir):
self.name = name
self.run_dir = run_dir
self.profiles = OrderedDict()
class RunProfileData(object):
def __init__(self, worker):
self.worker = worker
self.data_schema_version = None
self.events = None
self.trace_file_path = None
self.is_gpu_used = False
self.steps_costs = None
self.steps_names = None
self.avg_costs = None
self.op_list_groupby_name = None
self.op_list_groupby_name_input = None
self.kernel_list_groupby_name_op = None
self.kernel_stat = None
self.recommendations = []
@staticmethod
def parse(run_dir, worker):
logger.debug("Parse trace, run_dir=%s, worker=%s", run_dir, worker)
trace_path = os.path.join(run_dir, "{}{}".format(worker, consts.TRACE_FILE_SUFFIX))
fopen = open
if not os.path.isfile(trace_path):
trace_path += ".gz"
fopen = gzip.open
if not os.path.isfile(trace_path):
raise FileNotFoundError(trace_path)
with fopen(trace_path, 'r') as f:
trace_json = json.load(f)
profile = RunProfileData(worker)
profile.trace_file_path = trace_path
        if isinstance(trace_json, dict):
metadata = trace_json.get("profilerMetadata", None)
version = metadata.get("DataSchemaVersion") if metadata else None
profile.data_schema_version = version
trace_json = trace_json["traceEvents"]
parser = trace.get_event_parser(profile.data_schema_version)
profile.events = []
for data in trace_json:
event = parser.parse(data)
if event is not None:
profile.events.append(event)
return profile
def process(self):
logger.debug("OverallParser")
overall_parser = OverallParser()
overall_parser.parse_events(self.events)
self.is_gpu_used = overall_parser.is_gpu_used
self.steps_costs = overall_parser.steps_costs
self.steps_names = overall_parser.steps_names
self.avg_costs = overall_parser.avg_costs
logger.debug("ModuleParser")
module_parser = ModuleParser()
module_parser.parse_events(self.events)
self.op_list_groupby_name = module_parser.op_list_groupby_name
self.op_list_groupby_name_input = module_parser.op_list_groupby_name_input
self.kernel_list_groupby_name_op = module_parser.kernel_list_groupby_name_op
if self.is_gpu_used:
logger.debug("KernelParser")
kernel_parser = KernelParser()
kernel_parser.parse_events(self.events)
self.kernel_stat = kernel_parser.kernel_stat
def analyze(self):
self.recommendations = []
dataloader_ratio = self.avg_costs.dataloader_cost / self.avg_costs.step_total_cost
if dataloader_ratio > 0.05:
text = "This run has high time cost on input data loading. " \
"{}% of the step time is in DataLoader. You could " \
"try to set num_workers on DataLoader's construction " \
"and enable multi-processes on data loading. " \
"Reference: <a href =\"{}\" target=\"_blank\">Single- and Multi-process Data Loading</a>".format(
round(dataloader_ratio * 100, 1),
"https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading"
)
self.recommendations.append(text)
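# Illustrative usage sketch (not part of the original module); run it as a
# module so the relative imports resolve. "./logdir/run1" and "worker0" are
# placeholders; parse() expects a worker0.pt.trace.json[.gz] file in that dir.
if __name__ == "__main__":
    _profile = RunProfileData.parse("./logdir/run1", "worker0")
    _profile.process()
    _profile.analyze()
    print(_profile.recommendations)  # HTML snippets rendered on the Overview page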
|